drivers/net/tun.c
1 /*
2  *  TUN - Universal TUN/TAP device driver.
3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  *  GNU General Public License for more details.
14  *
15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16  */
17
18 /*
19  *  Changes:
20  *
21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22  *    Add TUNSETLINK ioctl to set the link encapsulation
23  *
24  *  Mark Smith <markzzzsmith@yahoo.com.au>
25  *    Use eth_random_addr() for tap MAC address.
26  *
27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
28  *    Fixes in packet dropping, queue length setting and queue wakeup.
29  *    Increased default tx queue length.
30  *    Added ethtool API.
31  *    Minor cleanups
32  *
33  *  Daniel Podlejski <underley@underley.eu.org>
34  *    Modifications for 2.3.99-pre5 kernel.
35  */
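36 /*
 *  Illustrative userspace usage (a sketch, not part of the driver): a device
 *  is created by opening the character device and issuing TUNSETIFF (IFF_TUN
 *  for an L3 device, IFF_TAP for an L2 device), after which frames are
 *  exchanged with read()/write() on the returned fd. With IFF_MULTI_QUEUE,
 *  supported by this driver, several fds may be attached to the same device
 *  name, one per queue.
 *
 *      int fd = open("/dev/net/tun", O_RDWR);
 *      struct ifreq ifr;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *      strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *      if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0)
 *              perror("TUNSETIFF");
 */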
36
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39 #define DRV_NAME        "tun"
40 #define DRV_VERSION     "1.6"
41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
42 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
43
44 #include <linux/module.h>
45 #include <linux/errno.h>
46 #include <linux/kernel.h>
47 #include <linux/major.h>
48 #include <linux/slab.h>
49 #include <linux/poll.h>
50 #include <linux/fcntl.h>
51 #include <linux/init.h>
52 #include <linux/skbuff.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/miscdevice.h>
56 #include <linux/ethtool.h>
57 #include <linux/rtnetlink.h>
58 #include <linux/compat.h>
59 #include <linux/if.h>
60 #include <linux/if_arp.h>
61 #include <linux/if_ether.h>
62 #include <linux/if_tun.h>
63 #include <linux/crc32.h>
64 #include <linux/nsproxy.h>
65 #include <linux/virtio_net.h>
66 #include <linux/rcupdate.h>
67 #include <net/net_namespace.h>
68 #include <net/netns/generic.h>
69 #include <net/rtnetlink.h>
70 #include <net/sock.h>
71
72 #include <asm/uaccess.h>
73
74 /* Uncomment to enable debugging */
75 /* #define TUN_DEBUG 1 */
76
77 #ifdef TUN_DEBUG
78 static int debug;
79
80 #define tun_debug(level, tun, fmt, args...)                     \
81 do {                                                            \
82         if (tun->debug)                                         \
83                 netdev_printk(level, tun->dev, fmt, ##args);    \
84 } while (0)
85 #define DBG1(level, fmt, args...)                               \
86 do {                                                            \
87         if (debug == 2)                                         \
88                 printk(level fmt, ##args);                      \
89 } while (0)
90 #else
91 #define tun_debug(level, tun, fmt, args...)                     \
92 do {                                                            \
93         if (0)                                                  \
94                 netdev_printk(level, tun->dev, fmt, ##args);    \
95 } while (0)
96 #define DBG1(level, fmt, args...)                               \
97 do {                                                            \
98         if (0)                                                  \
99                 printk(level fmt, ##args);                      \
100 } while (0)
101 #endif
102
103 #define GOODCOPY_LEN 128
104
105 #define FLT_EXACT_COUNT 8
106 struct tap_filter {
107         unsigned int    count;    /* Number of addrs. Zero means disabled */
108         u32             mask[2];  /* Mask of the hashed addrs */
109         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
110 };
111
112 /* 1024 is probably a high enough limit: modern hypervisors seem to support on
113  * the order of 100-200 CPUs so this leaves us some breathing space if we want
114  * to match a queue per guest CPU.
115  */
116 #define MAX_TAP_QUEUES 1024
117
118 #define TUN_FLOW_EXPIRE (3 * HZ)
119
120 /* A tun_file connects an open character device to a tuntap netdevice. It
121  * also contains all socket related structures (except sock_fprog and tap_filter)
122  * to serve as one transmit queue for the tuntap device. The sock_fprog and
123  * tap_filter are kept in tun_struct since they are used for filtering on the
124  * netdevice as a whole, not on a specific queue (at least I didn't see the
125  * need for per-queue filtering).
126  *
127  * RCU usage:
128  * The tun_file and tun_struct are loosely coupled; the pointer from one to the
129  * other can only be read while rcu_read_lock or rtnl_lock is held.
130  */
131 struct tun_file {
132         struct sock sk;
133         struct socket socket;
134         struct socket_wq wq;
135         struct tun_struct __rcu *tun;
136         struct net *net;
137         struct fasync_struct *fasync;
138         /* only used for fasync */
139         unsigned int flags;
140         u16 queue_index;
141         struct list_head next;
142         struct tun_struct *detached;
143 };
144
145 struct tun_flow_entry {
146         struct hlist_node hash_link;
147         struct rcu_head rcu;
148         struct tun_struct *tun;
149
150         u32 rxhash;
151         int queue_index;
152         unsigned long updated;
153 };
154
155 #define TUN_NUM_FLOW_ENTRIES 1024
156
157 /* Since the socket was moved to tun_file, the socket filter, sndbuf and
158  * vnet header size are restored when a file is attached to a persistent
159  * device, so that persistent devices keep their configured behavior.
160  */
161 struct tun_struct {
162         struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
163         unsigned int            numqueues;
164         unsigned int            flags;
165         kuid_t                  owner;
166         kgid_t                  group;
167
168         struct net_device       *dev;
169         netdev_features_t       set_features;
170 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
171                           NETIF_F_TSO6|NETIF_F_UFO)
172
173         int                     vnet_hdr_sz;
174         int                     sndbuf;
175         struct tap_filter       txflt;
176         struct sock_fprog       fprog;
177         /* protected by rtnl lock */
178         bool                    filter_attached;
179 #ifdef TUN_DEBUG
180         int debug;
181 #endif
182         spinlock_t lock;
183         struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
184         struct timer_list flow_gc_timer;
185         unsigned long ageing_time;
186         unsigned int numdisabled;
187         struct list_head disabled;
188 };
189
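/* Map an rxhash onto one of the flow table's TUN_NUM_FLOW_ENTRIES (1024)
 * buckets; 0x3ff is TUN_NUM_FLOW_ENTRIES - 1.
 */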
190 static inline u32 tun_hashfn(u32 rxhash)
191 {
192         return rxhash & 0x3ff;
193 }
194
195 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
196 {
197         struct tun_flow_entry *e;
198         struct hlist_node *n;
199
200         hlist_for_each_entry_rcu(e, n, head, hash_link) {
201                 if (e->rxhash == rxhash)
202                         return e;
203         }
204         return NULL;
205 }
206
207 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
208                                               struct hlist_head *head,
209                                               u32 rxhash, u16 queue_index)
210 {
211         struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
212
213         if (e) {
214                 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
215                           rxhash, queue_index);
216                 e->updated = jiffies;
217                 e->rxhash = rxhash;
218                 e->queue_index = queue_index;
219                 e->tun = tun;
220                 hlist_add_head_rcu(&e->hash_link, head);
221         }
222         return e;
223 }
224
225 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
226 {
227         tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
228                   e->rxhash, e->queue_index);
229         hlist_del_rcu(&e->hash_link);
230         kfree_rcu(e, rcu);
231 }
232
233 static void tun_flow_flush(struct tun_struct *tun)
234 {
235         int i;
236
237         spin_lock_bh(&tun->lock);
238         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
239                 struct tun_flow_entry *e;
240                 struct hlist_node *h, *n;
241
242                 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
243                         tun_flow_delete(tun, e);
244         }
245         spin_unlock_bh(&tun->lock);
246 }
247
248 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
249 {
250         int i;
251
252         spin_lock_bh(&tun->lock);
253         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
254                 struct tun_flow_entry *e;
255                 struct hlist_node *h, *n;
256
257                 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
258                         if (e->queue_index == queue_index)
259                                 tun_flow_delete(tun, e);
260                 }
261         }
262         spin_unlock_bh(&tun->lock);
263 }
264
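/* Flow garbage-collection timer callback: walk the flow table, drop entries
 * that have not been updated within ageing_time, and re-arm the timer for
 * the earliest remaining expiry when the table was not empty.
 */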
265 static void tun_flow_cleanup(unsigned long data)
266 {
267         struct tun_struct *tun = (struct tun_struct *)data;
268         unsigned long delay = tun->ageing_time;
269         unsigned long next_timer = jiffies + delay;
270         unsigned long count = 0;
271         int i;
272
273         tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
274
275         spin_lock_bh(&tun->lock);
276         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
277                 struct tun_flow_entry *e;
278                 struct hlist_node *h, *n;
279
280                 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
281                         unsigned long this_timer;
282                         count++;
283                         this_timer = e->updated + delay;
284                         if (time_before_eq(this_timer, jiffies))
285                                 tun_flow_delete(tun, e);
286                         else if (time_before(this_timer, next_timer))
287                                 next_timer = this_timer;
288                 }
289         }
290
291         if (count)
292                 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
293         spin_unlock_bh(&tun->lock);
294 }
295
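/* Record or refresh the rxhash -> queue_index mapping for a received flow.
 * Creating a new entry also arms the garbage-collection timer if it is not
 * already pending. This is a no-op for a zero rxhash or a single-queue
 * device.
 */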
296 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
297                             u16 queue_index)
298 {
299         struct hlist_head *head;
300         struct tun_flow_entry *e;
301         unsigned long delay = tun->ageing_time;
302
303         if (!rxhash)
304                 return;
305         else
306                 head = &tun->flows[tun_hashfn(rxhash)];
307
308         rcu_read_lock();
309
310         if (tun->numqueues == 1)
311                 goto unlock;
312
313         e = tun_flow_find(head, rxhash);
314         if (likely(e)) {
315                 /* TODO: keep queueing to old queue until it's empty? */
316                 e->queue_index = queue_index;
317                 e->updated = jiffies;
318         } else {
319                 spin_lock_bh(&tun->lock);
320                 if (!tun_flow_find(head, rxhash))
321                         tun_flow_create(tun, head, rxhash, queue_index);
322
323                 if (!timer_pending(&tun->flow_gc_timer))
324                         mod_timer(&tun->flow_gc_timer,
325                                   round_jiffies_up(jiffies + delay));
326                 spin_unlock_bh(&tun->lock);
327         }
328
329 unlock:
330         rcu_read_unlock();
331 }
332
333 /* We try to identify a flow through its rxhash first. The reason that
334  * we do not check the rxq no. is that some cards (e.g. 82599) choose
335  * the rxq based on the txq where the last packet of the flow was sent. As
336  * the userspace application moves between processors, we may get a
337  * different rxq no. here. If we could not get an rxhash, then we
338  * hope the rxq no. may help here.
339  */
340 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
341 {
342         struct tun_struct *tun = netdev_priv(dev);
343         struct tun_flow_entry *e;
344         u32 txq = 0;
345         u32 numqueues = 0;
346
347         rcu_read_lock();
348         numqueues = tun->numqueues;
349
350         txq = skb_get_rxhash(skb);
351         if (txq) {
352                 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
353                 if (e)
354                         txq = e->queue_index;
355                 else
356                         /* use multiply and shift instead of expensive divide */
357                         txq = ((u64)txq * numqueues) >> 32;
358         } else if (likely(skb_rx_queue_recorded(skb))) {
359                 txq = skb_get_rx_queue(skb);
360                 while (unlikely(txq >= numqueues))
361                         txq -= numqueues;
362         }
363
364         rcu_read_unlock();
365         return txq;
366 }
367
368 static inline bool tun_not_capable(struct tun_struct *tun)
369 {
370         const struct cred *cred = current_cred();
371         struct net *net = dev_net(tun->dev);
372
373         return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
374                   (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
375                 !ns_capable(net->user_ns, CAP_NET_ADMIN);
376 }
377
378 static void tun_set_real_num_queues(struct tun_struct *tun)
379 {
380         netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
381         netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
382 }
383
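/* A disabled queue keeps its file and socket alive but is removed from the
 * device's active tfiles[] array and parked on tun->disabled until it is
 * attached again.
 */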
384 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
385 {
386         tfile->detached = tun;
387         list_add_tail(&tfile->next, &tun->disabled);
388         ++tun->numdisabled;
389 }
390
391 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
392 {
393         struct tun_struct *tun = tfile->detached;
394
395         tfile->detached = NULL;
396         list_del_init(&tfile->next);
397         --tun->numdisabled;
398         return tun;
399 }
400
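/* Detach a queue from its device. With clean == false the queue is merely
 * disabled and can be re-attached later; with clean == true its socket is
 * released, and the netdevice itself is unregistered once the last queue
 * (including disabled ones) of a non-persistent device is gone.
 */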
401 static void __tun_detach(struct tun_file *tfile, bool clean)
402 {
403         struct tun_file *ntfile;
404         struct tun_struct *tun;
405         struct net_device *dev;
406
407         tun = rtnl_dereference(tfile->tun);
408
409         if (tun) {
410                 u16 index = tfile->queue_index;
411                 BUG_ON(index >= tun->numqueues);
412                 dev = tun->dev;
413
414                 rcu_assign_pointer(tun->tfiles[index],
415                                    tun->tfiles[tun->numqueues - 1]);
416                 rcu_assign_pointer(tfile->tun, NULL);
417                 ntfile = rtnl_dereference(tun->tfiles[index]);
418                 ntfile->queue_index = index;
419
420                 --tun->numqueues;
421                 if (clean)
422                         sock_put(&tfile->sk);
423                 else
424                         tun_disable_queue(tun, tfile);
425
426                 synchronize_net();
427                 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
428                 /* Drop read queue */
429                 skb_queue_purge(&tfile->sk.sk_receive_queue);
430                 tun_set_real_num_queues(tun);
431         } else if (tfile->detached && clean) {
432                 tun = tun_enable_queue(tfile);
433                 sock_put(&tfile->sk);
434         }
435
436         if (clean) {
437                 if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
438                     !(tun->flags & TUN_PERSIST))
439                         if (tun->dev->reg_state == NETREG_REGISTERED)
440                                 unregister_netdevice(tun->dev);
441
442                 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
443                                  &tfile->socket.flags));
444                 sk_release_kernel(&tfile->sk);
445         }
446 }
447
448 static void tun_detach(struct tun_file *tfile, bool clean)
449 {
450         rtnl_lock();
451         __tun_detach(tfile, clean);
452         rtnl_unlock();
453 }
454
455 static void tun_detach_all(struct net_device *dev)
456 {
457         struct tun_struct *tun = netdev_priv(dev);
458         struct tun_file *tfile, *tmp;
459         int i, n = tun->numqueues;
460
461         for (i = 0; i < n; i++) {
462                 tfile = rtnl_dereference(tun->tfiles[i]);
463                 BUG_ON(!tfile);
464                 wake_up_all(&tfile->wq.wait);
465                 rcu_assign_pointer(tfile->tun, NULL);
466                 --tun->numqueues;
467         }
468         BUG_ON(tun->numqueues != 0);
469
470         synchronize_net();
471         for (i = 0; i < n; i++) {
472                 tfile = rtnl_dereference(tun->tfiles[i]);
473                 /* Drop read queue */
474                 skb_queue_purge(&tfile->sk.sk_receive_queue);
475                 sock_put(&tfile->sk);
476         }
477         list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
478                 tun_enable_queue(tfile);
479                 skb_queue_purge(&tfile->sk.sk_receive_queue);
480                 sock_put(&tfile->sk);
481         }
482         BUG_ON(tun->numdisabled != 0);
483
484         if (tun->flags & TUN_PERSIST)
485                 module_put(THIS_MODULE);
486 }
487
488 static int tun_attach(struct tun_struct *tun, struct file *file)
489 {
490         struct tun_file *tfile = file->private_data;
491         int err;
492
493         err = -EINVAL;
494         if (rtnl_dereference(tfile->tun))
495                 goto out;
496
497         err = -EBUSY;
498         if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
499                 goto out;
500
501         err = -E2BIG;
502         if (!tfile->detached &&
503             tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
504                 goto out;
505
506         err = 0;
507
508         /* Re-attach the filter to the persistent device */
509         if (tun->filter_attached == true) {
510                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
511                 if (!err)
512                         goto out;
513         }
514         tfile->queue_index = tun->numqueues;
515         rcu_assign_pointer(tfile->tun, tun);
516         rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
517         tun->numqueues++;
518
519         if (tfile->detached)
520                 tun_enable_queue(tfile);
521         else
522                 sock_hold(&tfile->sk);
523
524         tun_set_real_num_queues(tun);
525
526         /* device is allowed to go away first, so no need to hold extra
527          * refcnt.
528          */
529
530 out:
531         return err;
532 }
533
534 static struct tun_struct *__tun_get(struct tun_file *tfile)
535 {
536         struct tun_struct *tun;
537
538         rcu_read_lock();
539         tun = rcu_dereference(tfile->tun);
540         if (tun)
541                 dev_hold(tun->dev);
542         rcu_read_unlock();
543
544         return tun;
545 }
546
547 static struct tun_struct *tun_get(struct file *file)
548 {
549         return __tun_get(file->private_data);
550 }
551
552 static void tun_put(struct tun_struct *tun)
553 {
554         dev_put(tun->dev);
555 }
556
557 /* TAP filtering */
558 static void addr_hash_set(u32 *mask, const u8 *addr)
559 {
560         int n = ether_crc(ETH_ALEN, addr) >> 26;
561         mask[n >> 5] |= (1 << (n & 31));
562 }
563
564 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
565 {
566         int n = ether_crc(ETH_ALEN, addr) >> 26;
567         return mask[n >> 5] & (1 << (n & 31));
568 }
569
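/* Update the MAC filter of a tap device (TUNSETTXFILTER). The user argument
 * is a struct tun_filter header followed by uf.count Ethernet addresses: the
 * first FLT_EXACT_COUNT of them form an exact-match table, the remaining
 * addresses must be multicast and are folded into the hash mask (a unicast
 * address there leaves the filter disabled), and TUN_FLT_ALLMULTI accepts
 * all multicast traffic.
 */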
570 static int update_filter(struct tap_filter *filter, void __user *arg)
571 {
572         struct { u8 u[ETH_ALEN]; } *addr;
573         struct tun_filter uf;
574         int err, alen, n, nexact;
575
576         if (copy_from_user(&uf, arg, sizeof(uf)))
577                 return -EFAULT;
578
579         if (!uf.count) {
580                 /* Disabled */
581                 filter->count = 0;
582                 return 0;
583         }
584
585         alen = ETH_ALEN * uf.count;
586         addr = kmalloc(alen, GFP_KERNEL);
587         if (!addr)
588                 return -ENOMEM;
589
590         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
591                 err = -EFAULT;
592                 goto done;
593         }
594
595         /* The filter is updated without holding any locks, which is
596          * perfectly safe: we disable it first, and in the worst
597          * case we'll accept a few undesired packets. */
598         filter->count = 0;
599         wmb();
600
601         /* Use first set of addresses as an exact filter */
602         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
603                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
604
605         nexact = n;
606
607         /* Remaining multicast addresses are hashed,
608          * unicast will leave the filter disabled. */
609         memset(filter->mask, 0, sizeof(filter->mask));
610         for (; n < uf.count; n++) {
611                 if (!is_multicast_ether_addr(addr[n].u)) {
612                         err = 0; /* no filter */
613                         goto done;
614                 }
615                 addr_hash_set(filter->mask, addr[n].u);
616         }
617
618         /* For ALLMULTI just set the mask to all ones.
619          * This overrides the mask populated above. */
620         if ((uf.flags & TUN_FLT_ALLMULTI))
621                 memset(filter->mask, ~0, sizeof(filter->mask));
622
623         /* Now enable the filter */
624         wmb();
625         filter->count = nexact;
626
627         /* Return the number of exact filters */
628         err = nexact;
629
630 done:
631         kfree(addr);
632         return err;
633 }
634
635 /* Returns: 0 - drop, !=0 - accept */
636 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
637 {
638         /* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
639          * at this point. */
640         struct ethhdr *eh = (struct ethhdr *) skb->data;
641         int i;
642
643         /* Exact match */
644         for (i = 0; i < filter->count; i++)
645                 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
646                         return 1;
647
648         /* Inexact match (multicast only) */
649         if (is_multicast_ether_addr(eh->h_dest))
650                 return addr_hash_test(filter->mask, eh->h_dest);
651
652         return 0;
653 }
654
655 /*
656  * Checks whether the packet is accepted or not.
657  * Returns: 0 - drop, !=0 - accept
658  */
659 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
660 {
661         if (!filter->count)
662                 return 1;
663
664         return run_filter(filter, skb);
665 }
666
667 /* Network device part of the driver */
668
669 static const struct ethtool_ops tun_ethtool_ops;
670
671 /* Net device detach from fd. */
672 static void tun_net_uninit(struct net_device *dev)
673 {
674         tun_detach_all(dev);
675 }
676
677 /* Net device open. */
678 static int tun_net_open(struct net_device *dev)
679 {
680         netif_tx_start_all_queues(dev);
681         return 0;
682 }
683
684 /* Net device close. */
685 static int tun_net_close(struct net_device *dev)
686 {
687         netif_tx_stop_all_queues(dev);
688         return 0;
689 }
690
691 /* Net device start xmit */
692 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
693 {
694         struct tun_struct *tun = netdev_priv(dev);
695         int txq = skb->queue_mapping;
696         struct tun_file *tfile;
697
698         rcu_read_lock();
699         tfile = rcu_dereference(tun->tfiles[txq]);
700
701         /* Drop packet if interface is not attached */
702         if (txq >= tun->numqueues)
703                 goto drop;
704
705         tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
706
707         BUG_ON(!tfile);
708
709         /* Drop if the filter does not like it.
710          * This is a noop if the filter is disabled.
711          * Filter can be enabled only for the TAP devices. */
712         if (!check_filter(&tun->txflt, skb))
713                 goto drop;
714
715         if (tfile->socket.sk->sk_filter &&
716             sk_filter(tfile->socket.sk, skb))
717                 goto drop;
718
719         /* Limit the number of packets queued by dividing the txq length by the
720          * number of queues.
721          */
722         if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
723                           >= dev->tx_queue_len / tun->numqueues)
724                 goto drop;
725
726         /* Orphan the skb - required as we might hang on to it
727          * for an indefinite time. */
728         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
729                 goto drop;
730         skb_orphan(skb);
731
732         /* Enqueue packet */
733         skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
734
735         /* Notify and wake up reader process */
736         if (tfile->flags & TUN_FASYNC)
737                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
738         wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
739                                    POLLRDNORM | POLLRDBAND);
740
741         rcu_read_unlock();
742         return NETDEV_TX_OK;
743
744 drop:
745         dev->stats.tx_dropped++;
746         skb_tx_error(skb);
747         kfree_skb(skb);
748         rcu_read_unlock();
749         return NETDEV_TX_OK;
750 }
751
752 static void tun_net_mclist(struct net_device *dev)
753 {
754         /*
755          * This callback is supposed to deal with mc filter in
756          * _rx_ path and has nothing to do with the _tx_ path.
757          * In rx path we always accept everything userspace gives us.
758          */
759 }
760
761 #define MIN_MTU 68
762 #define MAX_MTU 65535
763
764 static int
765 tun_net_change_mtu(struct net_device *dev, int new_mtu)
766 {
767         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
768                 return -EINVAL;
769         dev->mtu = new_mtu;
770         return 0;
771 }
772
773 static netdev_features_t tun_net_fix_features(struct net_device *dev,
774         netdev_features_t features)
775 {
776         struct tun_struct *tun = netdev_priv(dev);
777
778         return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
779 }
780 #ifdef CONFIG_NET_POLL_CONTROLLER
781 static void tun_poll_controller(struct net_device *dev)
782 {
783         /*
784          * Tun only receives frames when:
785          * 1) the char device endpoint gets data from user space
786          * 2) the tun socket gets a sendmsg call from user space
787          * Since both of those are synchronous operations, we are guaranteed
788          * never to have pending data when we poll for it,
789          * so there's nothing to do here but return.
790          * We need this though so netpoll recognizes us as an interface that
791          * supports polling, which enables bridge devices in virt setups to
792          * still use netconsole
793          */
794         return;
795 }
796 #endif
797 static const struct net_device_ops tun_netdev_ops = {
798         .ndo_uninit             = tun_net_uninit,
799         .ndo_open               = tun_net_open,
800         .ndo_stop               = tun_net_close,
801         .ndo_start_xmit         = tun_net_xmit,
802         .ndo_change_mtu         = tun_net_change_mtu,
803         .ndo_fix_features       = tun_net_fix_features,
804         .ndo_select_queue       = tun_select_queue,
805 #ifdef CONFIG_NET_POLL_CONTROLLER
806         .ndo_poll_controller    = tun_poll_controller,
807 #endif
808 };
809
810 static const struct net_device_ops tap_netdev_ops = {
811         .ndo_uninit             = tun_net_uninit,
812         .ndo_open               = tun_net_open,
813         .ndo_stop               = tun_net_close,
814         .ndo_start_xmit         = tun_net_xmit,
815         .ndo_change_mtu         = tun_net_change_mtu,
816         .ndo_fix_features       = tun_net_fix_features,
817         .ndo_set_rx_mode        = tun_net_mclist,
818         .ndo_set_mac_address    = eth_mac_addr,
819         .ndo_validate_addr      = eth_validate_addr,
820         .ndo_select_queue       = tun_select_queue,
821 #ifdef CONFIG_NET_POLL_CONTROLLER
822         .ndo_poll_controller    = tun_poll_controller,
823 #endif
824 };
825
826 static int tun_flow_init(struct tun_struct *tun)
827 {
828         int i;
829
830         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
831                 INIT_HLIST_HEAD(&tun->flows[i]);
832
833         tun->ageing_time = TUN_FLOW_EXPIRE;
834         setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
835         mod_timer(&tun->flow_gc_timer,
836                   round_jiffies_up(jiffies + tun->ageing_time));
837
838         return 0;
839 }
840
841 static void tun_flow_uninit(struct tun_struct *tun)
842 {
843         del_timer_sync(&tun->flow_gc_timer);
844         tun_flow_flush(tun);
845 }
846
847 /* Initialize net device. */
848 static void tun_net_init(struct net_device *dev)
849 {
850         struct tun_struct *tun = netdev_priv(dev);
851
852         switch (tun->flags & TUN_TYPE_MASK) {
853         case TUN_TUN_DEV:
854                 dev->netdev_ops = &tun_netdev_ops;
855
856                 /* Point-to-Point TUN Device */
857                 dev->hard_header_len = 0;
858                 dev->addr_len = 0;
859                 dev->mtu = 1500;
860
861                 /* Zero header length */
862                 dev->type = ARPHRD_NONE;
863                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
864                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
865                 break;
866
867         case TUN_TAP_DEV:
868                 dev->netdev_ops = &tap_netdev_ops;
869                 /* Ethernet TAP Device */
870                 ether_setup(dev);
871                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
872                 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
873
874                 eth_hw_addr_random(dev);
875
876                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
877                 break;
878         }
879 }
880
881 /* Character device part */
882
883 /* Poll */
884 static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
885 {
886         struct tun_file *tfile = file->private_data;
887         struct tun_struct *tun = __tun_get(tfile);
888         struct sock *sk;
889         unsigned int mask = 0;
890
891         if (!tun)
892                 return POLLERR;
893
894         sk = tfile->socket.sk;
895
896         tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
897
898         poll_wait(file, &tfile->wq.wait, wait);
899
900         if (!skb_queue_empty(&sk->sk_receive_queue))
901                 mask |= POLLIN | POLLRDNORM;
902
903         if (sock_writeable(sk) ||
904             (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
905              sock_writeable(sk)))
906                 mask |= POLLOUT | POLLWRNORM;
907
908         if (tun->dev->reg_state != NETREG_REGISTERED)
909                 mask = POLLERR;
910
911         tun_put(tun);
912         return mask;
913 }
914
915 /* prepad is the amount to reserve at front.  len is length after that.
916  * linear is a hint as to how much to copy (usually headers). */
917 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
918                                      size_t prepad, size_t len,
919                                      size_t linear, int noblock)
920 {
921         struct sock *sk = tfile->socket.sk;
922         struct sk_buff *skb;
923         int err;
924
925         /* Under a page?  Don't bother with paged skb. */
926         if (prepad + len < PAGE_SIZE || !linear)
927                 linear = len;
928
929         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
930                                    &err);
931         if (!skb)
932                 return ERR_PTR(err);
933
934         skb_reserve(skb, prepad);
935         skb_put(skb, linear);
936         skb->data_len = len - linear;
937         skb->len += len - linear;
938
939         return skb;
940 }
941
942 /* set skb frags from iovec, this can move to core network code for reuse */
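/* The user pages backing the iovec are pinned with get_user_pages_fast() and
 * attached to the skb as page frags; the page references are dropped when the
 * skb frags are released.
 */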
943 static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
944                                   int offset, size_t count)
945 {
946         int len = iov_length(from, count) - offset;
947         int copy = skb_headlen(skb);
948         int size, offset1 = 0;
949         int i = 0;
950
951         /* Skip over from offset */
952         while (count && (offset >= from->iov_len)) {
953                 offset -= from->iov_len;
954                 ++from;
955                 --count;
956         }
957
958         /* copy up to skb headlen */
959         while (count && (copy > 0)) {
960                 size = min_t(unsigned int, copy, from->iov_len - offset);
961                 if (copy_from_user(skb->data + offset1, from->iov_base + offset,
962                                    size))
963                         return -EFAULT;
964                 if (copy > size) {
965                         ++from;
966                         --count;
967                         offset = 0;
968                 } else
969                         offset += size;
970                 copy -= size;
971                 offset1 += size;
972         }
973
974         if (len == offset1)
975                 return 0;
976
977         while (count--) {
978                 struct page *page[MAX_SKB_FRAGS];
979                 int num_pages;
980                 unsigned long base;
981                 unsigned long truesize;
982
983                 len = from->iov_len - offset;
984                 if (!len) {
985                         offset = 0;
986                         ++from;
987                         continue;
988                 }
989                 base = (unsigned long)from->iov_base + offset;
990                 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
991                 if (i + size > MAX_SKB_FRAGS)
992                         return -EMSGSIZE;
993                 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
994                 if (num_pages != size) {
995                         for (i = 0; i < num_pages; i++)
996                                 put_page(page[i]);
997                         return -EFAULT;
998                 }
999                 truesize = size * PAGE_SIZE;
1000                 skb->data_len += len;
1001                 skb->len += len;
1002                 skb->truesize += truesize;
1003                 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
1004                 while (len) {
1005                         int off = base & ~PAGE_MASK;
1006                         int size = min_t(int, len, PAGE_SIZE - off);
1007                         __skb_fill_page_desc(skb, i, page[i], off, size);
1008                         skb_shinfo(skb)->nr_frags++;
1009                         /* increase sk_wmem_alloc */
1010                         base += size;
1011                         len -= size;
1012                         i++;
1013                 }
1014                 offset = 0;
1015                 ++from;
1016         }
1017         return 0;
1018 }
1019
1020 /* Get packet from user space buffer */
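/* A write consists of an optional struct tun_pi (omitted with TUN_NO_PI), an
 * optional virtio_net_hdr of vnet_hdr_sz bytes (with TUN_VNET_HDR), followed
 * by the packet data itself.
 */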
1021 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1022                             void *msg_control, const struct iovec *iv,
1023                             size_t total_len, size_t count, int noblock)
1024 {
1025         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1026         struct sk_buff *skb;
1027         size_t len = total_len, align = NET_SKB_PAD;
1028         struct virtio_net_hdr gso = { 0 };
1029         int offset = 0;
1030         int copylen;
1031         bool zerocopy = false;
1032         int err;
1033         u32 rxhash;
1034
1035         if (!(tun->flags & TUN_NO_PI)) {
1036                 if ((len -= sizeof(pi)) > total_len)
1037                         return -EINVAL;
1038
1039                 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
1040                         return -EFAULT;
1041                 offset += sizeof(pi);
1042         }
1043
1044         if (tun->flags & TUN_VNET_HDR) {
1045                 if ((len -= tun->vnet_hdr_sz) > total_len)
1046                         return -EINVAL;
1047
1048                 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
1049                         return -EFAULT;
1050
1051                 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1052                     gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
1053                         gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
1054
1055                 if (gso.hdr_len > len)
1056                         return -EINVAL;
1057                 offset += tun->vnet_hdr_sz;
1058         }
1059
1060         if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
1061                 align += NET_IP_ALIGN;
1062                 if (unlikely(len < ETH_HLEN ||
1063                              (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
1064                         return -EINVAL;
1065         }
1066
1067         if (msg_control)
1068                 zerocopy = true;
1069
1070         if (zerocopy) {
1071                 /* Userspace may produce vectors with count greater than
1072                  * MAX_SKB_FRAGS, so we need to linearize parts of the skb
1073                  * to let the rest of the data fit in the frags.
1074                  */
1075                 if (count > MAX_SKB_FRAGS) {
1076                         copylen = iov_length(iv, count - MAX_SKB_FRAGS);
1077                         if (copylen < offset)
1078                                 copylen = 0;
1079                         else
1080                                 copylen -= offset;
1081                 } else
1082                         copylen = 0;
1083                 /* There are 256 bytes to be copied into the skb, so there is enough
1084                  * room to expand the head in case that is needed.
1085                  * The rest of the buffer is mapped from userspace.
1086                  */
1087                 if (copylen < gso.hdr_len)
1088                         copylen = gso.hdr_len;
1089                 if (!copylen)
1090                         copylen = GOODCOPY_LEN;
1091         } else
1092                 copylen = len;
1093
1094         skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
1095         if (IS_ERR(skb)) {
1096                 if (PTR_ERR(skb) != -EAGAIN)
1097                         tun->dev->stats.rx_dropped++;
1098                 return PTR_ERR(skb);
1099         }
1100
1101         if (zerocopy)
1102                 err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1103         else
1104                 err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1105
1106         if (err) {
1107                 tun->dev->stats.rx_dropped++;
1108                 kfree_skb(skb);
1109                 return -EFAULT;
1110         }
1111
1112         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1113                 if (!skb_partial_csum_set(skb, gso.csum_start,
1114                                           gso.csum_offset)) {
1115                         tun->dev->stats.rx_frame_errors++;
1116                         kfree_skb(skb);
1117                         return -EINVAL;
1118                 }
1119         }
1120
1121         switch (tun->flags & TUN_TYPE_MASK) {
1122         case TUN_TUN_DEV:
1123                 if (tun->flags & TUN_NO_PI) {
1124                         switch (skb->data[0] & 0xf0) {
1125                         case 0x40:
1126                                 pi.proto = htons(ETH_P_IP);
1127                                 break;
1128                         case 0x60:
1129                                 pi.proto = htons(ETH_P_IPV6);
1130                                 break;
1131                         default:
1132                                 tun->dev->stats.rx_dropped++;
1133                                 kfree_skb(skb);
1134                                 return -EINVAL;
1135                         }
1136                 }
1137
1138                 skb_reset_mac_header(skb);
1139                 skb->protocol = pi.proto;
1140                 skb->dev = tun->dev;
1141                 break;
1142         case TUN_TAP_DEV:
1143                 skb->protocol = eth_type_trans(skb, tun->dev);
1144                 break;
1145         }
1146
1147         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1148                 pr_debug("GSO!\n");
1149                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1150                 case VIRTIO_NET_HDR_GSO_TCPV4:
1151                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1152                         break;
1153                 case VIRTIO_NET_HDR_GSO_TCPV6:
1154                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1155                         break;
1156                 case VIRTIO_NET_HDR_GSO_UDP:
1157                         skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1158                         break;
1159                 default:
1160                         tun->dev->stats.rx_frame_errors++;
1161                         kfree_skb(skb);
1162                         return -EINVAL;
1163                 }
1164
1165                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1166                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1167
1168                 skb_shinfo(skb)->gso_size = gso.gso_size;
1169                 if (skb_shinfo(skb)->gso_size == 0) {
1170                         tun->dev->stats.rx_frame_errors++;
1171                         kfree_skb(skb);
1172                         return -EINVAL;
1173                 }
1174
1175                 /* Header must be checked, and gso_segs computed. */
1176                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1177                 skb_shinfo(skb)->gso_segs = 0;
1178         }
1179
1180         /* copy skb_ubuf_info for callback when skb has no error */
1181         if (zerocopy) {
1182                 skb_shinfo(skb)->destructor_arg = msg_control;
1183                 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1184         }
1185
1186         skb_reset_network_header(skb);
1187         rxhash = skb_get_rxhash(skb);
1188         netif_rx_ni(skb);
1189
1190         tun->dev->stats.rx_packets++;
1191         tun->dev->stats.rx_bytes += len;
1192
1193         tun_flow_update(tun, rxhash, tfile->queue_index);
1194         return total_len;
1195 }
1196
1197 static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1198                               unsigned long count, loff_t pos)
1199 {
1200         struct file *file = iocb->ki_filp;
1201         struct tun_struct *tun = tun_get(file);
1202         struct tun_file *tfile = file->private_data;
1203         ssize_t result;
1204
1205         if (!tun)
1206                 return -EBADFD;
1207
1208         tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
1209
1210         result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1211                               count, file->f_flags & O_NONBLOCK);
1212
1213         tun_put(tun);
1214         return result;
1215 }
1216
1217 /* Put packet to the user space buffer */
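/* A read mirrors the write layout: an optional struct tun_pi (TUN_PKT_STRIP
 * is set in pi.flags when the packet does not fit in the buffer), an optional
 * virtio_net_hdr, then the packet data.
 */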
1218 static ssize_t tun_put_user(struct tun_struct *tun,
1219                             struct tun_file *tfile,
1220                             struct sk_buff *skb,
1221                             const struct iovec *iv, int len)
1222 {
1223         struct tun_pi pi = { 0, skb->protocol };
1224         ssize_t total = 0;
1225
1226         if (!(tun->flags & TUN_NO_PI)) {
1227                 if ((len -= sizeof(pi)) < 0)
1228                         return -EINVAL;
1229
1230                 if (len < skb->len) {
1231                         /* Packet will be stripped */
1232                         pi.flags |= TUN_PKT_STRIP;
1233                 }
1234
1235                 if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
1236                         return -EFAULT;
1237                 total += sizeof(pi);
1238         }
1239
1240         if (tun->flags & TUN_VNET_HDR) {
1241                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1242                 if ((len -= tun->vnet_hdr_sz) < 0)
1243                         return -EINVAL;
1244
1245                 if (skb_is_gso(skb)) {
1246                         struct skb_shared_info *sinfo = skb_shinfo(skb);
1247
1248                         /* This is a hint as to how much should be linear. */
1249                         gso.hdr_len = skb_headlen(skb);
1250                         gso.gso_size = sinfo->gso_size;
1251                         if (sinfo->gso_type & SKB_GSO_TCPV4)
1252                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1253                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
1254                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1255                         else if (sinfo->gso_type & SKB_GSO_UDP)
1256                                 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1257                         else {
1258                                 pr_err("unexpected GSO type: "
1259                                        "0x%x, gso_size %d, hdr_len %d\n",
1260                                        sinfo->gso_type, gso.gso_size,
1261                                        gso.hdr_len);
1262                                 print_hex_dump(KERN_ERR, "tun: ",
1263                                                DUMP_PREFIX_NONE,
1264                                                16, 1, skb->head,
1265                                                min((int)gso.hdr_len, 64), true);
1266                                 WARN_ON_ONCE(1);
1267                                 return -EINVAL;
1268                         }
1269                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1270                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1271                 } else
1272                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1273
1274                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1275                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1276                         gso.csum_start = skb_checksum_start_offset(skb);
1277                         gso.csum_offset = skb->csum_offset;
1278                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1279                         gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1280                 } /* else everything is zero */
1281
1282                 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1283                                                sizeof(gso))))
1284                         return -EFAULT;
1285                 total += tun->vnet_hdr_sz;
1286         }
1287
1288         len = min_t(int, skb->len, len);
1289
1290         skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
1291         total += skb->len;
1292
1293         tun->dev->stats.tx_packets++;
1294         tun->dev->stats.tx_bytes += len;
1295
1296         return total;
1297 }
1298
1299 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1300                            struct kiocb *iocb, const struct iovec *iv,
1301                            ssize_t len, int noblock)
1302 {
1303         DECLARE_WAITQUEUE(wait, current);
1304         struct sk_buff *skb;
1305         ssize_t ret = 0;
1306
1307         tun_debug(KERN_INFO, tun, "tun_do_read\n");
1308
1309         if (unlikely(!noblock))
1310                 add_wait_queue(&tfile->wq.wait, &wait);
1311         while (len) {
1312                 current->state = TASK_INTERRUPTIBLE;
1313
1314                 /* Read frames from the queue */
1315                 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
1316                         if (noblock) {
1317                                 ret = -EAGAIN;
1318                                 break;
1319                         }
1320                         if (signal_pending(current)) {
1321                                 ret = -ERESTARTSYS;
1322                                 break;
1323                         }
1324                         if (tun->dev->reg_state != NETREG_REGISTERED) {
1325                                 ret = -EIO;
1326                                 break;
1327                         }
1328
1329                         /* Nothing to read, let's sleep */
1330                         schedule();
1331                         continue;
1332                 }
1333
1334                 ret = tun_put_user(tun, tfile, skb, iv, len);
1335                 kfree_skb(skb);
1336                 break;
1337         }
1338
1339         current->state = TASK_RUNNING;
1340         if (unlikely(!noblock))
1341                 remove_wait_queue(&tfile->wq.wait, &wait);
1342
1343         return ret;
1344 }
1345
1346 static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1347                             unsigned long count, loff_t pos)
1348 {
1349         struct file *file = iocb->ki_filp;
1350         struct tun_file *tfile = file->private_data;
1351         struct tun_struct *tun = __tun_get(tfile);
1352         ssize_t len, ret;
1353
1354         if (!tun)
1355                 return -EBADFD;
1356         len = iov_length(iv, count);
1357         if (len < 0) {
1358                 ret = -EINVAL;
1359                 goto out;
1360         }
1361
1362         ret = tun_do_read(tun, tfile, iocb, iv, len,
1363                           file->f_flags & O_NONBLOCK);
1364         ret = min_t(ssize_t, ret, len);
1365 out:
1366         tun_put(tun);
1367         return ret;
1368 }
1369
1370 static void tun_free_netdev(struct net_device *dev)
1371 {
1372         struct tun_struct *tun = netdev_priv(dev);
1373
1374         BUG_ON(!(list_empty(&tun->disabled)));
1375         tun_flow_uninit(tun);
1376         free_netdev(dev);
1377 }
1378
1379 static void tun_setup(struct net_device *dev)
1380 {
1381         struct tun_struct *tun = netdev_priv(dev);
1382
1383         tun->owner = INVALID_UID;
1384         tun->group = INVALID_GID;
1385
1386         dev->ethtool_ops = &tun_ethtool_ops;
1387         dev->destructor = tun_free_netdev;
1388 }
1389
1390 /* Trivial set of netlink ops to allow deleting tun or tap
1391  * device with netlink.
1392  */
1393 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1394 {
1395         return -EINVAL;
1396 }
1397
1398 static struct rtnl_link_ops tun_link_ops __read_mostly = {
1399         .kind           = DRV_NAME,
1400         .priv_size      = sizeof(struct tun_struct),
1401         .setup          = tun_setup,
1402         .validate       = tun_validate,
1403 };
1404
1405 static void tun_sock_write_space(struct sock *sk)
1406 {
1407         struct tun_file *tfile;
1408         wait_queue_head_t *wqueue;
1409
1410         if (!sock_writeable(sk))
1411                 return;
1412
1413         if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
1414                 return;
1415
1416         wqueue = sk_sleep(sk);
1417         if (wqueue && waitqueue_active(wqueue))
1418                 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1419                                                 POLLWRNORM | POLLWRBAND);
1420
1421         tfile = container_of(sk, struct tun_file, sk);
1422         kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1423 }
1424
1425 static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1426                        struct msghdr *m, size_t total_len)
1427 {
1428         int ret;
1429         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1430         struct tun_struct *tun = __tun_get(tfile);
1431
1432         if (!tun)
1433                 return -EBADFD;
1434         ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1435                            m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1436         tun_put(tun);
1437         return ret;
1438 }
1439
1440
1441 static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1442                        struct msghdr *m, size_t total_len,
1443                        int flags)
1444 {
1445         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1446         struct tun_struct *tun = __tun_get(tfile);
1447         int ret;
1448
1449         if (!tun)
1450                 return -EBADFD;
1451
1452         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
1453                 return -EINVAL;
1454         ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1455                           flags & MSG_DONTWAIT);
1456         if (ret > total_len) {
1457                 m->msg_flags |= MSG_TRUNC;
1458                 ret = flags & MSG_TRUNC ? ret : total_len;
1459         }
1460         tun_put(tun);
1461         return ret;
1462 }
1463
1464 static int tun_release(struct socket *sock)
1465 {
1466         if (sock->sk)
1467                 sock_put(sock->sk);
1468         return 0;
1469 }
1470
1471 /* Ops structure to mimic raw sockets with tun */
1472 static const struct proto_ops tun_socket_ops = {
1473         .sendmsg = tun_sendmsg,
1474         .recvmsg = tun_recvmsg,
1475         .release = tun_release,
1476 };
1477
1478 static struct proto tun_proto = {
1479         .name           = "tun",
1480         .owner          = THIS_MODULE,
1481         .obj_size       = sizeof(struct tun_file),
1482 };
1483
1484 static int tun_flags(struct tun_struct *tun)
1485 {
1486         int flags = 0;
1487
1488         if (tun->flags & TUN_TUN_DEV)
1489                 flags |= IFF_TUN;
1490         else
1491                 flags |= IFF_TAP;
1492
1493         if (tun->flags & TUN_NO_PI)
1494                 flags |= IFF_NO_PI;
1495
1496         /* This flag has no real effect.  We track the value for backwards
1497          * compatibility.
1498          */
1499         if (tun->flags & TUN_ONE_QUEUE)
1500                 flags |= IFF_ONE_QUEUE;
1501
1502         if (tun->flags & TUN_VNET_HDR)
1503                 flags |= IFF_VNET_HDR;
1504
1505         if (tun->flags & TUN_TAP_MQ)
1506                 flags |= IFF_MULTI_QUEUE;
1507
1508         return flags;
1509 }
1510
1511 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1512                               char *buf)
1513 {
1514         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1515         return sprintf(buf, "0x%x\n", tun_flags(tun));
1516 }
1517
1518 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1519                               char *buf)
1520 {
1521         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1522         return uid_valid(tun->owner)?
1523                 sprintf(buf, "%u\n",
1524                         from_kuid_munged(current_user_ns(), tun->owner)):
1525                 sprintf(buf, "-1\n");
1526 }
1527
1528 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1529                               char *buf)
1530 {
1531         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1532         return gid_valid(tun->group) ?
1533                 sprintf(buf, "%u\n",
1534                         from_kgid_munged(current_user_ns(), tun->group)):
1535                 sprintf(buf, "-1\n");
1536 }
1537
1538 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1539 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1540 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1541
1542 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1543 {
1544         struct tun_struct *tun;
1545         struct tun_file *tfile = file->private_data;
1546         struct net_device *dev;
1547         int err;
1548
1549         if (tfile->detached)
1550                 return -EINVAL;
1551
1552         dev = __dev_get_by_name(net, ifr->ifr_name);
1553         if (dev) {
1554                 if (ifr->ifr_flags & IFF_TUN_EXCL)
1555                         return -EBUSY;
1556                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1557                         tun = netdev_priv(dev);
1558                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1559                         tun = netdev_priv(dev);
1560                 else
1561                         return -EINVAL;
1562
1563                 if (tun_not_capable(tun))
1564                         return -EPERM;
1565                 err = security_tun_dev_attach(tfile->socket.sk);
1566                 if (err < 0)
1567                         return err;
1568
1569                 err = tun_attach(tun, file);
1570                 if (err < 0)
1571                         return err;
1572
1573                 if (tun->flags & TUN_TAP_MQ &&
1574                     (tun->numqueues + tun->numdisabled > 1))
1575                         return 0; /* extra queue attached; device already set up */
1576         } else {
1578                 char *name;
1579                 unsigned long flags = 0;
1580
1581                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1582                         return -EPERM;
1583                 err = security_tun_dev_create();
1584                 if (err < 0)
1585                         return err;
1586
1587                 /* Set dev type */
1588                 if (ifr->ifr_flags & IFF_TUN) {
1589                         /* TUN device */
1590                         flags |= TUN_TUN_DEV;
1591                         name = "tun%d";
1592                 } else if (ifr->ifr_flags & IFF_TAP) {
1593                         /* TAP device */
1594                         flags |= TUN_TAP_DEV;
1595                         name = "tap%d";
1596                 } else
1597                         return -EINVAL;
1598
1599                 if (*ifr->ifr_name)
1600                         name = ifr->ifr_name;
1601
1602                 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1603                                        tun_setup,
1604                                        MAX_TAP_QUEUES, MAX_TAP_QUEUES);
1605                 if (!dev)
1606                         return -ENOMEM;
1607
1608                 dev_net_set(dev, net);
1609                 dev->rtnl_link_ops = &tun_link_ops;
1610
1611                 tun = netdev_priv(dev);
1612                 tun->dev = dev;
1613                 tun->flags = flags;
1614                 tun->txflt.count = 0;
1615                 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1616
1617                 tun->filter_attached = false;
1618                 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1619
1620                 spin_lock_init(&tun->lock);
1621
1622                 security_tun_dev_post_create(&tfile->sk);
1623
1624                 tun_net_init(dev);
1625
1626                 err = tun_flow_init(tun);
1627                 if (err < 0)
1628                         goto err_free_dev;
1629
1630                 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1631                         TUN_USER_FEATURES;
1632                 dev->features = dev->hw_features;
1633
1634                 INIT_LIST_HEAD(&tun->disabled);
1635                 err = tun_attach(tun, file);
1636                 if (err < 0)
1637                         goto err_free_dev;
1638
1639                 err = register_netdevice(tun->dev);
1640                 if (err < 0)
1641                         goto err_free_dev;
1642
1643                 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1644                     device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1645                     device_create_file(&tun->dev->dev, &dev_attr_group))
1646                         pr_err("Failed to create tun sysfs files\n");
1647
1648                 netif_carrier_on(tun->dev);
1649         }
1650
1651         tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1652
1653         if (ifr->ifr_flags & IFF_NO_PI)
1654                 tun->flags |= TUN_NO_PI;
1655         else
1656                 tun->flags &= ~TUN_NO_PI;
1657
1658         /* This flag has no real effect.  We track the value for backwards
1659          * compatibility.
1660          */
1661         if (ifr->ifr_flags & IFF_ONE_QUEUE)
1662                 tun->flags |= TUN_ONE_QUEUE;
1663         else
1664                 tun->flags &= ~TUN_ONE_QUEUE;
1665
1666         if (ifr->ifr_flags & IFF_VNET_HDR)
1667                 tun->flags |= TUN_VNET_HDR;
1668         else
1669                 tun->flags &= ~TUN_VNET_HDR;
1670
1671         if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1672                 tun->flags |= TUN_TAP_MQ;
1673         else
1674                 tun->flags &= ~TUN_TAP_MQ;
1675
1676         /* Make sure persistent devices do not get stuck in
1677          * xoff state.
1678          */
1679         if (netif_running(tun->dev))
1680                 netif_tx_wake_all_queues(tun->dev);
1681
1682         strcpy(ifr->ifr_name, tun->dev->name);
1683         return 0;
1684
1685  err_free_dev:
1686         free_netdev(dev);
1687         return err;
1688 }
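
/* A minimal user-space sketch of driving tun_set_iff() through TUNSETIFF
 * (assumes <fcntl.h>, <string.h>, <sys/ioctl.h>, <linux/if.h> and
 * <linux/if_tun.h>; error handling omitted).  Leaving the name as "tap%d"
 * lets the kernel pick the unit and write the final name back into ifr_name:
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);     afterwards ifr.ifr_name holds e.g. "tap0"
 */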
1689
1690 static void tun_get_iff(struct net *net, struct tun_struct *tun,
1691                        struct ifreq *ifr)
1692 {
1693         tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1694
1695         strcpy(ifr->ifr_name, tun->dev->name);
1696
1697         ifr->ifr_flags = tun_flags(tun);
1698
1699 }
1700
1701 /* This is like a cut-down ethtool ops, except done via tun fd so no
1702  * privs required. */
1703 static int set_offload(struct tun_struct *tun, unsigned long arg)
1704 {
1705         netdev_features_t features = 0;
1706
1707         if (arg & TUN_F_CSUM) {
1708                 features |= NETIF_F_HW_CSUM;
1709                 arg &= ~TUN_F_CSUM;
1710
1711                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1712                         if (arg & TUN_F_TSO_ECN) {
1713                                 features |= NETIF_F_TSO_ECN;
1714                                 arg &= ~TUN_F_TSO_ECN;
1715                         }
1716                         if (arg & TUN_F_TSO4)
1717                                 features |= NETIF_F_TSO;
1718                         if (arg & TUN_F_TSO6)
1719                                 features |= NETIF_F_TSO6;
1720                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1721                 }
1722
1723                 if (arg & TUN_F_UFO) {
1724                         features |= NETIF_F_UFO;
1725                         arg &= ~TUN_F_UFO;
1726                 }
1727         }
1728
1729         /* This gives the user a way to test for new features in future by
1730          * trying to set them. */
1731         if (arg)
1732                 return -EINVAL;
1733
1734         tun->set_features = features;
1735         netdev_update_features(tun->dev);
1736
1737         return 0;
1738 }
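
/* For illustration only: a user of the fd (e.g. a VM networking backend)
 * might request checksum and TSO offload roughly like this, retrying with
 * fewer bits when the ioctl rejects unknown ones as described in the comment
 * above:
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6) < 0)
 *		ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM);
 */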
1739
1740 static void tun_detach_filter(struct tun_struct *tun, int n)
1741 {
1742         int i;
1743         struct tun_file *tfile;
1744
1745         for (i = 0; i < n; i++) {
1746                 tfile = rtnl_dereference(tun->tfiles[i]);
1747                 sk_detach_filter(tfile->socket.sk);
1748         }
1749
1750         tun->filter_attached = false;
1751 }
1752
1753 static int tun_attach_filter(struct tun_struct *tun)
1754 {
1755         int i, ret = 0;
1756         struct tun_file *tfile;
1757
1758         for (i = 0; i < tun->numqueues; i++) {
1759                 tfile = rtnl_dereference(tun->tfiles[i]);
1760                 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1761                 if (ret) {
1762                         tun_detach_filter(tun, i);
1763                         return ret;
1764                 }
1765         }
1766
1767         tun->filter_attached = true;
1768         return ret;
1769 }
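
/* The same classic-BPF program is attached to every queue's socket.  A hedged
 * user-space sketch (TAP devices only, see the TUNATTACHFILTER case below;
 * assumes <linux/filter.h>, error handling omitted) that accepts all frames:
 *
 *	struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 1,
 *		.filter	= accept_all,
 *	};
 *
 *	ioctl(fd, TUNATTACHFILTER, &fprog);
 */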
1770
1771 static void tun_set_sndbuf(struct tun_struct *tun)
1772 {
1773         struct tun_file *tfile;
1774         int i;
1775
1776         for (i = 0; i < tun->numqueues; i++) {
1777                 tfile = rtnl_dereference(tun->tfiles[i]);
1778                 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1779         }
1780 }
1781
1782 static int tun_set_queue(struct file *file, struct ifreq *ifr)
1783 {
1784         struct tun_file *tfile = file->private_data;
1785         struct tun_struct *tun;
1786         int ret = 0;
1787
1788         rtnl_lock();
1789
1790         if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1791                 tun = tfile->detached;
1792                 if (!tun)
1793                         ret = -EINVAL;
1794                 else
1795                         ret = tun_attach(tun, file);
1796         } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1797                 tun = rtnl_dereference(tfile->tun);
1798                 if (!tun || !(tun->flags & TUN_TAP_MQ))
1799                         ret = -EINVAL;
1800                 else
1801                         __tun_detach(tfile, false);
1802         } else
1803                 ret = -EINVAL;
1804
1805         rtnl_unlock();
1806         return ret;
1807 }
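
/* Sketch of the matching user-space calls for a multiqueue device (each queue
 * fd comes from its own open("/dev/net/tun") + TUNSETIFF with IFF_MULTI_QUEUE
 * set; "queue_fd" is illustrative and error handling is omitted):
 *
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);    take this queue out of service
 *
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);    and put it back
 */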
1808
1809 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1810                             unsigned long arg, int ifreq_len)
1811 {
1812         struct tun_file *tfile = file->private_data;
1813         struct tun_struct *tun;
1814         void __user *argp = (void __user *)arg;
1815         struct ifreq ifr;
1816         kuid_t owner;
1817         kgid_t group;
1818         int sndbuf;
1819         int vnet_hdr_sz;
1820         int ret;
1821
1822         if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1823                 if (copy_from_user(&ifr, argp, ifreq_len))
1824                         return -EFAULT;
1825         } else {
1826                 memset(&ifr, 0, sizeof(ifr));
1827         }
1828         if (cmd == TUNGETFEATURES) {
1829                 /* Currently this just means: "what IFF flags are valid?".
1830                  * This is needed because we never checked for invalid flags on
1831                  * TUNSETIFF. */
1832                 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1833                                 IFF_VNET_HDR | IFF_MULTI_QUEUE,
1834                                 (unsigned int __user *)argp);
1835         } else if (cmd == TUNSETQUEUE)
1836                 return tun_set_queue(file, &ifr);
1837
1838         ret = 0;
1839         rtnl_lock();
1840
1841         tun = __tun_get(tfile);
1842         if (cmd == TUNSETIFF && !tun) {
1843                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1844
1845                 ret = tun_set_iff(tfile->net, file, &ifr);
1846
1847                 if (ret)
1848                         goto unlock;
1849
1850                 if (copy_to_user(argp, &ifr, ifreq_len))
1851                         ret = -EFAULT;
1852                 goto unlock;
1853         }
1854
1855         ret = -EBADFD;
1856         if (!tun)
1857                 goto unlock;
1858
1859         tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1860
1861         ret = 0;
1862         switch (cmd) {
1863         case TUNGETIFF:
1864                 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1865
1866                 if (copy_to_user(argp, &ifr, ifreq_len))
1867                         ret = -EFAULT;
1868                 break;
1869
1870         case TUNSETNOCSUM:
1871                 /* Disable/Enable checksum */
1872
1873                 /* [unimplemented] */
1874                 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1875                           arg ? "disabled" : "enabled");
1876                 break;
1877
1878         case TUNSETPERSIST:
1879                 /* Disable/Enable persist mode. Keep an extra reference to the
1880                  * module so it cannot be unloaded while the device persists.
1881                  */
1882                 if (arg && !(tun->flags & TUN_PERSIST)) {
1883                         tun->flags |= TUN_PERSIST;
1884                         __module_get(THIS_MODULE);
1885                 }
1886                 if (!arg && (tun->flags & TUN_PERSIST)) {
1887                         tun->flags &= ~TUN_PERSIST;
1888                         module_put(THIS_MODULE);
1889                 }
1890
1891                 tun_debug(KERN_INFO, tun, "persist %s\n",
1892                           arg ? "enabled" : "disabled");
1893                 break;
1894
1895         case TUNSETOWNER:
1896                 /* Set owner of the device */
1897                 owner = make_kuid(current_user_ns(), arg);
1898                 if (!uid_valid(owner)) {
1899                         ret = -EINVAL;
1900                         break;
1901                 }
1902                 tun->owner = owner;
1903                 tun_debug(KERN_INFO, tun, "owner set to %u\n",
1904                           from_kuid(&init_user_ns, tun->owner));
1905                 break;
1906
1907         case TUNSETGROUP:
1908                 /* Set group of the device */
1909                 group = make_kgid(current_user_ns(), arg);
1910                 if (!gid_valid(group)) {
1911                         ret = -EINVAL;
1912                         break;
1913                 }
1914                 tun->group = group;
1915                 tun_debug(KERN_INFO, tun, "group set to %u\n",
1916                           from_kgid(&init_user_ns, tun->group));
1917                 break;
1918
1919         case TUNSETLINK:
1920                 /* Only allow setting the type when the interface is down */
1921                 if (tun->dev->flags & IFF_UP) {
1922                         tun_debug(KERN_INFO, tun,
1923                                   "Linktype set failed because interface is up\n");
1924                         ret = -EBUSY;
1925                 } else {
1926                         tun->dev->type = (int) arg;
1927                         tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1928                                   tun->dev->type);
1929                         ret = 0;
1930                 }
1931                 break;
1932
1933 #ifdef TUN_DEBUG
1934         case TUNSETDEBUG:
1935                 tun->debug = arg;
1936                 break;
1937 #endif
1938         case TUNSETOFFLOAD:
1939                 ret = set_offload(tun, arg);
1940                 break;
1941
1942         case TUNSETTXFILTER:
1943                 /* Can be set only for TAPs */
1944                 ret = -EINVAL;
1945                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1946                         break;
1947                 ret = update_filter(&tun->txflt, (void __user *)arg);
1948                 break;
1949
1950         case SIOCGIFHWADDR:
1951                 /* Get hw address */
1952                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1953                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1954                 if (copy_to_user(argp, &ifr, ifreq_len))
1955                         ret = -EFAULT;
1956                 break;
1957
1958         case SIOCSIFHWADDR:
1959                 /* Set hw address */
1960                 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1961                           ifr.ifr_hwaddr.sa_data);
1962
1963                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1964                 break;
1965
1966         case TUNGETSNDBUF:
1967                 sndbuf = tfile->socket.sk->sk_sndbuf;
1968                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1969                         ret = -EFAULT;
1970                 break;
1971
1972         case TUNSETSNDBUF:
1973                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1974                         ret = -EFAULT;
1975                         break;
1976                 }
1977
1978                 tun->sndbuf = sndbuf;
1979                 tun_set_sndbuf(tun);
1980                 break;
1981
1982         case TUNGETVNETHDRSZ:
1983                 vnet_hdr_sz = tun->vnet_hdr_sz;
1984                 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
1985                         ret = -EFAULT;
1986                 break;
1987
1988         case TUNSETVNETHDRSZ:
1989                 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
1990                         ret = -EFAULT;
1991                         break;
1992                 }
1993                 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
1994                         ret = -EINVAL;
1995                         break;
1996                 }
1997
1998                 tun->vnet_hdr_sz = vnet_hdr_sz;
1999                 break;
2000
2001         case TUNATTACHFILTER:
2002                 /* Can be set only for TAPs */
2003                 ret = -EINVAL;
2004                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2005                         break;
2006                 ret = -EFAULT;
2007                 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2008                         break;
2009
2010                 ret = tun_attach_filter(tun);
2011                 break;
2012
2013         case TUNDETACHFILTER:
2014                 /* Can be set only for TAPs */
2015                 ret = -EINVAL;
2016                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2017                         break;
2018                 ret = 0;
2019                 tun_detach_filter(tun, tun->numqueues);
2020                 break;
2021
2022         default:
2023                 ret = -EINVAL;
2024                 break;
2025         }
2026
2027 unlock:
2028         rtnl_unlock();
2029         if (tun)
2030                 tun_put(tun);
2031         return ret;
2032 }
2033
2034 static long tun_chr_ioctl(struct file *file,
2035                           unsigned int cmd, unsigned long arg)
2036 {
2037         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
2038 }
2039
2040 #ifdef CONFIG_COMPAT
2041 static long tun_chr_compat_ioctl(struct file *file,
2042                          unsigned int cmd, unsigned long arg)
2043 {
2044         switch (cmd) {
2045         case TUNSETIFF:
2046         case TUNGETIFF:
2047         case TUNSETTXFILTER:
2048         case TUNGETSNDBUF:
2049         case TUNSETSNDBUF:
2050         case SIOCGIFHWADDR:
2051         case SIOCSIFHWADDR:
2052                 arg = (unsigned long)compat_ptr(arg);
2053                 break;
2054         default:
2055                 arg = (compat_ulong_t)arg;
2056                 break;
2057         }
2058
2059         /*
2060          * compat_ifreq is shorter than ifreq, so we must not access beyond
2061          * the end of that structure. All fields that are used in this
2062          * driver are compatible though, we don't need to convert the
2063          * driver are compatible, though, so we don't need to convert the
2064          */
2065         return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2066 }
2067 #endif /* CONFIG_COMPAT */
2068
2069 static int tun_chr_fasync(int fd, struct file *file, int on)
2070 {
2071         struct tun_file *tfile = file->private_data;
2072         int ret;
2073
2074         if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2075                 goto out;
2076
2077         if (on) {
2078                 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2079                 if (ret)
2080                         goto out;
2081                 tfile->flags |= TUN_FASYNC;
2082         } else
2083                 tfile->flags &= ~TUN_FASYNC;
2084         ret = 0;
2085 out:
2086         return ret;
2087 }
2088
2089 static int tun_chr_open(struct inode *inode, struct file * file)
2090 {
2091         struct tun_file *tfile;
2092
2093         DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2094
2095         tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2096                                             &tun_proto);
2097         if (!tfile)
2098                 return -ENOMEM;
2099         rcu_assign_pointer(tfile->tun, NULL);
2100         tfile->net = get_net(current->nsproxy->net_ns);
2101         tfile->flags = 0;
2102
2103         rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2104         init_waitqueue_head(&tfile->wq.wait);
2105
2106         tfile->socket.file = file;
2107         tfile->socket.ops = &tun_socket_ops;
2108
2109         sock_init_data(&tfile->socket, &tfile->sk);
2110         sk_change_net(&tfile->sk, tfile->net);
2111
2112         tfile->sk.sk_write_space = tun_sock_write_space;
2113         tfile->sk.sk_sndbuf = INT_MAX;
2114
2115         file->private_data = tfile;
2116         set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2117         INIT_LIST_HEAD(&tfile->next);
2118
2119         return 0;
2120 }
2121
2122 static int tun_chr_close(struct inode *inode, struct file *file)
2123 {
2124         struct tun_file *tfile = file->private_data;
2125         struct net *net = tfile->net;
2126
2127         tun_detach(tfile, true);
2128         put_net(net);
2129
2130         return 0;
2131 }
2132
2133 static const struct file_operations tun_fops = {
2134         .owner  = THIS_MODULE,
2135         .llseek = no_llseek,
2136         .read  = do_sync_read,
2137         .aio_read  = tun_chr_aio_read,
2138         .write = do_sync_write,
2139         .aio_write = tun_chr_aio_write,
2140         .poll   = tun_chr_poll,
2141         .unlocked_ioctl = tun_chr_ioctl,
2142 #ifdef CONFIG_COMPAT
2143         .compat_ioctl = tun_chr_compat_ioctl,
2144 #endif
2145         .open   = tun_chr_open,
2146         .release = tun_chr_close,
2147         .fasync = tun_chr_fasync
2148 };
2149
2150 static struct miscdevice tun_miscdev = {
2151         .minor = TUN_MINOR,
2152         .name = "tun",
2153         .nodename = "net/tun",
2154         .fops = &tun_fops,
2155 };
2156
2157 /* ethtool interface */
2158
2159 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2160 {
2161         cmd->supported          = 0;
2162         cmd->advertising        = 0;
2163         ethtool_cmd_speed_set(cmd, SPEED_10);
2164         cmd->duplex             = DUPLEX_FULL;
2165         cmd->port               = PORT_TP;
2166         cmd->phy_address        = 0;
2167         cmd->transceiver        = XCVR_INTERNAL;
2168         cmd->autoneg            = AUTONEG_DISABLE;
2169         cmd->maxtxpkt           = 0;
2170         cmd->maxrxpkt           = 0;
2171         return 0;
2172 }
2173
2174 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2175 {
2176         struct tun_struct *tun = netdev_priv(dev);
2177
2178         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2179         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2180
2181         switch (tun->flags & TUN_TYPE_MASK) {
2182         case TUN_TUN_DEV:
2183                 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2184                 break;
2185         case TUN_TAP_DEV:
2186                 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2187                 break;
2188         }
2189 }
2190
2191 static u32 tun_get_msglevel(struct net_device *dev)
2192 {
2193 #ifdef TUN_DEBUG
2194         struct tun_struct *tun = netdev_priv(dev);
2195         return tun->debug;
2196 #else
2197         return -EOPNOTSUPP;
2198 #endif
2199 }
2200
2201 static void tun_set_msglevel(struct net_device *dev, u32 value)
2202 {
2203 #ifdef TUN_DEBUG
2204         struct tun_struct *tun = netdev_priv(dev);
2205         tun->debug = value;
2206 #endif
2207 }
2208
2209 static const struct ethtool_ops tun_ethtool_ops = {
2210         .get_settings   = tun_get_settings,
2211         .get_drvinfo    = tun_get_drvinfo,
2212         .get_msglevel   = tun_get_msglevel,
2213         .set_msglevel   = tun_set_msglevel,
2214         .get_link       = ethtool_op_get_link,
2215 };
2216
2217
2218 static int __init tun_init(void)
2219 {
2220         int ret = 0;
2221
2222         pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2223         pr_info("%s\n", DRV_COPYRIGHT);
2224
2225         ret = rtnl_link_register(&tun_link_ops);
2226         if (ret) {
2227                 pr_err("Can't register link_ops\n");
2228                 goto err_linkops;
2229         }
2230
2231         ret = misc_register(&tun_miscdev);
2232         if (ret) {
2233                 pr_err("Can't register misc device %d\n", TUN_MINOR);
2234                 goto err_misc;
2235         }
2236         return  0;
2237 err_misc:
2238         rtnl_link_unregister(&tun_link_ops);
2239 err_linkops:
2240         return ret;
2241 }
2242
2243 static void tun_cleanup(void)
2244 {
2245         misc_deregister(&tun_miscdev);
2246         rtnl_link_unregister(&tun_link_ops);
2247 }
2248
2249 /* Get an underlying socket object from tun file.  Returns error unless file is
2250  * attached to a device.  The returned object works like a packet socket, it
2251  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
2252  * holding a reference to the file for as long as the socket is in use. */
2253 struct socket *tun_get_socket(struct file *file)
2254 {
2255         struct tun_file *tfile;
2256         if (file->f_op != &tun_fops)
2257                 return ERR_PTR(-EINVAL);
2258         tfile = file->private_data;
2259         if (!tfile)
2260                 return ERR_PTR(-EBADFD);
2261         return &tfile->socket;
2262 }
2263 EXPORT_SYMBOL_GPL(tun_get_socket);
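
/* A hedged sketch of the intended in-kernel usage (mirroring what a consumer
 * such as vhost-net is expected to do; the fd is an open /dev/net/tun file
 * that has already been attached with TUNSETIFF, and the names below are
 * illustrative):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	... sock_sendmsg() / sock_recvmsg() on sock ...
 *	fput(file);                    release the file when done with the socket
 */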
2264
2265 module_init(tun_init);
2266 module_exit(tun_cleanup);
2267 MODULE_DESCRIPTION(DRV_DESCRIPTION);
2268 MODULE_AUTHOR(DRV_COPYRIGHT);
2269 MODULE_LICENSE("GPL");
2270 MODULE_ALIAS_MISCDEV(TUN_MINOR);
2271 MODULE_ALIAS("devname:net/tun");