/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger: the 6 sec specified
				   in the old IPv6 RFC; a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}


static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}

static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could have
		 * reappeared and we need to recheck for it.
		 *
		 * OTOH no need to search the active socket first, as is
		 * done in ipv6_flowlabel_opt - the sock is locked, so a new
		 * entry with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}



/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
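
/*
 * Illustrative kernel-side usage sketch (not part of this file): datagram
 * send paths such as udpv6_sendmsg() resolve a sticky label along the
 * lines of
 *
 *	flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 *	...
 *	fl6_sock_release(flowlabel);
 *
 * fl6_sock_lookup() takes a reference on the returned entry; the caller
 * drops it with fl6_sock_release().
 */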

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */


/*
   This is the only difficult place: the flowlabel enforces equal headers
   up to and including the routing header, but the user may supply options
   following the rthdr.
 */

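/*
 * Illustrative example (assumed values, not part of this file): if the
 * flowlabel was created with a routing header (fl->opt->srcrt set) and
 * the caller passes fopt carrying only options after the routing header
 * (fopt->opt_flen != 0), then
 *
 *	struct ipv6_txoptions space;
 *	struct ipv6_txoptions *opt = fl6_merge_options(&space, fl, fopt);
 *
 * yields opt->hopopt/dst0opt/srcrt/opt_nflen taken from fl->opt (pinned
 * by the flowlabel) and opt->dst1opt/opt_flen taken from fopt (supplied
 * per call).
 */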
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

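/*
 * Worked example (values follow directly from the constants above):
 * check_linger(3) returns FL_MIN_LINGER*HZ, i.e. 6 seconds in jiffies,
 * while check_linger(1000) returns 0 for a caller without CAP_NET_ADMIN
 * because 1000 exceeds FL_MAX_LINGER; fl6_renew() maps that 0 to -EPERM.
 */
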
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
		struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

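/*
 * Illustrative userspace usage sketch (not part of this file): the manager
 * below is driven through the IPV6_FLOWLABEL_MGR socket option. A typical
 * request for a fresh, exclusively owned label looks roughly like this
 * (fd and dst are assumed caller-provided names):
 *
 *	struct in6_flowlabel_req freq;
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_label = 0;			/@ 0: let the kernel pick @/
 *	freq.flr_action = IPV6_FL_A_GET;
 *	freq.flr_flags = IPV6_FL_F_CREATE;
 *	freq.flr_share = IPV6_FL_S_EXCL;
 *	freq.flr_dst = dst;			/@ destination address @/
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 * (inner comments written with @ in place of * to keep this block valid C).
 * On success with flr_label == 0, the chosen label is copied back into
 * freq.flr_label; see the copy_to_user() near the end of the GET case.
 */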
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				/* An existing label may be attached only if
				 * the share mode matches and, for PROCESS or
				 * USER sharing, the owner matches too.
				 */
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

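/*
 * Example /proc/net/ip6_flowlabel output (values illustrative, format
 * taken from ip6fl_seq_show() above; a process-shared label, S == 2,
 * prints its owner pid):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	0889A 2 12345  1      6      58       2001:0db8:0000:0000:0000:0000:0000:0001 0
 */
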
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}