datapath: Fix race.
[cascardo/ovs.git] / datapath / datapath.c
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "vlan.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

int ovs_net_id __read_mostly;

static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
                       struct genl_multicast_group *grp)
{
        genl_notify(skb, genl_info_net(info), info->snd_portid,
                    grp->id, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath, port, set
 * operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif
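
/*
 * Example (illustrative sketch, not part of the datapath API): the typical
 * pattern under this locking scheme.  Writers replace RCU-protected
 * pointers under ovs_lock(); readers only need rcu_read_lock().  The
 * locals 'old', 'new' and 'table' below are hypothetical.
 *
 *      ovs_lock();
 *      old = ovsl_dereference(dp->table);      // update side
 *      rcu_assign_pointer(dp->table, new);
 *      ovs_unlock();
 *      // free 'old' only after an RCU grace period
 *
 *      rcu_read_lock();                        // read side
 *      table = rcu_dereference(dp->table);
 *      // ... use 'table' ...
 *      rcu_read_unlock();
 */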

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = netdev_vport_priv(local)->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        int error;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;
        OVS_CB(skb)->pkt_key = &key;

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
         SET_PARALLEL_OPS
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

static size_t key_attr_size(void)
{
        return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
                  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
                + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
                + nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
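
/*
 * Note: nla_total_size(n) is NLA_ALIGN(NLA_HDRLEN + n), i.e. the payload
 * plus the 4-byte attribute header, rounded up to a 4-byte boundary.  For
 * example, nla_total_size(4) = NLA_ALIGN(4 + 4) = 8 bytes, so the sum
 * above is an upper bound on the space the key attributes can occupy in
 * a message.
 */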

static size_t upcall_msg_size(const struct sk_buff *skb,
                              const struct nlattr *userdata)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

        /* OVS_PACKET_ATTR_USERDATA */
        if (userdata)
                size += NLA_ALIGN(userdata->nla_len);

        return size;
}

static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                vlan_set_tci(nskb, 0);

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        genlmsg_end(user_skb, upcall);
        err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
        kfree_skb(nskb);
        return err;
}

/* Called with ovs_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = ovsl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_destroy(old_table, true);
        return 0;
}

static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
{
        struct sw_flow_actions *acts;
        int new_acts_size;
        int req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;

        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;

        new_acts_size = ksize(*sfa) * 2;

        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
                        return ERR_PTR(-EMSGSIZE);
                new_acts_size = MAX_ACTIONS_BUFSIZE;
        }

        acts = ovs_flow_actions_alloc(new_acts_size);
        if (IS_ERR(acts))
                return (void *)acts;

        memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
        acts->actions_len = (*sfa)->actions_len;
        kfree(*sfa);
        *sfa = acts;

out:
        (*sfa)->actions_len += req_size;
        return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
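
/*
 * Illustrative note (the sizes are hypothetical, since ksize() depends on
 * the slab allocator): reserve_sfa_size() doubles the buffer whenever it
 * is full, e.g. 64 -> 128 -> 256 bytes, capping growth at
 * MAX_ACTIONS_BUFSIZE, so repeated add_action() calls (defined below) are
 * amortized O(1) in copying cost.
 */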

static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
{
        struct nlattr *a;

        a = reserve_sfa_size(sfa, nla_attr_size(len));
        if (IS_ERR(a))
                return PTR_ERR(a);

        a->nla_type = attrtype;
        a->nla_len = nla_attr_size(len);

        if (data)
                memcpy(nla_data(a), data, len);
        memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

        return 0;
}

static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
{
        int used = (*sfa)->actions_len;
        int err;

        err = add_action(sfa, attrtype, NULL, 0);
        if (err)
                return err;

        return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
{
        struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);

        a->nla_len = sfa->actions_len - st_offset;
}
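
/*
 * Sketch of how the two helpers above pair up, mirroring nla_nest_start()
 * and nla_nest_end(): the caller records the offset returned by
 * add_nested_action_start() and patches the length in afterwards.  This is
 * the pattern used by validate_and_copy_sample() below:
 *
 *      int start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
 *      if (start < 0)
 *              return start;
 *      // ... add inner attributes with add_action() ...
 *      add_nested_action_end(*sfa, start);
 */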

static int validate_and_copy_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth,
                                struct sw_flow_actions **sfa);

static int validate_and_copy_sample(const struct nlattr *attr,
                           const struct sw_flow_key *key, int depth,
                           struct sw_flow_actions **sfa)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem, start, err, st_acts;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;

        /* validation done, copy sample action. */
        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
        if (start < 0)
                return start;
        err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
        if (err)
                return err;
        st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
        if (st_acts < 0)
                return st_acts;

        err = validate_and_copy_actions(actions, key, depth + 1, sfa);
        if (err)
                return err;

        add_nested_action_end(*sfa, st_acts);
        add_nested_action_end(*sfa, start);

        return 0;
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
        if (flow_key->eth.type == htons(ETH_P_IP)) {
                if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
                if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }

        return -EINVAL;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
                                     struct sw_flow_actions **sfa)
{
        struct sw_flow_match match;
        struct sw_flow_key key;
        int err, start;

        ovs_match_init(&match, &key, NULL);
        err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
        if (err)
                return err;

        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
        if (start < 0)
                return start;

        err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
                        sizeof(match.key->tun_key));
        add_nested_action_end(*sfa, start);

        return err;
}

static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key,
                        struct sw_flow_actions **sfa,
                        bool *set_tun)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            (ovs_key_lens[key_type] != nla_len(ovs_key) &&
             ovs_key_lens[key_type] != -1))
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;
        const struct ovs_key_ipv6 *ipv6_key;
        int err;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_TUNNEL:
                *set_tun = true;
                err = validate_and_copy_set_tun(a, sfa);
                if (err)
                        return err;
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_IPV6:
                if (flow_key->eth.type != htons(ETH_P_IPV6))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv6_key = nla_data(ovs_key);
                if (ipv6_key->ipv6_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv6_key->ipv6_frag != flow_key->ip.frag)
                        return -EINVAL;

                if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_SCTP:
                if (flow_key->ip.proto != IPPROTO_SCTP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = { .type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = { .type = NLA_UNSPEC },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

static int copy_action(const struct nlattr *from,
                      struct sw_flow_actions **sfa)
{
        int totlen = NLA_ALIGN(from->nla_len);
        struct nlattr *to;

        to = reserve_sfa_size(sfa, from->nla_len);
        if (IS_ERR(to))
                return PTR_ERR(to);

        memcpy(to, from, totlen);
        return 0;
}

static int validate_and_copy_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key,
                                int depth,
                                struct sw_flow_actions **sfa)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);
                bool skip_copy;

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                skip_copy = false;
                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key, sfa, &skip_copy);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_and_copy_sample(a, key, depth, sfa);
                        if (err)
                                return err;
                        skip_copy = true;
                        break;

                default:
                        return -EINVAL;
                }
                if (!skip_copy) {
                        err = copy_action(a, sfa);
                        if (err)
                                return err;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key);
        if (err)
                goto err_flow_free;

        err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
        acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;

        err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
        if (err)
                goto err_flow_free;

        OVS_CB(packet)->flow = flow;
        OVS_CB(packet)->pkt_key = &flow->key;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        struct flow_table *table;
        int i;

        table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
         SET_PARALLEL_OPS
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
        const struct nlattr *a;
        struct nlattr *start;
        int err = 0, rem;

        start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
        if (!start)
                return -EMSGSIZE;

        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                struct nlattr *st_sample;

                switch (type) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                case OVS_SAMPLE_ATTR_ACTIONS:
                        st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
                        if (!st_sample)
                                return -EMSGSIZE;
                        err = actions_to_attr(nla_data(a), nla_len(a), skb);
                        if (err)
                                return err;
                        nla_nest_end(skb, st_sample);
                        break;
                }
        }

        nla_nest_end(skb, start);
        return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);
        struct nlattr *start;
        int err;

        switch (key_type) {
        case OVS_KEY_ATTR_IPV4_TUNNEL:
                start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
                if (!start)
                        return -EMSGSIZE;

                err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
                                             nla_data(ovs_key));
                if (err)
                        return err;
                nla_nest_end(skb, start);
                break;
        default:
                if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
                        return -EMSGSIZE;
                break;
        }

        return 0;
}

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
{
        const struct nlattr *a;
        int rem, err;

        nla_for_each_attr(a, attr, len, rem) {
                int type = nla_type(a);

                switch (type) {
                case OVS_ACTION_ATTR_SET:
                        err = set_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;
                default:
                        if (nla_put(skb, type, nla_len(a), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                }
        }

        return 0;
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
        return NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        struct nlattr *start;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;

        err = ovs_flow_to_nlattrs(&flow->unmasked_key,
                        &flow->unmasked_key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
        if (!nla)
                goto nla_put_failure;

        err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
        if (err)
                goto error;

        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS,
                    sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if (tcp_flags &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_check(flow->sf_acts,
                                                lockdep_ovsl_is_held());

                err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                goto error;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len)
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;

        sf_acts = ovsl_dereference(flow->sf_acts);

        return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 portid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key, masked_key;
        struct sw_flow *flow = NULL;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        struct sw_flow_actions *acts = NULL;
        struct sw_flow_match match;
        int error;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;

        ovs_match_init(&match, &key, &mask);
        error = ovs_match_from_nlattrs(&match,
                        a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error;

                ovs_flow_key_mask(&masked_key, &key, &mask);
                error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
                                                  &masked_key, 0, &acts);
                if (error) {
                        OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
                        goto err_kfree;
                }
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto err_unlock_ovs;

        table = ovsl_dereference(dp->table);

        /* Check if this is a duplicate flow */
        flow = ovs_flow_lookup(table, &key);
        if (!flow) {
                struct sw_flow_mask *mask_p;
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto err_unlock_ovs;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_destroy(table, true);
                                table = ovsl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto err_unlock_ovs;
                }
                clear_stats(flow);

                flow->key = masked_key;
                flow->unmasked_key = key;

                /* Make sure mask is unique in the system */
                mask_p = ovs_sw_flow_mask_find(table, &mask);
                if (!mask_p) {
                        /* Allocate a new mask if none exists. */
                        mask_p = ovs_sw_flow_mask_alloc();
                        if (!mask_p)
                                goto err_flow_free;
                        mask_p->key = mask.key;
                        mask_p->range = mask.range;
                        ovs_sw_flow_mask_insert(table, mask_p);
                }

                ovs_sw_flow_mask_add_ref(mask_p);
                flow->mask = mask_p;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                ovs_flow_insert(table, flow);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto err_unlock_ovs;

                /* The unmasked key has to be the same for flow updates. */
                error = -EINVAL;
                if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
                        OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
                        goto err_unlock_ovs;
                }

                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);
                ovs_flow_deferred_free_acts(old_acts);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }
        ovs_unlock();

        if (!IS_ERR(reply))
                ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
        else
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

err_flow_free:
        ovs_flow_free(flow, false);
err_unlock_ovs:
        ovs_unlock();
err_kfree:
        kfree(acts);
error:
        return error;
}
1398
1399 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1400 {
1401         struct nlattr **a = info->attrs;
1402         struct ovs_header *ovs_header = info->userhdr;
1403         struct sw_flow_key key;
1404         struct sk_buff *reply;
1405         struct sw_flow *flow;
1406         struct datapath *dp;
1407         struct flow_table *table;
1408         struct sw_flow_match match;
1409         int err;
1410
1411         if (!a[OVS_FLOW_ATTR_KEY]) {
1412                 OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
1413                 return -EINVAL;
1414         }
1415
1416         ovs_match_init(&match, &key, NULL);
1417         err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1418         if (err)
1419                 return err;
1420
1421         ovs_lock();
1422         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1423         if (!dp) {
1424                 err = -ENODEV;
1425                 goto unlock;
1426         }
1427
1428         table = ovsl_dereference(dp->table);
1429         flow = ovs_flow_lookup_unmasked_key(table, &match);
1430         if (!flow) {
1431                 err = -ENOENT;
1432                 goto unlock;
1433         }
1434
1435         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1436                                         info->snd_seq, OVS_FLOW_CMD_NEW);
1437         if (IS_ERR(reply)) {
1438                 err = PTR_ERR(reply);
1439                 goto unlock;
1440         }
1441
1442         ovs_unlock();
1443         return genlmsg_reply(reply, info);
1444 unlock:
1445         ovs_unlock();
1446         return err;
1447 }
1448
1449 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1450 {
1451         struct nlattr **a = info->attrs;
1452         struct ovs_header *ovs_header = info->userhdr;
1453         struct sw_flow_key key;
1454         struct sk_buff *reply;
1455         struct sw_flow *flow;
1456         struct datapath *dp;
1457         struct flow_table *table;
1458         struct sw_flow_match match;
1459         int err;
1460
1461         ovs_lock();
1462         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1463         if (!dp) {
1464                 err = -ENODEV;
1465                 goto unlock;
1466         }
1467
1468         if (!a[OVS_FLOW_ATTR_KEY]) {
1469                 err = flush_flows(dp);
1470                 goto unlock;
1471         }
1472
1473         ovs_match_init(&match, &key, NULL);
1474         err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1475         if (err)
1476                 goto unlock;
1477
1478         table = ovsl_dereference(dp->table);
1479         flow = ovs_flow_lookup_unmasked_key(table, &match);
1480         if (!flow) {
1481                 err = -ENOENT;
1482                 goto unlock;
1483         }
1484
1485         reply = ovs_flow_cmd_alloc_info(flow);
1486         if (!reply) {
1487                 err = -ENOMEM;
1488                 goto unlock;
1489         }
1490
1491         ovs_flow_remove(table, flow);
1492
1493         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
1494                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1495         BUG_ON(err < 0);
1496
1497         ovs_flow_free(flow, true);
1498         ovs_unlock();
1499
1500         ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
1501         return 0;
1502 unlock:
1503         ovs_unlock();
1504         return err;
1505 }
1506
1507 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1508 {
1509         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1510         struct datapath *dp;
1511         struct flow_table *table;
1512
1513         rcu_read_lock();
1514         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1515         if (!dp) {
1516                 rcu_read_unlock();
1517                 return -ENODEV;
1518         }
1519
1520         table = rcu_dereference(dp->table);
1521         for (;;) {
1522                 struct sw_flow *flow;
1523                 u32 bucket, obj;
1524
1525                 bucket = cb->args[0];
1526                 obj = cb->args[1];
1527                 flow = ovs_flow_dump_next(table, &bucket, &obj);
1528                 if (!flow)
1529                         break;
1530
1531                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1532                                            NETLINK_CB(cb->skb).portid,
1533                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1534                                            OVS_FLOW_CMD_NEW) < 0)
1535                         break;
1536
1537                 cb->args[0] = bucket;
1538                 cb->args[1] = obj;
1539         }
1540         rcu_read_unlock();
1541         return skb->len;
1542 }
1543
1544 static struct genl_ops dp_flow_genl_ops[] = {
1545         { .cmd = OVS_FLOW_CMD_NEW,
1546           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1547           .policy = flow_policy,
1548           .doit = ovs_flow_cmd_new_or_set
1549         },
1550         { .cmd = OVS_FLOW_CMD_DEL,
1551           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1552           .policy = flow_policy,
1553           .doit = ovs_flow_cmd_del
1554         },
1555         { .cmd = OVS_FLOW_CMD_GET,
1556           .flags = 0,               /* OK for unprivileged users. */
1557           .policy = flow_policy,
1558           .doit = ovs_flow_cmd_get,
1559           .dumpit = ovs_flow_cmd_dump
1560         },
1561         { .cmd = OVS_FLOW_CMD_SET,
1562           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1563           .policy = flow_policy,
1564           .doit = ovs_flow_cmd_new_or_set,
1565         },
1566 };
1567
1568 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1569         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1570         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1571 };
1572
1573 static struct genl_family dp_datapath_genl_family = {
1574         .id = GENL_ID_GENERATE,
1575         .hdrsize = sizeof(struct ovs_header),
1576         .name = OVS_DATAPATH_FAMILY,
1577         .version = OVS_DATAPATH_VERSION,
1578         .maxattr = OVS_DP_ATTR_MAX,
1579         .netnsok = true,
1580          SET_PARALLEL_OPS
1581 };
1582
1583 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1584         .name = OVS_DATAPATH_MCGROUP
1585 };
1586
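/* Upper bound on the size of a datapath reply message: the Generic Netlink
 * header plus the OVS_DP_ATTR_NAME and OVS_DP_ATTR_STATS attributes.
 */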
1587 static size_t ovs_dp_cmd_msg_size(void)
1588 {
1589         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1590
1591         msgsize += nla_total_size(IFNAMSIZ);
1592         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1593
1594         return msgsize;
1595 }
1596
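/* Called with ovs_mutex or RCU read lock.  Fills @skb with a single
 * datapath message carrying the ifindex, name and statistics of @dp.
 */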
1597 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1598                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1599 {
1600         struct ovs_header *ovs_header;
1601         struct ovs_dp_stats dp_stats;
1602         int err;
1603
1604         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1605                                    flags, cmd);
1606         if (!ovs_header)
1607                 goto error;
1608
1609         ovs_header->dp_ifindex = get_dpifindex(dp);
1610
1611         rcu_read_lock();
1612         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1613         rcu_read_unlock();
1614         if (err)
1615                 goto nla_put_failure;
1616
1617         get_dp_stats(dp, &dp_stats);
1618         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1619                 goto nla_put_failure;
1620
1621         return genlmsg_end(skb, ovs_header);
1622
1623 nla_put_failure:
1624         genlmsg_cancel(skb, ovs_header);
1625 error:
1626         return -EMSGSIZE;
1627 }
1628
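/* Called with ovs_mutex.  Allocates a reply skb sized by
 * ovs_dp_cmd_msg_size() and fills it in; returns an ERR_PTR() on failure.
 */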
1629 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1630                                              u32 seq, u8 cmd)
1631 {
1632         struct sk_buff *skb;
1633         int retval;
1634
1635         skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1636         if (!skb)
1637                 return ERR_PTR(-ENOMEM);
1638
1639         retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1640         if (retval < 0) {
1641                 kfree_skb(skb);
1642                 return ERR_PTR(retval);
1643         }
1644         return skb;
1645 }
1646
1647 /* Called with ovs_mutex. */
1648 static struct datapath *lookup_datapath(struct net *net,
1649                                         struct ovs_header *ovs_header,
1650                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1651 {
1652         struct datapath *dp;
1653
1654         if (!a[OVS_DP_ATTR_NAME])
1655                 dp = get_dp(net, ovs_header->dp_ifindex);
1656         else {
1657                 struct vport *vport;
1658
1659                 rcu_read_lock();
1660                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1661                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1662                 rcu_read_unlock();
1663         }
1664         return dp ? dp : ERR_PTR(-ENODEV);
1665 }
1666
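/* Handler for OVS_DP_CMD_NEW: allocates the datapath, its flow table,
 * per-CPU stats and vport hash table, then creates the OVSP_LOCAL internal
 * port.  On failure, everything is unwound in reverse order.
 */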
1667 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1668 {
1669         struct nlattr **a = info->attrs;
1670         struct vport_parms parms;
1671         struct sk_buff *reply;
1672         struct datapath *dp;
1673         struct vport *vport;
1674         struct ovs_net *ovs_net;
1675         int err, i;
1676
1677         err = -EINVAL;
1678         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1679                 goto err;
1680
1681         ovs_lock();
1682
1683         err = -ENOMEM;
1684         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1685         if (!dp)
1686                 goto err_unlock_ovs;
1687
1688         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1689
1690         /* Allocate table. */
1691         err = -ENOMEM;
1692         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1693         if (!dp->table)
1694                 goto err_free_dp;
1695
1696         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1697         if (!dp->stats_percpu) {
1698                 err = -ENOMEM;
1699                 goto err_destroy_table;
1700         }
1701
1702         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1703                             GFP_KERNEL);
1704         if (!dp->ports) {
1705                 err = -ENOMEM;
1706                 goto err_destroy_percpu;
1707         }
1708
1709         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1710                 INIT_HLIST_HEAD(&dp->ports[i]);
1711
1712         /* Set up our datapath device. */
1713         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1714         parms.type = OVS_VPORT_TYPE_INTERNAL;
1715         parms.options = NULL;
1716         parms.dp = dp;
1717         parms.port_no = OVSP_LOCAL;
1718         parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1719
1720         vport = new_vport(&parms);
1721         if (IS_ERR(vport)) {
1722                 err = PTR_ERR(vport);
1723                 if (err == -EBUSY)
1724                         err = -EEXIST;
1725
1726                 goto err_destroy_ports_array;
1727         }
1728
1729         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1730                                       info->snd_seq, OVS_DP_CMD_NEW);
1731         err = PTR_ERR(reply);
1732         if (IS_ERR(reply))
1733                 goto err_destroy_local_port;
1734
1735         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1736         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1737
1738         ovs_unlock();
1739
1740         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1741         return 0;
1742
1743 err_destroy_local_port:
1744         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1745 err_destroy_ports_array:
1746         kfree(dp->ports);
1747 err_destroy_percpu:
1748         free_percpu(dp->stats_percpu);
1749 err_destroy_table:
1750         ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
1751 err_free_dp:
1752         release_net(ovs_dp_get_net(dp));
1753         kfree(dp);
1754 err_unlock_ovs:
1755         ovs_unlock();
1756 err:
1757         return err;
1758 }
1759
1760 /* Called with ovs_mutex. */
1761 static void __dp_destroy(struct datapath *dp)
1762 {
1763         int i;
1764
1765         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1766                 struct vport *vport;
1767                 struct hlist_node *n;
1768
1769                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1770                         if (vport->port_no != OVSP_LOCAL)
1771                                 ovs_dp_detach_port(vport);
1772         }
1773
1774         list_del_rcu(&dp->list_node);
1775
1776         /* OVSP_LOCAL is the datapath's internal port.  All other ports in
1777          * the datapath must be destroyed before the datapath itself is freed.
1778          */
1779         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1780
1781         ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, true);
1782
1783         call_rcu(&dp->rcu, destroy_dp_rcu);
1784 }
1785
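/* Handler for OVS_DP_CMD_DEL.  The notification reply is built before the
 * datapath is torn down, so deletion cannot fail halfway through.
 */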
1786 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1787 {
1788         struct sk_buff *reply;
1789         struct datapath *dp;
1790         int err;
1791
1792         ovs_lock();
1793         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1794         err = PTR_ERR(dp);
1795         if (IS_ERR(dp))
1796                 goto unlock;
1797
1798         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1799                                       info->snd_seq, OVS_DP_CMD_DEL);
1800         err = PTR_ERR(reply);
1801         if (IS_ERR(reply))
1802                 goto unlock;
1803
1804         __dp_destroy(dp);
1805         ovs_unlock();
1806
1807         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1808
1809         return 0;
1810 unlock:
1811         ovs_unlock();
1812         return err;
1813 }
1814
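/* Handler for OVS_DP_CMD_SET.  If the reply cannot be allocated, the error
 * is reported to the multicast group via netlink_set_err() instead of
 * failing the operation.
 */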
1815 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1816 {
1817         struct sk_buff *reply;
1818         struct datapath *dp;
1819         int err;
1820
1821         ovs_lock();
1822         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1823         err = PTR_ERR(dp);
1824         if (IS_ERR(dp))
1825                 goto unlock;
1826
1827         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1828                                       info->snd_seq, OVS_DP_CMD_NEW);
1829         if (IS_ERR(reply)) {
1830                 err = PTR_ERR(reply);
1831                 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1832                                 ovs_dp_datapath_multicast_group.id, err);
1833                 err = 0;
1834                 goto unlock;
1835         }
1836
1837         ovs_unlock();
1838         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1839
1840         return 0;
1841 unlock:
1842         ovs_unlock();
1843         return err;
1844 }
1845
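/* Handler for OVS_DP_CMD_GET: replies with a single message describing the
 * datapath identified by dp_ifindex or OVS_DP_ATTR_NAME.
 */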
1846 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1847 {
1848         struct sk_buff *reply;
1849         struct datapath *dp;
1850         int err;
1851
1852         ovs_lock();
1853         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1854         if (IS_ERR(dp)) {
1855                 err = PTR_ERR(dp);
1856                 goto unlock;
1857         }
1858
1859         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1860                                       info->snd_seq, OVS_DP_CMD_NEW);
1861         if (IS_ERR(reply)) {
1862                 err = PTR_ERR(reply);
1863                 goto unlock;
1864         }
1865
1866         ovs_unlock();
1867         return genlmsg_reply(reply, info);
1868
1869 unlock:
1870         ovs_unlock();
1871         return err;
1872 }
1873
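/* Dump handler for OVS_DP_CMD_GET.  Iterates the per-namespace datapath
 * list under RCU, skipping the cb->args[0] entries already dumped.
 */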
1874 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1875 {
1876         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1877         struct datapath *dp;
1878         int skip = cb->args[0];
1879         int i = 0;
1880
1881         rcu_read_lock();
1882         list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1883                 if (i >= skip &&
1884                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1885                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1886                                          OVS_DP_CMD_NEW) < 0)
1887                         break;
1888                 i++;
1889         }
1890         rcu_read_unlock();
1891
1892         cb->args[0] = i;
1893
1894         return skb->len;
1895 }
1896
1897 static struct genl_ops dp_datapath_genl_ops[] = {
1898         { .cmd = OVS_DP_CMD_NEW,
1899           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1900           .policy = datapath_policy,
1901           .doit = ovs_dp_cmd_new
1902         },
1903         { .cmd = OVS_DP_CMD_DEL,
1904           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1905           .policy = datapath_policy,
1906           .doit = ovs_dp_cmd_del
1907         },
1908         { .cmd = OVS_DP_CMD_GET,
1909           .flags = 0,               /* OK for unprivileged users. */
1910           .policy = datapath_policy,
1911           .doit = ovs_dp_cmd_get,
1912           .dumpit = ovs_dp_cmd_dump
1913         },
1914         { .cmd = OVS_DP_CMD_SET,
1915           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1916           .policy = datapath_policy,
1917           .doit = ovs_dp_cmd_set,
1918         },
1919 };
1920
1921 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1922         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1923         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1924         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1925         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1926         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1927         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1928 };
1929
1930 static struct genl_family dp_vport_genl_family = {
1931         .id = GENL_ID_GENERATE,
1932         .hdrsize = sizeof(struct ovs_header),
1933         .name = OVS_VPORT_FAMILY,
1934         .version = OVS_VPORT_VERSION,
1935         .maxattr = OVS_VPORT_ATTR_MAX,
1936         .netnsok = true,
1937          SET_PARALLEL_OPS
1938 };
1939
1940 struct genl_multicast_group ovs_dp_vport_multicast_group = {
1941         .name = OVS_VPORT_MCGROUP
1942 };
1943
1944 /* Called with ovs_mutex or RCU read lock. */
1945 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1946                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1947 {
1948         struct ovs_header *ovs_header;
1949         struct ovs_vport_stats vport_stats;
1950         int err;
1951
1952         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1953                                  flags, cmd);
1954         if (!ovs_header)
1955                 return -EMSGSIZE;
1956
1957         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1958
1959         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1960             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1961             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1962             nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
1963                 goto nla_put_failure;
1964
1965         ovs_vport_get_stats(vport, &vport_stats);
1966         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1967                     &vport_stats))
1968                 goto nla_put_failure;
1969
1970         err = ovs_vport_get_options(vport, skb);
1971         if (err == -EMSGSIZE)
1972                 goto error;
1973
1974         return genlmsg_end(skb, ovs_header);
1975
1976 nla_put_failure:
1977         err = -EMSGSIZE;
1978 error:
1979         genlmsg_cancel(skb, ovs_header);
1980         return err;
1981 }
1982
1983 /* Called with ovs_mutex or RCU read lock. */
1984 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1985                                          u32 seq, u8 cmd)
1986 {
1987         struct sk_buff *skb;
1988         int retval;
1989
1990         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1991         if (!skb)
1992                 return ERR_PTR(-ENOMEM);
1993
1994         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1995         BUG_ON(retval < 0);
1996
1997         return skb;
1998 }
1999
2000 /* Called with ovs_mutex or RCU read lock. */
2001 static struct vport *lookup_vport(struct net *net,
2002                                   struct ovs_header *ovs_header,
2003                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2004 {
2005         struct datapath *dp;
2006         struct vport *vport;
2007
2008         if (a[OVS_VPORT_ATTR_NAME]) {
2009                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
2010                 if (!vport)
2011                         return ERR_PTR(-ENODEV);
2012                 if (ovs_header->dp_ifindex &&
2013                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2014                         return ERR_PTR(-ENODEV);
2015                 return vport;
2016         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
2017                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2018
2019                 if (port_no >= DP_MAX_PORTS)
2020                         return ERR_PTR(-EFBIG);
2021
2022                 dp = get_dp(net, ovs_header->dp_ifindex);
2023                 if (!dp)
2024                         return ERR_PTR(-ENODEV);
2025
2026                 vport = ovs_vport_ovsl_rcu(dp, port_no);
2027                 if (!vport)
2028                         return ERR_PTR(-ENODEV);
2029                 return vport;
2030         } else
2031                 return ERR_PTR(-EINVAL);
2032 }
2033
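/* Handler for OVS_VPORT_CMD_NEW.  If no port number is supplied, the first
 * free number above OVSP_LOCAL is chosen.  The new vport is detached again
 * if the reply cannot be built.
 */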
2034 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2035 {
2036         struct nlattr **a = info->attrs;
2037         struct ovs_header *ovs_header = info->userhdr;
2038         struct vport_parms parms;
2039         struct sk_buff *reply;
2040         struct vport *vport;
2041         struct datapath *dp;
2042         u32 port_no;
2043         int err;
2044
2045         err = -EINVAL;
2046         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2047             !a[OVS_VPORT_ATTR_UPCALL_PID])
2048                 goto exit;
2049
2050         ovs_lock();
2051         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2052         err = -ENODEV;
2053         if (!dp)
2054                 goto exit_unlock;
2055
2056         if (a[OVS_VPORT_ATTR_PORT_NO]) {
2057                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2058
2059                 err = -EFBIG;
2060                 if (port_no >= DP_MAX_PORTS)
2061                         goto exit_unlock;
2062
2063                 vport = ovs_vport_ovsl(dp, port_no);
2064                 err = -EBUSY;
2065                 if (vport)
2066                         goto exit_unlock;
2067         } else {
2068                 for (port_no = 1; ; port_no++) {
2069                         if (port_no >= DP_MAX_PORTS) {
2070                                 err = -EFBIG;
2071                                 goto exit_unlock;
2072                         }
2073                         vport = ovs_vport_ovsl(dp, port_no);
2074                         if (!vport)
2075                                 break;
2076                 }
2077         }
2078
2079         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2080         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2081         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2082         parms.dp = dp;
2083         parms.port_no = port_no;
2084         parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
2085
2086         vport = new_vport(&parms);
2087         err = PTR_ERR(vport);
2088         if (IS_ERR(vport))
2089                 goto exit_unlock;
2090
2091         err = 0;
2092         if (a[OVS_VPORT_ATTR_STATS])
2093                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2094
2095         reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
2096                                          OVS_VPORT_CMD_NEW);
2097         if (IS_ERR(reply)) {
2098                 err = PTR_ERR(reply);
2099                 ovs_dp_detach_port(vport);
2100                 goto exit_unlock;
2101         }
2102
2103         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2104
2105 exit_unlock:
2106         ovs_unlock();
2107 exit:
2108         return err;
2109 }
2110
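/* Handler for OVS_VPORT_CMD_SET: updates the options, statistics and
 * upcall portid of an existing vport.  Changing the vport type is rejected
 * with -EINVAL.
 */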
2111 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2112 {
2113         struct nlattr **a = info->attrs;
2114         struct sk_buff *reply;
2115         struct vport *vport;
2116         int err;
2117
2118         ovs_lock();
2119         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2120         err = PTR_ERR(vport);
2121         if (IS_ERR(vport))
2122                 goto exit_unlock;
2123
2124         if (a[OVS_VPORT_ATTR_TYPE] &&
2125             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2126                 err = -EINVAL;
2127                 goto exit_unlock;
2128         }
2129
2130         reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2131         if (!reply) {
2132                 err = -ENOMEM;
2133                 goto exit_unlock;
2134         }
2135
2136         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2137                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2138                 if (err)
2139                         goto exit_free;
2140         }
2141
2142         if (a[OVS_VPORT_ATTR_STATS])
2143                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2144
2145         if (a[OVS_VPORT_ATTR_UPCALL_PID])
2146                 vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
2147
2148         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2149                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2150         BUG_ON(err < 0);
2151
2152         ovs_unlock();
2153         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2154         return 0;
2155
2156 exit_free:
2157         kfree_skb(reply);
2158 exit_unlock:
2159         ovs_unlock();
2160         return err;
2161 }
2162
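/* Handler for OVS_VPORT_CMD_DEL.  OVSP_LOCAL belongs to the datapath
 * itself and cannot be deleted on its own.
 */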
2163 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2164 {
2165         struct nlattr **a = info->attrs;
2166         struct sk_buff *reply;
2167         struct vport *vport;
2168         int err;
2169
2170         ovs_lock();
2171         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2172         err = PTR_ERR(vport);
2173         if (IS_ERR(vport))
2174                 goto exit_unlock;
2175
2176         if (vport->port_no == OVSP_LOCAL) {
2177                 err = -EINVAL;
2178                 goto exit_unlock;
2179         }
2180
2181         reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2182                                          info->snd_seq, OVS_VPORT_CMD_DEL);
2183         err = PTR_ERR(reply);
2184         if (IS_ERR(reply))
2185                 goto exit_unlock;
2186
2187         err = 0;
2188         ovs_dp_detach_port(vport);
2189
2190         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2191
2192 exit_unlock:
2193         ovs_unlock();
2194         return err;
2195 }
2196
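/* Handler for OVS_VPORT_CMD_GET.  Runs under the RCU read lock only, so it
 * can proceed in parallel with writers that hold ovs_mutex.
 */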
2197 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2198 {
2199         struct nlattr **a = info->attrs;
2200         struct ovs_header *ovs_header = info->userhdr;
2201         struct sk_buff *reply;
2202         struct vport *vport;
2203         int err;
2204
2205         rcu_read_lock();
2206         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2207         err = PTR_ERR(vport);
2208         if (IS_ERR(vport))
2209                 goto exit_unlock;
2210
2211         reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2212                                          info->snd_seq, OVS_VPORT_CMD_NEW);
2213         err = PTR_ERR(reply);
2214         if (IS_ERR(reply))
2215                 goto exit_unlock;
2216
2217         rcu_read_unlock();
2218
2219         return genlmsg_reply(reply, info);
2220
2221 exit_unlock:
2222         rcu_read_unlock();
2223         return err;
2224 }
2225
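/* Dump handler for OVS_VPORT_CMD_GET.  cb->args[0] holds the current hash
 * bucket and cb->args[1] the offset within it, so a multi-part dump can
 * resume exactly where the previous call stopped.
 */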
2226 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2227 {
2228         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2229         struct datapath *dp;
2230         int bucket = cb->args[0], skip = cb->args[1];
2231         int i, j = 0;
2232
2233         rcu_read_lock();
2234         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2235         if (!dp) {
2236                 rcu_read_unlock();
2237                 return -ENODEV;
2238         }
2239         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2240                 struct vport *vport;
2241
2242                 j = 0;
2243                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2244                         if (j >= skip &&
2245                             ovs_vport_cmd_fill_info(vport, skb,
2246                                                     NETLINK_CB(cb->skb).portid,
2247                                                     cb->nlh->nlmsg_seq,
2248                                                     NLM_F_MULTI,
2249                                                     OVS_VPORT_CMD_NEW) < 0)
2250                                 goto out;
2251
2252                         j++;
2253                 }
2254                 skip = 0;
2255         }
2256 out:
2257         rcu_read_unlock();
2258
2259         cb->args[0] = i;
2260         cb->args[1] = j;
2261
2262         return skb->len;
2263 }
2264
2265 static struct genl_ops dp_vport_genl_ops[] = {
2266         { .cmd = OVS_VPORT_CMD_NEW,
2267           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2268           .policy = vport_policy,
2269           .doit = ovs_vport_cmd_new
2270         },
2271         { .cmd = OVS_VPORT_CMD_DEL,
2272           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2273           .policy = vport_policy,
2274           .doit = ovs_vport_cmd_del
2275         },
2276         { .cmd = OVS_VPORT_CMD_GET,
2277           .flags = 0,               /* OK for unprivileged users. */
2278           .policy = vport_policy,
2279           .doit = ovs_vport_cmd_get,
2280           .dumpit = ovs_vport_cmd_dump
2281         },
2282         { .cmd = OVS_VPORT_CMD_SET,
2283           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2284           .policy = vport_policy,
2285           .doit = ovs_vport_cmd_set,
2286         },
2287 };
2288
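/* Ties each Generic Netlink family to its operations and optional
 * multicast group so that all of them can be (un)registered in one loop.
 */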
2289 struct genl_family_and_ops {
2290         struct genl_family *family;
2291         struct genl_ops *ops;
2292         int n_ops;
2293         struct genl_multicast_group *group;
2294 };
2295
2296 static const struct genl_family_and_ops dp_genl_families[] = {
2297         { &dp_datapath_genl_family,
2298           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
2299           &ovs_dp_datapath_multicast_group },
2300         { &dp_vport_genl_family,
2301           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
2302           &ovs_dp_vport_multicast_group },
2303         { &dp_flow_genl_family,
2304           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
2305           &ovs_dp_flow_multicast_group },
2306         { &dp_packet_genl_family,
2307           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
2308           NULL },
2309 };
2310
2311 static void dp_unregister_genl(int n_families)
2312 {
2313         int i;
2314
2315         for (i = 0; i < n_families; i++)
2316                 genl_unregister_family(dp_genl_families[i].family);
2317 }
2318
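/* Registers every family in dp_genl_families[] together with its multicast
 * group; on error, the families registered so far are unregistered again.
 */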
2319 static int dp_register_genl(void)
2320 {
2321         int n_registered;
2322         int err;
2323         int i;
2324
2325         n_registered = 0;
2326         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2327                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2328
2329                 err = genl_register_family_with_ops(f->family, f->ops,
2330                                                     f->n_ops);
2331                 if (err)
2332                         goto error;
2333                 n_registered++;
2334
2335                 if (f->group) {
2336                         err = genl_register_mc_group(f->family, f->group);
2337                         if (err)
2338                                 goto error;
2339                 }
2340         }
2341
2342         return 0;
2343
2344 error:
2345         dp_unregister_genl(n_registered);
2346         return err;
2347 }
2348
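/* Delayed work run every REHASH_FLOW_INTERVAL: swaps each datapath's flow
 * table for a rehashed copy so entries are redistributed across buckets,
 * destroys the old table once readers are done with it, then reschedules
 * itself.
 */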
2349 static void rehash_flow_table(struct work_struct *work)
2350 {
2351         struct datapath *dp;
2352         struct net *net;
2353
2354         ovs_lock();
2355         rtnl_lock();
2356         for_each_net(net) {
2357                 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2358
2359                 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2360                         struct flow_table *old_table = ovsl_dereference(dp->table);
2361                         struct flow_table *new_table;
2362
2363                         new_table = ovs_flow_tbl_rehash(old_table);
2364                         if (!IS_ERR(new_table)) {
2365                                 rcu_assign_pointer(dp->table, new_table);
2366                                 ovs_flow_tbl_destroy(old_table, true);
2367                         }
2368                 }
2369         }
2370         rtnl_unlock();
2371         ovs_unlock();
2372         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2373 }
2374
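/* Per-network-namespace setup: initializes the namespace's datapath list
 * and the work item that handles datapath device notifications.
 */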
2375 static int __net_init ovs_init_net(struct net *net)
2376 {
2377         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2378
2379         INIT_LIST_HEAD(&ovs_net->dps);
2380         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2381         return 0;
2382 }
2383
2384 static void __net_exit ovs_exit_net(struct net *net)
2385 {
2386         struct datapath *dp, *dp_next;
2387         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2388
2389         ovs_lock();
2390         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2391                 __dp_destroy(dp);
2392         ovs_unlock();
2393
2394         cancel_work_sync(&ovs_net->dp_notify_work);
2395 }
2396
2397 static struct pernet_operations ovs_net_ops = {
2398         .init = ovs_init_net,
2399         .exit = ovs_exit_net,
2400         .id   = &ovs_net_id,
2401         .size = sizeof(struct ovs_net),
2402 };
2403
2404 DEFINE_COMPAT_PNET_REG_FUNC(device);
2405
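/* Module entry point: brings up the workqueue, flow and vport subsystems,
 * registers the pernet ops, netdevice notifier and Generic Netlink
 * families, then starts the periodic flow-table rehash.
 */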
2406 static int __init dp_init(void)
2407 {
2408         int err;
2409
2410         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2411
2412         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2413                 VERSION);
2414
2415         err = ovs_workqueues_init();
2416         if (err)
2417                 goto error;
2418
2419         err = ovs_flow_init();
2420         if (err)
2421                 goto error_wq;
2422
2423         err = ovs_vport_init();
2424         if (err)
2425                 goto error_flow_exit;
2426
2427         err = register_pernet_device(&ovs_net_ops);
2428         if (err)
2429                 goto error_vport_exit;
2430
2431         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2432         if (err)
2433                 goto error_netns_exit;
2434
2435         err = dp_register_genl();
2436         if (err < 0)
2437                 goto error_unreg_notifier;
2438
2439         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2440
2441         return 0;
2442
2443 error_unreg_notifier:
2444         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2445 error_netns_exit:
2446         unregister_pernet_device(&ovs_net_ops);
2447 error_vport_exit:
2448         ovs_vport_exit();
2449 error_flow_exit:
2450         ovs_flow_exit();
2451 error_wq:
2452         ovs_workqueues_exit();
2453 error:
2454         return err;
2455 }
2456
2457 static void dp_cleanup(void)
2458 {
2459         cancel_delayed_work_sync(&rehash_flow_wq);
2460         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2461         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2462         unregister_pernet_device(&ovs_net_ops);
2463         rcu_barrier();
2464         ovs_vport_exit();
2465         ovs_flow_exit();
2466         ovs_workqueues_exit();
2467 }
2468
2469 module_init(dp_init);
2470 module_exit(dp_cleanup);
2471
2472 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2473 MODULE_LICENSE("GPL");
2474 MODULE_VERSION(VERSION);