datapath: Factor out allocation and verification of actions.
[cascardo/ovs.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
52 #include <net/genetlink.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56
57 #include "datapath.h"
58 #include "flow.h"
59 #include "flow_table.h"
60 #include "flow_netlink.h"
61 #include "vlan.h"
62 #include "vport-internal_dev.h"
63 #include "vport-netdev.h"
64
65 int ovs_net_id __read_mostly;
66
67 static struct genl_family dp_packet_genl_family;
68 static struct genl_family dp_flow_genl_family;
69 static struct genl_family dp_datapath_genl_family;
70
71 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
72         .name = OVS_FLOW_MCGROUP
73 };
74
75 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
76         .name = OVS_DATAPATH_MCGROUP
77 };
78
79 struct genl_multicast_group ovs_dp_vport_multicast_group = {
80         .name = OVS_VPORT_MCGROUP
81 };
82
83 /* Check whether we need to build a reply message.
84  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
85 static bool ovs_must_notify(struct genl_info *info,
86                             const struct genl_multicast_group *grp)
87 {
88         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
89                 netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
90 }
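
/* Illustrative sketch, not part of the datapath: the userspace side of the
 * NLM_F_ECHO check above.  A request that wants the kernel to build a reply
 * regardless of multicast listeners simply sets the flag in its header:
 *
 *	struct nlmsghdr *nlh = ...;		// request being built
 *	nlh->nlmsg_flags |= NLM_F_ECHO;		// ask for an echoed reply
 *
 * Without NLM_F_ECHO, a reply is only built when the multicast group has
 * listeners.
 */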
91
92 static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
93                        struct sk_buff *skb, struct genl_info *info)
94 {
95         genl_notify(family, skb, genl_info_net(info),
96                     info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
97 }
98
99 /**
100  * DOC: Locking:
101  *
102  * All writes, e.g. writes to device state (add/remove datapath or port, set
103  * operations on vports, etc.) and writes to other state (flow table
104  * modifications, setting miscellaneous datapath parameters, etc.), are
105  * protected by ovs_lock.
106  *
107  * Reads are protected by RCU.
108  *
109  * There are a few special cases (mostly stats) that have their own
110  * synchronization but they nest under all of above and don't interact with
111  * each other.
112  *
113  * The RTNL lock nests inside ovs_mutex.
114  */
115
116 static DEFINE_MUTEX(ovs_mutex);
117
118 void ovs_lock(void)
119 {
120         mutex_lock(&ovs_mutex);
121 }
122
123 void ovs_unlock(void)
124 {
125         mutex_unlock(&ovs_mutex);
126 }
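
/* Example usage, a minimal sketch: write paths serialize on ovs_mutex via the
 * helpers above, while read paths only need RCU:
 *
 *	ovs_lock();
 *	// ... modify datapath, vport or flow table state ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	// ... read-only lookups, e.g. get_dp() or ovs_lookup_vport() ...
 *	rcu_read_unlock();
 */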
127
128 #ifdef CONFIG_LOCKDEP
129 int lockdep_ovsl_is_held(void)
130 {
131         if (debug_locks)
132                 return lockdep_is_held(&ovs_mutex);
133         else
134                 return 1;
135 }
136 #endif
137
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139                              const struct dp_upcall_info *);
140 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
141                                   const struct dp_upcall_info *);
142
143 /* Must be called with rcu_read_lock or ovs_mutex. */
144 static struct datapath *get_dp(struct net *net, int dp_ifindex)
145 {
146         struct datapath *dp = NULL;
147         struct net_device *dev;
148
149         rcu_read_lock();
150         dev = dev_get_by_index_rcu(net, dp_ifindex);
151         if (dev) {
152                 struct vport *vport = ovs_internal_dev_get_vport(dev);
153                 if (vport)
154                         dp = vport->dp;
155         }
156         rcu_read_unlock();
157
158         return dp;
159 }
160
161 /* Must be called with rcu_read_lock or ovs_mutex. */
162 const char *ovs_dp_name(const struct datapath *dp)
163 {
164         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
165         return vport->ops->get_name(vport);
166 }
167
168 static int get_dpifindex(struct datapath *dp)
169 {
170         struct vport *local;
171         int ifindex;
172
173         rcu_read_lock();
174
175         local = ovs_vport_rcu(dp, OVSP_LOCAL);
176         if (local)
177                 ifindex = netdev_vport_priv(local)->dev->ifindex;
178         else
179                 ifindex = 0;
180
181         rcu_read_unlock();
182
183         return ifindex;
184 }
185
186 static void destroy_dp_rcu(struct rcu_head *rcu)
187 {
188         struct datapath *dp = container_of(rcu, struct datapath, rcu);
189
190         ovs_flow_tbl_destroy(&dp->table);
191         free_percpu(dp->stats_percpu);
192         release_net(ovs_dp_get_net(dp));
193         kfree(dp->ports);
194         kfree(dp);
195 }
196
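/* Note: the bucket mask below assumes DP_VPORT_HASH_BUCKETS is a power of
 * two; e.g. with 1024 buckets, port_no 1025 maps to 1025 & 1023 == 1. */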
197 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
198                                             u16 port_no)
199 {
200         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
201 }
202
203 /* Called with ovs_mutex or RCU read lock. */
204 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
205 {
206         struct vport *vport;
207         struct hlist_head *head;
208
209         head = vport_hash_bucket(dp, port_no);
210         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
211                 if (vport->port_no == port_no)
212                         return vport;
213         }
214         return NULL;
215 }
216
217 /* Called with ovs_mutex. */
218 static struct vport *new_vport(const struct vport_parms *parms)
219 {
220         struct vport *vport;
221
222         vport = ovs_vport_add(parms);
223         if (!IS_ERR(vport)) {
224                 struct datapath *dp = parms->dp;
225                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
226
227                 hlist_add_head_rcu(&vport->dp_hash_node, head);
228         }
229         return vport;
230 }
231
232 void ovs_dp_detach_port(struct vport *p)
233 {
234         ASSERT_OVSL();
235
236         /* First drop references to device. */
237         hlist_del_rcu(&p->dp_hash_node);
238
239         /* Then destroy it. */
240         ovs_vport_del(p);
241 }
242
243 void ovs_dp_process_packet_with_key(struct sk_buff *skb,
244                                     struct sw_flow_key *pkt_key,
245                                     bool recirc)
246 {
247         const struct vport *p = OVS_CB(skb)->input_vport;
248         struct datapath *dp = p->dp;
249         struct sw_flow *flow;
250         struct dp_stats_percpu *stats;
251         u64 *stats_counter;
252         u32 n_mask_hit;
253
254         stats = this_cpu_ptr(dp->stats_percpu);
255
256         /* Look up flow. */
257         flow = ovs_flow_tbl_lookup_stats(&dp->table, pkt_key, skb_get_hash(skb),
258                                          &n_mask_hit);
259         if (unlikely(!flow)) {
260                 struct dp_upcall_info upcall;
261
262                 upcall.cmd = OVS_PACKET_CMD_MISS;
263                 upcall.key = pkt_key;
264                 upcall.userdata = NULL;
265                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
266                 ovs_dp_upcall(dp, skb, &upcall);
267                 consume_skb(skb);
268                 stats_counter = &stats->n_missed;
269                 goto out;
270         }
271
272         OVS_CB(skb)->pkt_key = pkt_key;
273         OVS_CB(skb)->flow = flow;
274
275         ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb);
276         ovs_execute_actions(dp, skb, recirc);
277         stats_counter = &stats->n_hit;
278
279 out:
280         /* Update datapath statistics. */
281         u64_stats_update_begin(&stats->sync);
282         (*stats_counter)++;
283         stats->n_mask_hit += n_mask_hit;
284         u64_stats_update_end(&stats->sync);
285 }
286
287 /* Must be called with rcu_read_lock. */
288 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
289 {
290         int error;
291         struct sw_flow_key key;
292
293         OVS_CB(skb)->input_vport = p;
294
295         /* Extract flow from 'skb' into 'key'. */
296         error = ovs_flow_extract(skb, p->port_no, &key);
297         if (unlikely(error)) {
298                 kfree_skb(skb);
299                 return;
300         }
301
302         ovs_dp_process_packet_with_key(skb, &key, false);
303 }
304
305 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
306                   const struct dp_upcall_info *upcall_info)
307 {
308         struct dp_stats_percpu *stats;
309         int err;
310
311         if (upcall_info->portid == 0) {
312                 err = -ENOTCONN;
313                 goto err;
314         }
315
316         if (!skb_is_gso(skb))
317                 err = queue_userspace_packet(dp, skb, upcall_info);
318         else
319                 err = queue_gso_packets(dp, skb, upcall_info);
320         if (err)
321                 goto err;
322
323         return 0;
324
325 err:
326         stats = this_cpu_ptr(dp->stats_percpu);
327
328         u64_stats_update_begin(&stats->sync);
329         stats->n_lost++;
330         u64_stats_update_end(&stats->sync);
331
332         return err;
333 }
334
335 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
336                              const struct dp_upcall_info *upcall_info)
337 {
338         unsigned short gso_type = skb_shinfo(skb)->gso_type;
339         struct dp_upcall_info later_info;
340         struct sw_flow_key later_key;
341         struct sk_buff *segs, *nskb;
342         int err;
343
344         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
345         if (IS_ERR(segs))
346                 return PTR_ERR(segs);
347
348         /* Queue all of the segments. */
349         skb = segs;
350         do {
351                 err = queue_userspace_packet(dp, skb, upcall_info);
352                 if (err)
353                         break;
354
355                 if (skb == segs && gso_type & SKB_GSO_UDP) {
356                         /* The initial flow key extracted by ovs_flow_extract()
357                          * in this case is for a first fragment, so we need to
358                          * properly mark later fragments.
359                          */
360                         later_key = *upcall_info->key;
361                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
362
363                         later_info = *upcall_info;
364                         later_info.key = &later_key;
365                         upcall_info = &later_info;
366                 }
367         } while ((skb = skb->next));
368
369         /* Free all of the segments. */
370         skb = segs;
371         do {
372                 nskb = skb->next;
373                 if (err)
374                         kfree_skb(skb);
375                 else
376                         consume_skb(skb);
377         } while ((skb = nskb));
378         return err;
379 }
380
381 static size_t key_attr_size(void)
382 {
383         /* Whenever new OVS_KEY_ATTR_* fields are added, we should consider
384          * updating this function. */
385         BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 21);
386
387         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
388                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
389                   + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
390                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
391                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
392                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
393                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
394                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
395                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
396                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_OAM */
397                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
398                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
399                 + nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
400                 + nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
401                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
402                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
403                 + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
404                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
405                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
406                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
407                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
408                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
409 }
410
411 static size_t upcall_msg_size(const struct nlattr *userdata,
412                               unsigned int hdrlen)
413 {
414         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
415                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
416                 + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
417
418         /* OVS_PACKET_ATTR_USERDATA */
419         if (userdata)
420                 size += NLA_ALIGN(userdata->nla_len);
421
422         return size;
423 }
424
425 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
426                                   const struct dp_upcall_info *upcall_info)
427 {
428         struct ovs_header *upcall;
429         struct sk_buff *nskb = NULL;
430         struct sk_buff *user_skb; /* to be queued to userspace */
431         struct nlattr *nla;
432         struct genl_info info = {
433 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
434                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
435 #endif
436                 .snd_portid = upcall_info->portid,
437         };
438         size_t len;
439         unsigned int hlen;
440         int err, dp_ifindex;
441
442         dp_ifindex = get_dpifindex(dp);
443         if (!dp_ifindex)
444                 return -ENODEV;
445
446         if (vlan_tx_tag_present(skb)) {
447                 nskb = skb_clone(skb, GFP_ATOMIC);
448                 if (!nskb)
449                         return -ENOMEM;
450
451                 nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
452                 if (!nskb)
453                         return -ENOMEM;
454
455                 vlan_set_tci(nskb, 0);
456
457                 skb = nskb;
458         }
459
460         if (nla_attr_size(skb->len) > USHRT_MAX) {
461                 err = -EFBIG;
462                 goto out;
463         }
464
465         /* Complete checksum if needed */
466         if (skb->ip_summed == CHECKSUM_PARTIAL &&
467             (err = skb_checksum_help(skb)))
468                 goto out;
469
470         /* Older versions of OVS user space enforce alignment of the last
471          * Netlink attribute to NLA_ALIGNTO which would require extensive
472          * padding logic. Only perform zerocopy if padding is not required.
473          */
474         if (dp->user_features & OVS_DP_F_UNALIGNED)
475                 hlen = skb_zerocopy_headlen(skb);
476         else
477                 hlen = skb->len;
478
479         len = upcall_msg_size(upcall_info->userdata, hlen);
480         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
481         if (!user_skb) {
482                 err = -ENOMEM;
483                 goto out;
484         }
485
486         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
487                              0, upcall_info->cmd);
488         upcall->dp_ifindex = dp_ifindex;
489
490         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
491         err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
492         BUG_ON(err);
493         nla_nest_end(user_skb, nla);
494
495         if (upcall_info->userdata)
496                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
497                           nla_len(upcall_info->userdata),
498                           nla_data(upcall_info->userdata));
499
500         /* Only reserve room for the attribute header; packet data is added
501          * in skb_zerocopy(). */
502         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
503                 err = -ENOBUFS;
504                 goto out;
505         }
506         nla->nla_len = nla_attr_size(skb->len);
507
508         err = skb_zerocopy(user_skb, skb, skb->len, hlen);
509         if (err)
510                 goto out;
511
512         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
513         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
514                 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
515
516                 if (plen > 0)
517                         memset(skb_put(user_skb, plen), 0, plen);
518         }
519
520         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
521
522         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
523 out:
524         if (err)
525                 skb_tx_error(skb);
526         kfree_skb(nskb);
527         return err;
528 }
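
/* Worked example for the padding at the end of queue_userspace_packet() above
 * (illustrative only): NLA_ALIGNTO is 4, so if the assembled message were,
 * say, 61 bytes long, NLA_ALIGN(61) == 64 and plen == 3 zero bytes are
 * appended, keeping the final attribute aligned for older userspace.
 */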
529
530 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
531 {
532         struct ovs_header *ovs_header = info->userhdr;
533         struct nlattr **a = info->attrs;
534         struct sw_flow_actions *acts;
535         struct sk_buff *packet;
536         struct sw_flow *flow;
537         struct datapath *dp;
538         struct ethhdr *eth;
539         struct vport *input_vport;
540         int len;
541         int err;
542
543         err = -EINVAL;
544         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
545             !a[OVS_PACKET_ATTR_ACTIONS])
546                 goto err;
547
548         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
549         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
550         err = -ENOMEM;
551         if (!packet)
552                 goto err;
553         skb_reserve(packet, NET_IP_ALIGN);
554
555         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
556
557         skb_reset_mac_header(packet);
558         eth = eth_hdr(packet);
559
560         /* Normally, setting the skb 'protocol' field would be handled by a
561          * call to eth_type_trans(), but it assumes there's a sending
562          * device, which we may not have. */
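        /* ETH_P_802_3_MIN is 0x0600: values at or above it are real EtherTypes
         * (e.g. 0x0800 for IPv4); smaller values are 802.3 length fields, so
         * the frame is treated as 802.2 LLC below. */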
563         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
564                 packet->protocol = eth->h_proto;
565         else
566                 packet->protocol = htons(ETH_P_802_2);
567
568         /* Build an sw_flow for sending this packet. */
569         flow = ovs_flow_alloc();
570         err = PTR_ERR(flow);
571         if (IS_ERR(flow))
572                 goto err_kfree_skb;
573
574         err = ovs_flow_extract(packet, -1, &flow->key);
575         if (err)
576                 goto err_flow_free;
577
578         err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
579         if (err)
580                 goto err_flow_free;
581         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
582         err = PTR_ERR(acts);
583         if (IS_ERR(acts))
584                 goto err_flow_free;
585
586         err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
587                                    &flow->key, 0, &acts);
588         rcu_assign_pointer(flow->sf_acts, acts);
589         if (err)
590                 goto err_flow_free;
591
592         OVS_CB(packet)->flow = flow;
593         OVS_CB(packet)->pkt_key = &flow->key;
594         packet->priority = flow->key.phy.priority;
595         packet->mark = flow->key.phy.skb_mark;
596
597         rcu_read_lock();
598         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
599         err = -ENODEV;
600         if (!dp)
601                 goto err_unlock;
602
603         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
604         if (!input_vport)
605                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
606
607         if (!input_vport)
608                 goto err_unlock;
609
610         OVS_CB(packet)->input_vport = input_vport;
611
612         local_bh_disable();
613         err = ovs_execute_actions(dp, packet, false);
614         local_bh_enable();
615         rcu_read_unlock();
616
617         ovs_flow_free(flow, false);
618         return err;
619
620 err_unlock:
621         rcu_read_unlock();
622 err_flow_free:
623         ovs_flow_free(flow, false);
624 err_kfree_skb:
625         kfree_skb(packet);
626 err:
627         return err;
628 }
629
630 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
631         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
632         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
633         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
634 };
635
636 static struct genl_ops dp_packet_genl_ops[] = {
637         { .cmd = OVS_PACKET_CMD_EXECUTE,
638           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
639           .policy = packet_policy,
640           .doit = ovs_packet_cmd_execute
641         }
642 };
643
644 static struct genl_family dp_packet_genl_family = {
645         .id = GENL_ID_GENERATE,
646         .hdrsize = sizeof(struct ovs_header),
647         .name = OVS_PACKET_FAMILY,
648         .version = OVS_PACKET_VERSION,
649         .maxattr = OVS_PACKET_ATTR_MAX,
650         .netnsok = true,
651         .parallel_ops = true,
652         .ops = dp_packet_genl_ops,
653         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
654 };
655
656 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
657                          struct ovs_dp_megaflow_stats *mega_stats)
658 {
659         int i;
660
661         memset(mega_stats, 0, sizeof(*mega_stats));
662
663         stats->n_flows = ovs_flow_tbl_count(&dp->table);
664         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
665
666         stats->n_hit = stats->n_missed = stats->n_lost = 0;
667
668         for_each_possible_cpu(i) {
669                 const struct dp_stats_percpu *percpu_stats;
670                 struct dp_stats_percpu local_stats;
671                 unsigned int start;
672
673                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
674
675                 do {
676                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
677                         local_stats = *percpu_stats;
678                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
679
680                 stats->n_hit += local_stats.n_hit;
681                 stats->n_missed += local_stats.n_missed;
682                 stats->n_lost += local_stats.n_lost;
683                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
684         }
685 }
686
687 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
688 {
689         return NLMSG_ALIGN(sizeof(struct ovs_header))
690                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
691                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
692                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
693                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
694                 + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
695                 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
696 }
697
698 /* Called with ovs_mutex or RCU read lock. */
699 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
700                                   struct sk_buff *skb, u32 portid,
701                                   u32 seq, u32 flags, u8 cmd)
702 {
703         const int skb_orig_len = skb->len;
704         struct nlattr *start;
705         struct ovs_flow_stats stats;
706         __be16 tcp_flags;
707         unsigned long used;
708         struct ovs_header *ovs_header;
709         struct nlattr *nla;
710         int err;
711
712         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
713         if (!ovs_header)
714                 return -EMSGSIZE;
715
716         ovs_header->dp_ifindex = dp_ifindex;
717
718         /* Fill flow key. */
719         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
720         if (!nla)
721                 goto nla_put_failure;
722
723         err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
724         if (err)
725                 goto error;
726         nla_nest_end(skb, nla);
727
728         nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
729         if (!nla)
730                 goto nla_put_failure;
731
732         err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
733         if (err)
734                 goto error;
735
736         nla_nest_end(skb, nla);
737
738         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
739
740         if (used &&
741             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
742                 goto nla_put_failure;
743
744         if (stats.n_packets &&
745             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
746                 goto nla_put_failure;
747
748         if ((u8)ntohs(tcp_flags) &&
749              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
750                 goto nla_put_failure;
751
752         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
753          * this is the first flow to be dumped into 'skb'.  This is unusual for
754          * Netlink but individual action lists can be longer than
755          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
756          * The userspace caller can always fetch the actions separately if it
757          * really wants them.  (Most userspace callers in fact don't care.)
758          *
759          * This can only fail for dump operations because the skb is always
760          * properly sized for single flows.
761          */
762         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
763         if (start) {
764                 const struct sw_flow_actions *sf_acts;
765
766                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
767                 err = ovs_nla_put_actions(sf_acts->actions,
768                                           sf_acts->actions_len, skb);
769
770                 if (!err)
771                         nla_nest_end(skb, start);
772                 else {
773                         if (skb_orig_len)
774                                 goto error;
775
776                         nla_nest_cancel(skb, start);
777                 }
778         } else if (skb_orig_len)
779                 goto nla_put_failure;
780
781         return genlmsg_end(skb, ovs_header);
782
783 nla_put_failure:
784         err = -EMSGSIZE;
785 error:
786         genlmsg_cancel(skb, ovs_header);
787         return err;
788 }
789
790 /* May not be called with RCU read lock. */
791 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
792                                                struct genl_info *info,
793                                                bool always)
794 {
795         struct sk_buff *skb;
796
797         if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
798                 return NULL;
799
800         skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
801
802         if (!skb)
803                 return ERR_PTR(-ENOMEM);
804
805         return skb;
806 }
807
808 /* Called with ovs_mutex. */
809 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
810                                                int dp_ifindex,
811                                                struct genl_info *info, u8 cmd,
812                                                bool always)
813 {
814         struct sk_buff *skb;
815         int retval;
816
817         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
818                                       always);
819         if (!skb || IS_ERR(skb))
820                 return skb;
821
822         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
823                                         info->snd_portid, info->snd_seq, 0,
824                                         cmd);
825         BUG_ON(retval < 0);
826         return skb;
827 }
828
829 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
830 {
831         struct nlattr **a = info->attrs;
832         struct ovs_header *ovs_header = info->userhdr;
833         struct sw_flow *flow, *new_flow;
834         struct sw_flow_mask mask;
835         struct sk_buff *reply;
836         struct datapath *dp;
837         struct sw_flow_actions *acts;
838         struct sw_flow_match match;
839         int error;
840
841         /* Must have key and actions. */
842         error = -EINVAL;
843         if (!a[OVS_FLOW_ATTR_KEY])
844                 goto error;
845         if (!a[OVS_FLOW_ATTR_ACTIONS])
846                 goto error;
847
848         /* Most of the time we need to allocate a new flow, so do it before
849          * taking the lock. */
850         new_flow = ovs_flow_alloc();
851         if (IS_ERR(new_flow)) {
852                 error = PTR_ERR(new_flow);
853                 goto error;
854         }
855
856         /* Extract key. */
857         ovs_match_init(&match, &new_flow->unmasked_key, &mask);
858         error = ovs_nla_get_match(&match,
859                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
860         if (error)
861                 goto err_kfree_flow;
862
863         ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
864
865         /* Validate actions. */
866         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
867         error = PTR_ERR(acts);
868         if (IS_ERR(acts))
869                 goto err_kfree_flow;
870
871         error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
872                                      0, &acts);
873         if (error) {
874                 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
875                 goto err_kfree_acts;
876         }
877
878         reply = ovs_flow_cmd_alloc_info(acts, info, false);
879         if (IS_ERR(reply)) {
880                 error = PTR_ERR(reply);
881                 goto err_kfree_acts;
882         }
883
884         ovs_lock();
885         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
886         if (unlikely(!dp)) {
887                 error = -ENODEV;
888                 goto err_unlock_ovs;
889         }
890         /* Check if this is a duplicate flow */
891         flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
892         if (likely(!flow)) {
893                 rcu_assign_pointer(new_flow->sf_acts, acts);
894
895                 /* Put flow in bucket. */
896                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
897                 if (unlikely(error)) {
898                         acts = NULL;
899                         goto err_unlock_ovs;
900                 }
901
902                 if (unlikely(reply)) {
903                         error = ovs_flow_cmd_fill_info(new_flow,
904                                                        ovs_header->dp_ifindex,
905                                                        reply, info->snd_portid,
906                                                        info->snd_seq, 0,
907                                                        OVS_FLOW_CMD_NEW);
908                         BUG_ON(error < 0);
909                 }
910                 ovs_unlock();
911         } else {
912                 struct sw_flow_actions *old_acts;
913
914                 /* Bail out if we're not allowed to modify an existing flow.
915                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
916                  * because Generic Netlink treats the latter as a dump
917                  * request.  We also accept NLM_F_EXCL in case that bug ever
918                  * gets fixed.
919                  */
920                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
921                                                          | NLM_F_EXCL))) {
922                         error = -EEXIST;
923                         goto err_unlock_ovs;
924                 }
925                 /* The unmasked key has to be the same for flow updates. */
926                 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
927                         error = -EEXIST;
928                         goto err_unlock_ovs;
929                 }
930                 /* Update actions. */
931                 old_acts = ovsl_dereference(flow->sf_acts);
932                 rcu_assign_pointer(flow->sf_acts, acts);
933
934                 if (unlikely(reply)) {
935                         error = ovs_flow_cmd_fill_info(flow,
936                                                        ovs_header->dp_ifindex,
937                                                        reply, info->snd_portid,
938                                                        info->snd_seq, 0,
939                                                        OVS_FLOW_CMD_NEW);
940                         BUG_ON(error < 0);
941                 }
942                 ovs_unlock();
943
944                 ovs_nla_free_flow_actions(old_acts);
945                 ovs_flow_free(new_flow, false);
946         }
947
948         if (reply)
949                 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
950         return 0;
951
952 err_unlock_ovs:
953         ovs_unlock();
954         kfree_skb(reply);
955 err_kfree_acts:
956         kfree(acts);
957 err_kfree_flow:
958         ovs_flow_free(new_flow, false);
959 error:
960         return error;
961 }
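
/* Sketch of the userspace side implied by the NLM_F_CREATE/NLM_F_EXCL note in
 * ovs_flow_cmd_new() above (illustrative, not the actual ovs-vswitchd code):
 *
 *	// "create the flow, fail with EEXIST if it already exists"
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
 *
 * NLM_F_EXCL is accepted as well, but has historically been treated by
 * Generic Netlink as a dump request.
 */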
962
963 static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
964                                                 const struct sw_flow_key *key,
965                                                 const struct sw_flow_mask *mask)
966 {
967         struct sw_flow_actions *acts;
968         struct sw_flow_key masked_key;
969         int error;
970
971         acts = ovs_nla_alloc_flow_actions(nla_len(a));
972         if (IS_ERR(acts))
973                 return acts;
974
975         ovs_flow_mask_key(&masked_key, key, mask);
976         error = ovs_nla_copy_actions(a, &masked_key, 0, &acts);
977         if (error) {
978                 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
979                 kfree(acts);
980                 return ERR_PTR(error);
981         }
982
983         return acts;
984 }
985
986 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
987 {
988         struct nlattr **a = info->attrs;
989         struct ovs_header *ovs_header = info->userhdr;
990         struct sw_flow_key key;
991         struct sw_flow *flow;
992         struct sw_flow_mask mask;
993         struct sk_buff *reply = NULL;
994         struct datapath *dp;
995         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
996         struct sw_flow_match match;
997         int error;
998
999         /* Extract key. */
1000         error = -EINVAL;
1001         if (!a[OVS_FLOW_ATTR_KEY])
1002                 goto error;
1003
1004         ovs_match_init(&match, &key, &mask);
1005         error = ovs_nla_get_match(&match,
1006                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
1007         if (error)
1008                 goto error;
1009
1010         /* Validate actions. */
1011         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1012                 acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask);
1013                 if (IS_ERR(acts)) {
1014                         error = PTR_ERR(acts);
1015                         goto error;
1016                 }
1017         }
1018
1019         /* Can allocate before locking if have acts. */
1020         if (acts) {
1021                 reply = ovs_flow_cmd_alloc_info(acts, info, false);
1022                 if (IS_ERR(reply)) {
1023                         error = PTR_ERR(reply);
1024                         goto err_kfree_acts;
1025                 }
1026         }
1027
1028         ovs_lock();
1029         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1030         if (unlikely(!dp)) {
1031                 error = -ENODEV;
1032                 goto err_unlock_ovs;
1033         }
1034         /* Check that the flow exists. */
1035         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1036         if (unlikely(!flow)) {
1037                 error = -ENOENT;
1038                 goto err_unlock_ovs;
1039         }
1040         /* The unmasked key has to be the same for flow updates. */
1041         if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
1042                 error = -EEXIST;
1043                 goto err_unlock_ovs;
1044         }
1045         /* Update actions, if present. */
1046         if (likely(acts)) {
1047                 old_acts = ovsl_dereference(flow->sf_acts);
1048                 rcu_assign_pointer(flow->sf_acts, acts);
1049
1050                 if (unlikely(reply)) {
1051                         error = ovs_flow_cmd_fill_info(flow,
1052                                                        ovs_header->dp_ifindex,
1053                                                        reply, info->snd_portid,
1054                                                        info->snd_seq, 0,
1055                                                        OVS_FLOW_CMD_NEW);
1056                         BUG_ON(error < 0);
1057                 }
1058         } else {
1059                 /* Reply could not be allocated before locking (no acts to size it). */
1060                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1061                                                 info, OVS_FLOW_CMD_NEW, false);
1062                 if (unlikely(IS_ERR(reply))) {
1063                         error = PTR_ERR(reply);
1064                         goto err_unlock_ovs;
1065                 }
1066         }
1067
1068         /* Clear stats. */
1069         if (a[OVS_FLOW_ATTR_CLEAR])
1070                 ovs_flow_stats_clear(flow);
1071         ovs_unlock();
1072
1073         if (reply)
1074                 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1075         if (old_acts)
1076                 ovs_nla_free_flow_actions(old_acts);
1077         return 0;
1078
1079 err_unlock_ovs:
1080         ovs_unlock();
1081         kfree_skb(reply);
1082 err_kfree_acts:
1083         kfree(acts);
1084 error:
1085         return error;
1086 }
1087
1088 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1089 {
1090         struct nlattr **a = info->attrs;
1091         struct ovs_header *ovs_header = info->userhdr;
1092         struct sw_flow_key key;
1093         struct sk_buff *reply;
1094         struct sw_flow *flow;
1095         struct datapath *dp;
1096         struct sw_flow_match match;
1097         int err;
1098
1099         if (!a[OVS_FLOW_ATTR_KEY]) {
1100                 OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
1101                 return -EINVAL;
1102         }
1103
1104         ovs_match_init(&match, &key, NULL);
1105         err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1106         if (err)
1107                 return err;
1108
1109         ovs_lock();
1110         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1111         if (!dp) {
1112                 err = -ENODEV;
1113                 goto unlock;
1114         }
1115
1116         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1117         if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
1118                 err = -ENOENT;
1119                 goto unlock;
1120         }
1121
1122         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1123                                         OVS_FLOW_CMD_NEW, true);
1124         if (IS_ERR(reply)) {
1125                 err = PTR_ERR(reply);
1126                 goto unlock;
1127         }
1128
1129         ovs_unlock();
1130         return genlmsg_reply(reply, info);
1131 unlock:
1132         ovs_unlock();
1133         return err;
1134 }
1135
1136 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1137 {
1138         struct nlattr **a = info->attrs;
1139         struct ovs_header *ovs_header = info->userhdr;
1140         struct sw_flow_key key;
1141         struct sk_buff *reply;
1142         struct sw_flow *flow;
1143         struct datapath *dp;
1144         struct sw_flow_match match;
1145         int err;
1146
1147         if (likely(a[OVS_FLOW_ATTR_KEY])) {
1148                 ovs_match_init(&match, &key, NULL);
1149                 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1150                 if (unlikely(err))
1151                         return err;
1152         }
1153
1154         ovs_lock();
1155         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1156         if (unlikely(!dp)) {
1157                 err = -ENODEV;
1158                 goto unlock;
1159         }
1160         if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
1161                 err = ovs_flow_tbl_flush(&dp->table);
1162                 goto unlock;
1163         }
1164         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1165         if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
1166                 err = -ENOENT;
1167                 goto unlock;
1168         }
1169
1170         ovs_flow_tbl_remove(&dp->table, flow);
1171         ovs_unlock();
1172
1173         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
1174                                         info, false);
1175
1176         if (likely(reply)) {
1177                 if (likely(!IS_ERR(reply))) {
1178                         rcu_read_lock(); /* Keep RCU checker happy. */
1179                         err = ovs_flow_cmd_fill_info(flow,
1180                                                      ovs_header->dp_ifindex,
1181                                                      reply, info->snd_portid,
1182                                                      info->snd_seq, 0,
1183                                                      OVS_FLOW_CMD_DEL);
1184                         rcu_read_unlock();
1185                         BUG_ON(err < 0);
1186                         ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1187                 } else {
1188                         genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
1189                                      GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
1190
1191                 }
1192         }
1193
1194         ovs_flow_free(flow, true);
1195         return 0;
1196 unlock:
1197         ovs_unlock();
1198         return err;
1199 }
1200
1201 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1202 {
1203         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1204         struct table_instance *ti;
1205         struct datapath *dp;
1206
1207         rcu_read_lock();
1208         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1209         if (!dp) {
1210                 rcu_read_unlock();
1211                 return -ENODEV;
1212         }
1213
1214         ti = rcu_dereference(dp->table.ti);
1215         for (;;) {
1216                 struct sw_flow *flow;
1217                 u32 bucket, obj;
1218
1219                 bucket = cb->args[0];
1220                 obj = cb->args[1];
1221                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1222                 if (!flow)
1223                         break;
1224
1225                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1226                                            NETLINK_CB(cb->skb).portid,
1227                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1228                                            OVS_FLOW_CMD_NEW) < 0)
1229                         break;
1230
1231                 cb->args[0] = bucket;
1232                 cb->args[1] = obj;
1233         }
1234         rcu_read_unlock();
1235         return skb->len;
1236 }
1237
1238 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1239         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1240         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1241         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1242 };
1243
1244 static struct genl_ops dp_flow_genl_ops[] = {
1245         { .cmd = OVS_FLOW_CMD_NEW,
1246           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1247           .policy = flow_policy,
1248           .doit = ovs_flow_cmd_new
1249         },
1250         { .cmd = OVS_FLOW_CMD_DEL,
1251           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1252           .policy = flow_policy,
1253           .doit = ovs_flow_cmd_del
1254         },
1255         { .cmd = OVS_FLOW_CMD_GET,
1256           .flags = 0,               /* OK for unprivileged users. */
1257           .policy = flow_policy,
1258           .doit = ovs_flow_cmd_get,
1259           .dumpit = ovs_flow_cmd_dump
1260         },
1261         { .cmd = OVS_FLOW_CMD_SET,
1262           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1263           .policy = flow_policy,
1264           .doit = ovs_flow_cmd_set,
1265         },
1266 };
1267
1268 static struct genl_family dp_flow_genl_family = {
1269         .id = GENL_ID_GENERATE,
1270         .hdrsize = sizeof(struct ovs_header),
1271         .name = OVS_FLOW_FAMILY,
1272         .version = OVS_FLOW_VERSION,
1273         .maxattr = OVS_FLOW_ATTR_MAX,
1274         .netnsok = true,
1275         .parallel_ops = true,
1276         .ops = dp_flow_genl_ops,
1277         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1278         .mcgrps = &ovs_dp_flow_multicast_group,
1279         .n_mcgrps = 1,
1280 };
1281
1282 static size_t ovs_dp_cmd_msg_size(void)
1283 {
1284         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1285
1286         msgsize += nla_total_size(IFNAMSIZ);
1287         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1288         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1289         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1290
1291         return msgsize;
1292 }
1293
1294 /* Called with ovs_mutex or RCU read lock. */
1295 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1296                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1297 {
1298         struct ovs_header *ovs_header;
1299         struct ovs_dp_stats dp_stats;
1300         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1301         int err;
1302
1303         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1304                                    flags, cmd);
1305         if (!ovs_header)
1306                 goto error;
1307
1308         ovs_header->dp_ifindex = get_dpifindex(dp);
1309
1310         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1311         if (err)
1312                 goto nla_put_failure;
1313
1314         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1315         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1316                         &dp_stats))
1317                 goto nla_put_failure;
1318
1319         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1320                         sizeof(struct ovs_dp_megaflow_stats),
1321                         &dp_megaflow_stats))
1322                 goto nla_put_failure;
1323
1324         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1325                 goto nla_put_failure;
1326
1327         return genlmsg_end(skb, ovs_header);
1328
1329 nla_put_failure:
1330         genlmsg_cancel(skb, ovs_header);
1331 error:
1332         return -EMSGSIZE;
1333 }
1334
1335 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1336 {
1337         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1338 }
1339
1340 /* Called with rcu_read_lock or ovs_mutex. */
1341 static struct datapath *lookup_datapath(struct net *net,
1342                                         struct ovs_header *ovs_header,
1343                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1344 {
1345         struct datapath *dp;
1346
1347         if (!a[OVS_DP_ATTR_NAME])
1348                 dp = get_dp(net, ovs_header->dp_ifindex);
1349         else {
1350                 struct vport *vport;
1351
1352                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1353                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1354         }
1355         return dp ? dp : ERR_PTR(-ENODEV);
1356 }
1357
1358 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1359 {
1360         struct datapath *dp;
1361
1362         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1363         if (IS_ERR(dp))
1364                 return;
1365
1366         WARN(dp->user_features, "Dropping previously announced user features\n");
1367         dp->user_features = 0;
1368 }
1369
1370 static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
1371 {
1372         if (a[OVS_DP_ATTR_USER_FEATURES])
1373                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1374 }
1375
1376 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1377 {
1378         struct nlattr **a = info->attrs;
1379         struct vport_parms parms;
1380         struct sk_buff *reply;
1381         struct datapath *dp;
1382         struct vport *vport;
1383         struct ovs_net *ovs_net;
1384         int err, i;
1385
1386         err = -EINVAL;
1387         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1388                 goto err;
1389
1390         reply = ovs_dp_cmd_alloc_info(info);
1391         if (!reply)
1392                 return -ENOMEM;
1393
1394         err = -ENOMEM;
1395         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1396         if (dp == NULL)
1397                 goto err_free_reply;
1398
1399         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1400
1401         /* Allocate table. */
1402         err = ovs_flow_tbl_init(&dp->table);
1403         if (err)
1404                 goto err_free_dp;
1405
1406         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1407         if (!dp->stats_percpu) {
1408                 err = -ENOMEM;
1409                 goto err_destroy_table;
1410         }
1411
1412         for_each_possible_cpu(i) {
1413                 struct dp_stats_percpu *dpath_stats;
1414                 dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
1415                 u64_stats_init(&dpath_stats->sync);
1416         }
1417
1418         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1419                             GFP_KERNEL);
1420         if (!dp->ports) {
1421                 err = -ENOMEM;
1422                 goto err_destroy_percpu;
1423         }
1424
1425         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1426                 INIT_HLIST_HEAD(&dp->ports[i]);
1427
1428         /* Set up our datapath device. */
1429         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1430         parms.type = OVS_VPORT_TYPE_INTERNAL;
1431         parms.options = NULL;
1432         parms.dp = dp;
1433         parms.port_no = OVSP_LOCAL;
1434         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1435
1436         ovs_dp_change(dp, a);
1437
1438         /* So far only local changes have been made; now we need the lock. */
1439         ovs_lock();
1440
1441         vport = new_vport(&parms);
1442         if (IS_ERR(vport)) {
1443                 err = PTR_ERR(vport);
1444                 if (err == -EBUSY)
1445                         err = -EEXIST;
1446
1447                 if (err == -EEXIST) {
1448                         /* An outdated user space instance that does not understand
1449                          * the concept of user_features has attempted to create a new
1450                          * datapath and is likely to reuse it. Drop all user features.
1451                          */
1452                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1453                                 ovs_dp_reset_user_features(skb, info);
1454                 }
1455
1456                 goto err_destroy_ports_array;
1457         }
1458
1459         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1460                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1461         BUG_ON(err < 0);
1462
1463         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1464         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1465
1466         ovs_unlock();
1467
1468         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1469         return 0;
1470
1471 err_destroy_ports_array:
1472         ovs_unlock();
1473         kfree(dp->ports);
1474 err_destroy_percpu:
1475         free_percpu(dp->stats_percpu);
1476 err_destroy_table:
1477         ovs_flow_tbl_destroy(&dp->table);
1478 err_free_dp:
1479         release_net(ovs_dp_get_net(dp));
1480         kfree(dp);
1481 err_free_reply:
1482         kfree_skb(reply);
1483 err:
1484         return err;
1485 }
1486
1487 /* Called with ovs_mutex. */
1488 static void __dp_destroy(struct datapath *dp)
1489 {
1490         int i;
1491
1492         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1493                 struct vport *vport;
1494                 struct hlist_node *n;
1495
1496                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1497                         if (vport->port_no != OVSP_LOCAL)
1498                                 ovs_dp_detach_port(vport);
1499         }
1500
1501         list_del_rcu(&dp->list_node);
1502
1503         /* OVSP_LOCAL is the datapath's internal port.  Make sure that all
1504          * ports in the datapath are destroyed before the datapath is freed.
1505          */
1506         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1507
1508         /* RCU destroy the flow table */
1509         call_rcu(&dp->rcu, destroy_dp_rcu);
1510 }
1511
1512 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1513 {
1514         struct sk_buff *reply;
1515         struct datapath *dp;
1516         int err;
1517
1518         reply = ovs_dp_cmd_alloc_info(info);
1519         if (!reply)
1520                 return -ENOMEM;
1521
1522         ovs_lock();
1523         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1524         err = PTR_ERR(dp);
1525         if (IS_ERR(dp))
1526                 goto err_unlock_free;
1527
1528         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1529                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1530         BUG_ON(err < 0);
1531
1532         __dp_destroy(dp);
1533
1534         ovs_unlock();
1535         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1536         return 0;
1537
1538 err_unlock_free:
1539         ovs_unlock();
1540         kfree_skb(reply);
1541         return err;
1542 }
1543
1544 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1545 {
1546         struct sk_buff *reply;
1547         struct datapath *dp;
1548         int err;
1549
1550         reply = ovs_dp_cmd_alloc_info(info);
1551         if (!reply)
1552                 return -ENOMEM;
1553
1554         ovs_lock();
1555         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1556         err = PTR_ERR(dp);
1557         if (IS_ERR(dp))
1558                 goto err_unlock_free;
1559
1560         ovs_dp_change(dp, info->attrs);
1561
1562         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1563                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1564         BUG_ON(err < 0);
1565
1566         ovs_unlock();
1567         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1568         return 0;
1569
1570 err_unlock_free:
1571         ovs_unlock();
1572         kfree_skb(reply);
1573         return err;
1574 }
1575
1576 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1577 {
1578         struct sk_buff *reply;
1579         struct datapath *dp;
1580         int err;
1581
1582         reply = ovs_dp_cmd_alloc_info(info);
1583         if (!reply)
1584                 return -ENOMEM;
1585
1586         rcu_read_lock();
1587         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1588         if (IS_ERR(dp)) {
1589                 err = PTR_ERR(dp);
1590                 goto err_unlock_free;
1591         }
1592         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1593                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1594         BUG_ON(err < 0);
1595         rcu_read_unlock();
1596
1597         return genlmsg_reply(reply, info);
1598
1599 err_unlock_free:
1600         rcu_read_unlock();
1601         kfree_skb(reply);
1602         return err;
1603 }
1604
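     /* Datapath dumps are resumable: cb->args[0] holds how many datapaths have
      * already been emitted, and that many entries are skipped on the next
      * call.
      */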
1605 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1606 {
1607         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1608         struct datapath *dp;
1609         int skip = cb->args[0];
1610         int i = 0;
1611
1612         rcu_read_lock();
1613         list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1614                 if (i >= skip &&
1615                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1616                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1617                                          OVS_DP_CMD_NEW) < 0)
1618                         break;
1619                 i++;
1620         }
1621         rcu_read_unlock();
1622
1623         cb->args[0] = i;
1624
1625         return skb->len;
1626 }
1627
1628 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1629         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1630         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1631         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1632 };
1633
1634 static struct genl_ops dp_datapath_genl_ops[] = {
1635         { .cmd = OVS_DP_CMD_NEW,
1636           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1637           .policy = datapath_policy,
1638           .doit = ovs_dp_cmd_new
1639         },
1640         { .cmd = OVS_DP_CMD_DEL,
1641           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1642           .policy = datapath_policy,
1643           .doit = ovs_dp_cmd_del
1644         },
1645         { .cmd = OVS_DP_CMD_GET,
1646           .flags = 0,               /* OK for unprivileged users. */
1647           .policy = datapath_policy,
1648           .doit = ovs_dp_cmd_get,
1649           .dumpit = ovs_dp_cmd_dump
1650         },
1651         { .cmd = OVS_DP_CMD_SET,
1652           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1653           .policy = datapath_policy,
1654           .doit = ovs_dp_cmd_set,
1655         },
1656 };
1657
1658 static struct genl_family dp_datapath_genl_family = {
1659         .id = GENL_ID_GENERATE,
1660         .hdrsize = sizeof(struct ovs_header),
1661         .name = OVS_DATAPATH_FAMILY,
1662         .version = OVS_DATAPATH_VERSION,
1663         .maxattr = OVS_DP_ATTR_MAX,
1664         .netnsok = true,
1665         .parallel_ops = true,
1666         .ops = dp_datapath_genl_ops,
1667         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1668         .mcgrps = &ovs_dp_datapath_multicast_group,
1669         .n_mcgrps = 1,
1670 };
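
     /* Illustrative sketch, not used by the kernel code itself: a request to
      * this family is an ordinary Generic Netlink message, i.e. an nlmsghdr
      * plus genlmsghdr (cmd = OVS_DP_CMD_*, version = OVS_DATAPATH_VERSION),
      * followed by the fixed struct ovs_header (hdrsize above) and netlink
      * attributes validated against datapath_policy, e.g. OVS_DP_ATTR_NAME.
      * Because the family id is dynamic (GENL_ID_GENERATE), userspace first
      * resolves it by name (OVS_DATAPATH_FAMILY) via the generic netlink
      * controller (CTRL_CMD_GETFAMILY).
      */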
1671
1672 /* Called with ovs_mutex or RCU read lock. */
1673 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1674                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1675 {
1676         struct ovs_header *ovs_header;
1677         struct ovs_vport_stats vport_stats;
1678         int err;
1679
1680         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1681                                  flags, cmd);
1682         if (!ovs_header)
1683                 return -EMSGSIZE;
1684
1685         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1686
1687         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1688             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1689             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)))
1690                 goto nla_put_failure;
1691
1692         ovs_vport_get_stats(vport, &vport_stats);
1693         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1694                     &vport_stats))
1695                 goto nla_put_failure;
1696
1697         if (ovs_vport_get_upcall_portids(vport, skb))
1698                 goto nla_put_failure;
1699
1700         err = ovs_vport_get_options(vport, skb);
1701         if (err == -EMSGSIZE)
1702                 goto error;
1703
1704         return genlmsg_end(skb, ovs_header);
1705
1706 nla_put_failure:
1707         err = -EMSGSIZE;
1708 error:
1709         genlmsg_cancel(skb, ovs_header);
1710         return err;
1711 }
1712
1713 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1714 {
1715         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1716 }
1717
1718 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1719 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1720                                          u32 seq, u8 cmd)
1721 {
1722         struct sk_buff *skb;
1723         int retval;
1724
1725         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1726         if (!skb)
1727                 return ERR_PTR(-ENOMEM);
1728
1729         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1730         BUG_ON(retval < 0);
1731
1732         return skb;
1733 }
1734
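     /* Resolves a vport either globally by OVS_VPORT_ATTR_NAME (cross-checking
      * the datapath when dp_ifindex is nonzero) or by datapath plus
      * OVS_VPORT_ATTR_PORT_NO; returns an ERR_PTR on failure.
      */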
1735 /* Called with ovs_mutex or RCU read lock. */
1736 static struct vport *lookup_vport(struct net *net,
1737                                   struct ovs_header *ovs_header,
1738                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1739 {
1740         struct datapath *dp;
1741         struct vport *vport;
1742
1743         if (a[OVS_VPORT_ATTR_NAME]) {
1744                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1745                 if (!vport)
1746                         return ERR_PTR(-ENODEV);
1747                 if (ovs_header->dp_ifindex &&
1748                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1749                         return ERR_PTR(-ENODEV);
1750                 return vport;
1751         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1752                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1753
1754                 if (port_no >= DP_MAX_PORTS)
1755                         return ERR_PTR(-EFBIG);
1756
1757                 dp = get_dp(net, ovs_header->dp_ifindex);
1758                 if (!dp)
1759                         return ERR_PTR(-ENODEV);
1760
1761                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1762                 if (!vport)
1763                         return ERR_PTR(-ENODEV);
1764                 return vport;
1765         } else
1766                 return ERR_PTR(-EINVAL);
1767 }
1768
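     /* OVS_VPORT_ATTR_NAME, _TYPE and _UPCALL_PID are mandatory. When no port
      * number is requested, the lowest free one is picked starting at 1,
      * leaving OVSP_LOCAL for the datapath's internal port.
      */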
1769 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1770 {
1771         struct nlattr **a = info->attrs;
1772         struct ovs_header *ovs_header = info->userhdr;
1773         struct vport_parms parms;
1774         struct sk_buff *reply;
1775         struct vport *vport;
1776         struct datapath *dp;
1777         u32 port_no;
1778         int err;
1779
1780         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1781             !a[OVS_VPORT_ATTR_UPCALL_PID])
1782                 return -EINVAL;
1783
1784         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1785                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1786         if (port_no >= DP_MAX_PORTS)
1787                 return -EFBIG;
1788
1789         reply = ovs_vport_cmd_alloc_info();
1790         if (!reply)
1791                 return -ENOMEM;
1792
1793         ovs_lock();
1794         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1795         err = -ENODEV;
1796         if (!dp)
1797                 goto exit_unlock_free;
1798
1799         if (port_no) {
1800                 vport = ovs_vport_ovsl(dp, port_no);
1801                 err = -EBUSY;
1802                 if (vport)
1803                         goto exit_unlock_free;
1804         } else {
1805                 for (port_no = 1; ; port_no++) {
1806                         if (port_no >= DP_MAX_PORTS) {
1807                                 err = -EFBIG;
1808                                 goto exit_unlock_free;
1809                         }
1810                         vport = ovs_vport_ovsl(dp, port_no);
1811                         if (!vport)
1812                                 break;
1813                 }
1814         }
1815
1816         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1817         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1818         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1819         parms.dp = dp;
1820         parms.port_no = port_no;
1821         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1822
1823         vport = new_vport(&parms);
1824         err = PTR_ERR(vport);
1825         if (IS_ERR(vport))
1826                 goto exit_unlock_free;
1827
1828         err = 0;
1829         if (a[OVS_VPORT_ATTR_STATS])
1830                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1831
1832         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1833                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1834         BUG_ON(err < 0);
1835         ovs_unlock();
1836
1837         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1838         return 0;
1839
1840 exit_unlock_free:
1841         ovs_unlock();
1842         kfree_skb(reply);
1843         return err;
1844 }
1845
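     /* A vport's type is fixed at creation: OVS_VPORT_CMD_SET may update
      * options, statistics and upcall port ids, but a mismatching
      * OVS_VPORT_ATTR_TYPE is rejected with -EINVAL.
      */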
1846 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1847 {
1848         struct nlattr **a = info->attrs;
1849         struct sk_buff *reply;
1850         struct vport *vport;
1851         int err;
1852
1853         reply = ovs_vport_cmd_alloc_info();
1854         if (!reply)
1855                 return -ENOMEM;
1856
1857         ovs_lock();
1858         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1859         err = PTR_ERR(vport);
1860         if (IS_ERR(vport))
1861                 goto exit_unlock_free;
1862
1863         if (a[OVS_VPORT_ATTR_TYPE] &&
1864             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1865                 err = -EINVAL;
1866                 goto exit_unlock_free;
1867         }
1868
1869         if (a[OVS_VPORT_ATTR_OPTIONS]) {
1870                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1871                 if (err)
1872                         goto exit_unlock_free;
1873         }
1874
1875         if (a[OVS_VPORT_ATTR_STATS])
1876                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1877
1879         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1880                 err = ovs_vport_set_upcall_portids(vport,
1881                                                    a[OVS_VPORT_ATTR_UPCALL_PID]);
1882                 if (err)
1883                         goto exit_unlock_free;
1884         }
1885
1886         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1887                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1888         BUG_ON(err < 0);
1889         ovs_unlock();
1890
1891         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1892         return 0;
1893
1894 exit_unlock_free:
1895         ovs_unlock();
1896         kfree_skb(reply);
1897         return err;
1898 }
1899
1900 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1901 {
1902         struct nlattr **a = info->attrs;
1903         struct sk_buff *reply;
1904         struct vport *vport;
1905         int err;
1906
1907         reply = ovs_vport_cmd_alloc_info();
1908         if (!reply)
1909                 return -ENOMEM;
1910
1911         ovs_lock();
1912         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1913         err = PTR_ERR(vport);
1914         if (IS_ERR(vport))
1915                 goto exit_unlock_free;
1916
1917         if (vport->port_no == OVSP_LOCAL) {
1918                 err = -EINVAL;
1919                 goto exit_unlock_free;
1920         }
1921
1922         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1923                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1924         BUG_ON(err < 0);
1925         ovs_dp_detach_port(vport);
1926         ovs_unlock();
1927
1928         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1929         return 0;
1930
1931 exit_unlock_free:
1932         ovs_unlock();
1933         kfree_skb(reply);
1934         return err;
1935 }
1936
1937 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1938 {
1939         struct nlattr **a = info->attrs;
1940         struct ovs_header *ovs_header = info->userhdr;
1941         struct sk_buff *reply;
1942         struct vport *vport;
1943         int err;
1944
1945         reply = ovs_vport_cmd_alloc_info();
1946         if (!reply)
1947                 return -ENOMEM;
1948
1949         rcu_read_lock();
1950         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1951         err = PTR_ERR(vport);
1952         if (IS_ERR(vport))
1953                 goto exit_unlock_free;
1954         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1955                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1956         BUG_ON(err < 0);
1957         rcu_read_unlock();
1958
1959         return genlmsg_reply(reply, info);
1960
1961 exit_unlock_free:
1962         rcu_read_unlock();
1963         kfree_skb(reply);
1964         return err;
1965 }
1966
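     /* Vport dumps keep two resume cursors: cb->args[0] is the hash bucket to
      * continue from and cb->args[1] the number of entries already emitted
      * within that bucket.
      */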
1967 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1968 {
1969         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1970         struct datapath *dp;
1971         int bucket = cb->args[0], skip = cb->args[1];
1972         int i, j = 0;
1973
1974         rcu_read_lock();
1975         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1976         if (!dp) {
1977                 rcu_read_unlock();
1978                 return -ENODEV;
1979         }
1980         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1981                 struct vport *vport;
1982
1983                 j = 0;
1984                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1985                         if (j >= skip &&
1986                             ovs_vport_cmd_fill_info(vport, skb,
1987                                                     NETLINK_CB(cb->skb).portid,
1988                                                     cb->nlh->nlmsg_seq,
1989                                                     NLM_F_MULTI,
1990                                                     OVS_VPORT_CMD_NEW) < 0)
1991                                 goto out;
1992
1993                         j++;
1994                 }
1995                 skip = 0;
1996         }
1997 out:
1998         rcu_read_unlock();
1999
2000         cb->args[0] = i;
2001         cb->args[1] = j;
2002
2003         return skb->len;
2004 }
2005
2006 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2007         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2008         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2009         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2010         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2011         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2012         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2013 };
2014
2015 static struct genl_ops dp_vport_genl_ops[] = {
2016         { .cmd = OVS_VPORT_CMD_NEW,
2017           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2018           .policy = vport_policy,
2019           .doit = ovs_vport_cmd_new
2020         },
2021         { .cmd = OVS_VPORT_CMD_DEL,
2022           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2023           .policy = vport_policy,
2024           .doit = ovs_vport_cmd_del
2025         },
2026         { .cmd = OVS_VPORT_CMD_GET,
2027           .flags = 0,               /* OK for unprivileged users. */
2028           .policy = vport_policy,
2029           .doit = ovs_vport_cmd_get,
2030           .dumpit = ovs_vport_cmd_dump
2031         },
2032         { .cmd = OVS_VPORT_CMD_SET,
2033           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2034           .policy = vport_policy,
2035           .doit = ovs_vport_cmd_set,
2036         },
2037 };
2038
2039 struct genl_family dp_vport_genl_family = {
2040         .id = GENL_ID_GENERATE,
2041         .hdrsize = sizeof(struct ovs_header),
2042         .name = OVS_VPORT_FAMILY,
2043         .version = OVS_VPORT_VERSION,
2044         .maxattr = OVS_VPORT_ATTR_MAX,
2045         .netnsok = true,
2046         .parallel_ops = true,
2047         .ops = dp_vport_genl_ops,
2048         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2049         .mcgrps = &ovs_dp_vport_multicast_group,
2050         .n_mcgrps = 1,
2051 };
2052
2053 static struct genl_family *dp_genl_families[] = {
2054         &dp_datapath_genl_family,
2055         &dp_vport_genl_family,
2056         &dp_flow_genl_family,
2057         &dp_packet_genl_family,
2058 };
2059
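     /* Families register in array order; after a partial failure
      * dp_register_genl() calls dp_unregister_genl(i) to tear down only the
      * first i entries, and dp_cleanup() unregisters the whole array.
      */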
2060 static void dp_unregister_genl(int n_families)
2061 {
2062         int i;
2063
2064         for (i = 0; i < n_families; i++)
2065                 genl_unregister_family(dp_genl_families[i]);
2066 }
2067
2068 static int dp_register_genl(void)
2069 {
2070         int err;
2071         int i;
2072
2073         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2074
2075                 err = genl_register_family(dp_genl_families[i]);
2076                 if (err)
2077                         goto error;
2078         }
2079
2080         return 0;
2081
2082 error:
2083         dp_unregister_genl(i);
2084         return err;
2085 }
2086
2087 static int __net_init ovs_init_net(struct net *net)
2088 {
2089         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2090
2091         INIT_LIST_HEAD(&ovs_net->dps);
2092         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2093         return 0;
2094 }
2095
2096 static void __net_exit ovs_exit_net(struct net *net)
2097 {
2098         struct datapath *dp, *dp_next;
2099         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2100
2101         ovs_lock();
2102         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2103                 __dp_destroy(dp);
2104         ovs_unlock();
2105
2106         cancel_work_sync(&ovs_net->dp_notify_work);
2107 }
2108
2109 static struct pernet_operations ovs_net_ops = {
2110         .init = ovs_init_net,
2111         .exit = ovs_exit_net,
2112         .id   = &ovs_net_id,
2113         .size = sizeof(struct ovs_net),
2114 };
2115
2116 DEFINE_COMPAT_PNET_REG_FUNC(device);
2117
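     /* Module init brings the pieces up in dependency order: ovs_flow_init(),
      * ovs_vport_init(), the per-net state, the netdev notifier and finally
      * the generic netlink families. Each error label unwinds exactly the
      * steps that already succeeded, and dp_cleanup() reverses the same order
      * on unload.
      */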
2118 static int __init dp_init(void)
2119 {
2120         int err;
2121
2122         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2123
2124         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2125                 VERSION);
2126
2127         err = ovs_flow_init();
2128         if (err)
2129                 goto error;
2130
2131         err = ovs_vport_init();
2132         if (err)
2133                 goto error_flow_exit;
2134
2135         err = register_pernet_device(&ovs_net_ops);
2136         if (err)
2137                 goto error_vport_exit;
2138
2139         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2140         if (err)
2141                 goto error_netns_exit;
2142
2143         err = dp_register_genl();
2144         if (err < 0)
2145                 goto error_unreg_notifier;
2146
2147         return 0;
2148
2149 error_unreg_notifier:
2150         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2151 error_netns_exit:
2152         unregister_pernet_device(&ovs_net_ops);
2153 error_vport_exit:
2154         ovs_vport_exit();
2155 error_flow_exit:
2156         ovs_flow_exit();
2157 error:
2158         return err;
2159 }
2160
2161 static void dp_cleanup(void)
2162 {
2163         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2164         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2165         unregister_pernet_device(&ovs_net_ops);
2166         rcu_barrier();
2167         ovs_vport_exit();
2168         ovs_flow_exit();
2169 }
2170
2171 module_init(dp_init);
2172 module_exit(dp_cleanup);
2173
2174 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2175 MODULE_LICENSE("GPL");
2176 MODULE_VERSION(VERSION);