#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
-#include <linux/genetlink.h>
-#include <net/genetlink.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
- const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+ unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
- netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
+ genl_has_listeners(family, genl_info_net(info)->genl_sock,
+ group);
}
static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
ovs_vport_del(p);
}
-void ovs_dp_process_packet_with_key(struct sk_buff *skb,
- struct sw_flow_key *pkt_key,
- bool recirc)
+/* Must be called with rcu_read_lock held. */
+void ovs_dp_process_packet(struct sk_buff *skb)
{
const struct vport *p = OVS_CB(skb)->input_vport;
+ struct sw_flow_key *pkt_key = OVS_CB(skb)->pkt_key;
struct datapath *dp = p->dp;
struct sw_flow *flow;
+ struct sw_flow_actions *sf_acts;
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
&n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
+ int error;
upcall.cmd = OVS_PACKET_CMD_MISS;
- upcall.key = pkt_key;
upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
- ovs_dp_upcall(dp, skb, &upcall);
- consume_skb(skb);
+ upcall.egress_tun_info = NULL;
+
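+ /* On error the packet is dropped (kfree_skb); on success its data
+ * has been queued to userspace, so freeing it here is normal
+ * consumption (consume_skb). */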
+ error = ovs_dp_upcall(dp, skb, &upcall);
+ if (unlikely(error))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+
stats_counter = &stats->n_missed;
goto out;
}
- OVS_CB(skb)->pkt_key = pkt_key;
- OVS_CB(skb)->flow = flow;
+ ovs_flow_stats_update(flow, pkt_key->tp.flags, skb);
- ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb);
- ovs_execute_actions(dp, skb, recirc);
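+ /* Snapshot the flow's actions under RCU; the caller's rcu_read_lock
+ * keeps them valid while they execute. */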
+ sf_acts = rcu_dereference(flow->sf_acts);
+ ovs_execute_actions(dp, skb, sf_acts);
stats_counter = &stats->n_hit;
out:
/* Update datapath statistics. */
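+ /* The syncp seqcount lets readers fetch consistent 64-bit counters
+ * on 32-bit SMP hosts. */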
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
- u64_stats_update_end(&stats->sync);
-}
-
-/* Must be called with rcu_read_lock. */
-void ovs_dp_process_received_packet(struct sk_buff *skb)
-{
- int error;
- struct sw_flow_key key;
-
- /* Extract flow from 'skb' into 'key'. */
- error = ovs_flow_extract(skb, OVS_CB(skb)->input_vport->port_no, &key);
- if (unlikely(error)) {
- kfree_skb(skb);
- return;
- }
-
- ovs_dp_process_packet_with_key(skb, &key, false);
+ u64_stats_update_end(&stats->syncp);
}
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
struct dp_stats_percpu *stats;
int err;
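+ /* Every packet on the upcall path must already carry its extracted
+ * flow key. */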
+ BUG_ON(!OVS_CB(skb)->pkt_key);
+
if (upcall_info->portid == 0) {
err = -ENOTCONN;
goto err;
err:
stats = this_cpu_ptr(dp->stats_percpu);
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
stats->n_lost++;
- u64_stats_update_end(&stats->sync);
+ u64_stats_update_end(&stats->syncp);
return err;
}
const struct dp_upcall_info *upcall_info)
{
unsigned short gso_type = skb_shinfo(skb)->gso_type;
- struct dp_upcall_info later_info;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
if (IS_ERR(segs))
return PTR_ERR(segs);
+ if (gso_type & SKB_GSO_UDP) {
+ /* The initial flow key extracted by ovs_flow_key_extract()
+ * in this case is for the first fragment, so we need to
+ * properly mark later fragments.
+ */
+ later_key = *OVS_CB(skb)->pkt_key;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+ }
+
/* Queue all of the segments. */
skb = segs;
do {
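+ /* Every segment after the first is a later fragment; point it at
+ * the adjusted key. */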
+ if (gso_type & SKB_GSO_UDP && skb != segs)
+ OVS_CB(skb)->pkt_key = &later_key;
+
err = queue_userspace_packet(dp, skb, upcall_info);
if (err)
break;
- if (skb == segs && gso_type & SKB_GSO_UDP) {
- /* The initial flow key extracted by ovs_flow_extract()
- * in this case is for a first fragment, so we need to
- * properly mark later fragments.
- */
- later_key = *upcall_info->key;
- later_key.ip.frag = OVS_FRAG_TYPE_LATER;
-
- later_info = *upcall_info;
- later_info.key = &later_key;
- upcall_info = &later_info;
- }
} while ((skb = skb->next));
/* Free all of the segments. */
return err;
}
-static size_t key_attr_size(void)
-{
- /* Whenever adding new OVS_KEY_ FIELDS, we should consider
- * updating this function. */
- BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
-
- return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
- + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
- + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
- + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
- + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
- + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
- + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
- + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
- + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
- + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
- + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
- + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
- + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
- + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
- + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */
- + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
- + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
- + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
- + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
- + nla_total_size(28); /* OVS_KEY_ATTR_ND */
-}
-
-static size_t upcall_msg_size(const struct nlattr *userdata,
+static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
unsigned int hdrlen)
{
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
- + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
+ + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
/* OVS_PACKET_ATTR_USERDATA */
- if (userdata)
- size += NLA_ALIGN(userdata->nla_len);
+ if (upcall_info->userdata)
+ size += NLA_ALIGN(upcall_info->userdata->nla_len);
+
+ /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
+ if (upcall_info->egress_tun_info)
+ size += nla_total_size(ovs_tun_key_attr_size());
return size;
}
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
- struct sk_buff *user_skb; /* to be queued to userspace */
+ struct sk_buff *user_skb = NULL; /* to be queued to userspace */
+ struct sw_flow_key *pkt_key = OVS_CB(skb)->pkt_key;
struct nlattr *nla;
struct genl_info info = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
else
hlen = skb->len;
- len = upcall_msg_size(upcall_info->userdata, hlen);
+ len = upcall_msg_size(upcall_info, hlen);
user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- err = ovs_nla_put_flow(dp, upcall_info->key,
- upcall_info->key, user_skb);
+ err = ovs_nla_put_flow(pkt_key, pkt_key, user_skb);
BUG_ON(err);
nla_nest_end(user_skb, nla);
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));
+ if (upcall_info->egress_tun_info) {
+ nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+ err = ovs_nla_put_egress_tunnel_key(user_skb,
+ upcall_info->egress_tun_info);
+ BUG_ON(err);
+ nla_nest_end(user_skb, nla);
+ }
+
/* Only reserve room for the attribute header; packet data is added
 * in skb_zerocopy(). */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
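+ /* genlmsg_unicast() always consumes user_skb; clear the pointer so
+ * the shared exit path below does not free it twice. */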
+ user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
+
+ kfree_skb(user_skb);
kfree_skb(nskb);
return err;
}
struct sw_flow_actions *acts;
struct sk_buff *packet;
struct sw_flow *flow;
+ struct sw_flow_actions *sf_acts;
struct datapath *dp;
struct ethhdr *eth;
struct vport *input_vport;
if (IS_ERR(flow))
goto err_kfree_skb;
- err = ovs_flow_extract(packet, -1, &flow->key);
- if (err)
- goto err_flow_free;
-
- err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
+ &flow->key);
if (err)
goto err_flow_free;
- acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
- err = PTR_ERR(acts);
- if (IS_ERR(acts))
- goto err_flow_free;
err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, &acts);
- rcu_assign_pointer(flow->sf_acts, acts);
if (err)
goto err_flow_free;
- OVS_CB(packet)->flow = flow;
+ rcu_assign_pointer(flow->sf_acts, acts);
OVS_CB(packet)->pkt_key = &flow->key;
+ OVS_CB(packet)->egress_tun_info = NULL;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
goto err_unlock;
OVS_CB(packet)->input_vport = input_vport;
+ sf_acts = rcu_dereference(flow->sf_acts);
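+ /* Execute with bottom halves disabled to match the softirq context
+ * in which packets are normally processed. */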
local_bh_disable();
- err = ovs_execute_actions(dp, packet, false);
+ err = ovs_execute_actions(dp, packet, sf_acts);
local_bh_enable();
rcu_read_unlock();
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
- start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
+ start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
return NLMSG_ALIGN(sizeof(struct ovs_header))
- + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
- + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+ + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
}
/* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_match(struct datapath *dp,
- const struct sw_flow *flow,
+static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
struct sk_buff *skb)
{
struct nlattr *nla;
if (!nla)
return -EMSGSIZE;
- err = ovs_nla_put_flow(dp, &flow->unmasked_key,
+ err = ovs_nla_put_flow(&flow->unmasked_key,
&flow->unmasked_key, skb);
if (err)
return err;
if (!nla)
return -EMSGSIZE;
- err = ovs_nla_put_flow(dp, &flow->key, &flow->mask->key, skb);
+ err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
if (err)
return err;
nla_nest_end(skb, nla);
}
/* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(struct datapath *dp,
- const struct sw_flow *flow, int dp_ifindex,
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
struct sk_buff *skb, u32 portid,
u32 seq, u32 flags, u8 cmd)
{
return -EMSGSIZE;
ovs_header->dp_ifindex = dp_ifindex;
- err = ovs_flow_cmd_fill_match(dp, flow, skb);
+ err = ovs_flow_cmd_fill_match(flow, skb);
if (err)
goto error;
{
struct sk_buff *skb;
- if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+ if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
+ GROUP_ID(&ovs_dp_flow_multicast_group)))
return NULL;
skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
always);
- if (!skb || IS_ERR(skb))
+ if (IS_ERR_OR_NULL(skb))
return skb;
- retval = ovs_flow_cmd_fill_info(dp, flow, dp_ifindex, skb,
+ retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
info->snd_portid, info->snd_seq, 0,
cmd);
BUG_ON(retval < 0);
ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
/* Validate actions. */
- acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
- error = PTR_ERR(acts);
- if (IS_ERR(acts))
- goto err_kfree_flow;
-
error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
&acts);
if (error) {
}
if (unlikely(reply)) {
- error = ovs_flow_cmd_fill_info(dp, new_flow,
+ error = ovs_flow_cmd_fill_info(new_flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
rcu_assign_pointer(flow->sf_acts, acts);
if (unlikely(reply)) {
- error = ovs_flow_cmd_fill_info(dp, flow,
+ error = ovs_flow_cmd_fill_info(flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
return error;
}
+/* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
const struct sw_flow_key *key,
const struct sw_flow_mask *mask)
struct sw_flow_key masked_key;
int error;
- acts = ovs_nla_alloc_flow_actions(nla_len(a));
- if (IS_ERR(acts))
- return acts;
-
ovs_flow_mask_key(&masked_key, key, mask);
error = ovs_nla_copy_actions(a, &masked_key, &acts);
if (error) {
- OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
- kfree(acts);
+ OVS_NLERR("Actions may not be safe on all matching packets.\n");
return ERR_PTR(error);
}
error = PTR_ERR(acts);
goto error;
}
- }
- /* Can allocate before locking if have acts. */
- if (acts) {
+ /* Can allocate before locking if we have acts. */
reply = ovs_flow_cmd_alloc_info(acts, info, false);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
rcu_assign_pointer(flow->sf_acts, acts);
if (unlikely(reply)) {
- error = ovs_flow_cmd_fill_info(dp, flow,
+ error = ovs_flow_cmd_fill_info(flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
if (likely(reply)) {
if (likely(!IS_ERR(reply))) {
rcu_read_lock(); /* Keep RCU checker happy. */
- err = ovs_flow_cmd_fill_info(dp, flow,
+ err = ovs_flow_cmd_fill_info(flow,
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
if (!flow)
break;
- if (ovs_flow_cmd_fill_info(dp, flow, ovs_header->dp_ifindex, skb,
+ if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_NEW) < 0)
if (err)
goto err_free_dp;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
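+ /* netdev_alloc_pcpu_stats() zeroes the stats and runs
+ * u64_stats_init() on every CPU's syncp, so no manual init loop is
+ * needed. */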
+ dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
if (!dp->stats_percpu) {
err = -ENOMEM;
goto err_destroy_table;
}
- for_each_possible_cpu(i) {
- struct dp_stats_percpu *dpath_stats;
- dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
- u64_stats_init(&dpath_stats->sync);
- }
-
dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!dp->ports) {
if (IS_ERR(vport))
goto exit_unlock_free;
- err = 0;
- if (a[OVS_VPORT_ATTR_STATS])
- ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
-
err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
info->snd_seq, 0, OVS_VPORT_CMD_NEW);
BUG_ON(err < 0);
goto exit_unlock_free;
}
- if (a[OVS_VPORT_ATTR_STATS])
- ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
-
-
if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
err = ovs_vport_set_upcall_portids(vport,
a[OVS_VPORT_ATTR_UPCALL_PID]);
pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
VERSION);
- err = ovs_flow_init();
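+ /* Per-CPU FIFOs holding deferred actions (e.g. recirculation) must
+ * be ready before any packet is processed. */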
+ err = action_fifos_init();
if (err)
goto error;
+ err = ovs_flow_init();
+ if (err)
+ goto error_action_fifos_exit;
+
err = ovs_vport_init();
if (err)
goto error_flow_exit;
ovs_vport_exit();
error_flow_exit:
ovs_flow_exit();
+error_action_fifos_exit:
+ action_fifos_exit();
error:
return err;
}
rcu_barrier();
ovs_vport_exit();
ovs_flow_exit();
+ action_fifos_exit();
}
module_init(dp_init);