/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
#include <net/vxlan.h>

#include "flow_netlink.h"

struct ovs_len_tbl {
	int len;
	const struct ovs_len_tbl *next;
};

#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2
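
/* Both ends of a flow key range are rounded out to sizeof(long) boundaries:
 * the masked-key comparisons in the flow table walk keys one long word at a
 * time, so update_range() below must always cover whole words.
 */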

static void update_range(struct sw_flow_match *match,
			 size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else
		range = &match->mask->range;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}
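
/* The SW_FLOW_KEY_* helpers store a value into either the flow key or the
 * flow mask (depending on 'is_mask') and widen the corresponding key range so
 * that later masked lookups cover the field just written.
 */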

#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			(match)->mask->key.field = value;		    \
		else							    \
			(match)->key->field = value;			    \
	} while (0)

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)    \
	do {								    \
		update_range(match, offset, len, is_mask);		    \
		if (is_mask)						    \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, \
			       len);					    \
		else							    \
			memcpy((u8 *)(match)->key + offset, value_p, len);  \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask)	      \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		    \
	do {								    \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			memset((u8 *)&(match)->mask->key.field, value,      \
			       sizeof((match)->mask->key.field));	    \
		else							    \
			memset((u8 *)&(match)->key->field, value,	    \
			       sizeof((match)->key->field));		    \
	} while (0)
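
/* Sanity-check a parsed match. 'key_expected' accumulates the attributes that
 * must be present given the EtherType and IP protocol found in the key, while
 * 'mask_allowed' accumulates the attributes that may legitimately appear in
 * the mask. A key missing an expected attribute, or a mask containing an
 * attribute outside the allowed set, is rejected.
 */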

static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs, bool log)
{
	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND)
			| (1 << OVS_KEY_ATTR_MPLS));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (eth_p_mpls(match->key->eth.type)) {
		key_expected |= 1 << OVS_KEY_ATTR_MPLS;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.tp.src == htons(0xff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
			  (unsigned long long)key_attrs,
			  (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
			  (unsigned long long)mask_attrs,
			  (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}

size_t ovs_tun_key_attr_size(void)
{
	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
		 */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
		+ nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}

size_t ovs_key_attr_size(void)
{
	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);

	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + ovs_tun_key_attr_size()
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	    = { .len = sizeof(u32) },
};

static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_DST]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_TOS]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_TTL]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_CSUM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
						.next = ovs_vxlan_ext_key_lens },
};

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP]	 = { .len = OVS_ATTR_NESTED },
	[OVS_KEY_ATTR_PRIORITY]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_IN_PORT]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_SKB_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_ETHERNET]	 = { .len = sizeof(struct ovs_key_ethernet) },
	[OVS_KEY_ATTR_VLAN]	 = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_IPV4]	 = { .len = sizeof(struct ovs_key_ipv4) },
	[OVS_KEY_ATTR_IPV6]	 = { .len = sizeof(struct ovs_key_ipv6) },
	[OVS_KEY_ATTR_TCP]	 = { .len = sizeof(struct ovs_key_tcp) },
	[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_UDP]	 = { .len = sizeof(struct ovs_key_udp) },
	[OVS_KEY_ATTR_SCTP]	 = { .len = sizeof(struct ovs_key_sctp) },
	[OVS_KEY_ATTR_ICMP]	 = { .len = sizeof(struct ovs_key_icmp) },
	[OVS_KEY_ATTR_ICMPV6]	 = { .len = sizeof(struct ovs_key_icmpv6) },
	[OVS_KEY_ATTR_ARP]	 = { .len = sizeof(struct ovs_key_arp) },
	[OVS_KEY_ATTR_ND]	 = { .len = sizeof(struct ovs_key_nd) },
	[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_DP_HASH]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_tunnel_key_lens, },
	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
};

static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
{
	return expected_len == attr_len ||
	       expected_len == OVS_ATTR_NESTED ||
	       expected_len == OVS_ATTR_VARIABLE;
}

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}
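
/* Parse a nested stream of OVS_KEY_ATTR_* attributes into the array 'a'.
 * When 'nz' is set (mask parsing), attributes whose payload is all zeroes are
 * treated as absent, since an all-zero mask is equivalent to not matching on
 * the field at all.
 */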

static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool log, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Key type %d is out of range max %d",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1 << type)) {
			OVS_NLERR(log, "Duplicate key (type %d).", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type].len;
		if (!check_attr_len(nla_len(nla), expected_len)) {
			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1 << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR(log, "Message has %d unknown bytes.", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp,
				   bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp,
			      bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}

static int genev_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	unsigned long opt_key_offset;

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (nla_len(a) % 4 != 0) {
		OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
			  nla_len(a));
		return -EINVAL;
	}

	/* We need to record the length of the options passed
	 * down, otherwise packets with the same format but
	 * additional options will be silently matched.
	 */
	if (!is_mask) {
		SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
				false);
	} else {
		/* This is somewhat unusual because it looks at
		 * both the key and mask while parsing the
		 * attributes (and by extension assumes the key
		 * is parsed first). Normally, we would verify
		 * that each is the correct length and that the
		 * attributes line up in the validate function.
		 * However, that is difficult because this is
		 * variable length and we won't have the
		 * information later.
		 */
		if (match->key->tun_opts_len != nla_len(a)) {
			OVS_NLERR(log, "Geneve option len %d != mask len %d",
				  match->key->tun_opts_len, nla_len(a));
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
	}

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}

static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	struct nlattr *a;
	int rem;
	unsigned long opt_key_offset;
	struct vxlan_metadata opts;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	memset(&opts, 0, sizeof(opts));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		if (type > OVS_VXLAN_EXT_MAX) {
			OVS_NLERR(log, "VXLAN extension %d out of range max %d",
				  type, OVS_VXLAN_EXT_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_vxlan_ext_key_lens[type].len)) {
			OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
				  type, nla_len(a),
				  ovs_vxlan_ext_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_VXLAN_EXT_GBP:
			opts.gbp = nla_get_u32(a);
			break;
		default:
			OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
				  type);
			return -EINVAL;
		}
	}
	if (rem) {
		OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);
	return 0;
}

static int ipv4_tun_from_nlattr(const struct nlattr *attr,
				struct sw_flow_match *match, bool is_mask,
				bool log)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;
	int opts_type = 0;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_tunnel_key_lens[type].len)) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
					nla_get_in_addr(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
					nla_get_in_addr(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		default:
			OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0) {
		OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask) {
		if (!match->key->tun_key.u.ipv4.dst) {
			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}

static int vxlan_opt_to_nlattr(struct sk_buff *skb,
			       const void *tun_opts, int swkey_tun_opts_len)
{
	const struct vxlan_metadata *opts = tun_opts;
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}

static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
				const struct ip_tunnel_key *output,
				const void *tun_opts, int swkey_tun_opts_len)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->u.ipv4.src &&
	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
			    output->u.ipv4.src))
		return -EMSGSIZE;
	if (output->u.ipv4.dst &&
	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
			    output->u.ipv4.dst))
		return -EMSGSIZE;
	if (output->tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (swkey_tun_opts_len) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
	}

	return 0;
}

static int ipv4_tun_to_nlattr(struct sk_buff *skb,
			      const struct ip_tunnel_key *output,
			      const void *tun_opts, int swkey_tun_opts_len)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
				  const struct ip_tunnel_info *egress_tun_info,
				  const void *egress_tun_opts)
{
	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
				    egress_tun_opts,
				    egress_tun_info->options_len);
}

static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
				 u64 *attrs, const struct nlattr **a,
				 bool is_mask, bool log)
{
	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
				nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask) {
			in_port = 0xffffffff; /* Always exact match in_port. */
		} else if (in_port >= DP_MAX_PORTS) {
			OVS_NLERR(log, "Port %d exceeds max allowable %d",
				  in_port, DP_MAX_PORTS);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					 is_mask, log) < 0)
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
		u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);

		if (ct_state & ~CT_SUPPORTED_MASK) {
			OVS_NLERR(log, "ct_state flags %08x unsupported",
				  ct_state);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);

		SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);

		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
		const struct ovs_key_ct_labels *cl;

		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
				   sizeof(*cl), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
	}
	return 0;
}
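
/* Fill in the remaining (non-metadata) portions of the flow key, or of the
 * flow mask when 'is_mask' is set, from the parsed attribute array. Each
 * consumed attribute is cleared from 'attrs'; anything left over at the end
 * is unknown and causes the flow to be rejected.
 */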

static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
				u64 attrs, const struct nlattr **a,
				bool is_mask, bool log)
{
	int err;

	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
			else
				OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (!eth_proto_is_802_3(eth_type)) {
			OVS_NLERR(log, "EtherType %x is less than min %x",
				  ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}

		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x).\n",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
		const struct ovs_key_mpls *mpls_key;

		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
		SW_FLOW_KEY_PUT(match, mpls.top_lse,
				mpls_key->mpls_lse, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
		SW_FLOW_KEY_PUT(match, tp.flags,
				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
				is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0) {
		OVS_NLERR(log, "Unknown key attributes %llx",
			  (unsigned long long)attrs);
		return -EINVAL;
	}

	return 0;
}
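
/* Recursively overwrite the payload of every attribute in an already
 * validated nlattr stream with 'val'. This is used to turn a copy of the key
 * attributes into an all-0xff exact-match mask; ct_state is clamped to the
 * supported bits so that the generated mask still passes validation.
 */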

static void nlattr_set(struct nlattr *attr, u8 val,
		       const struct ovs_len_tbl *tbl)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
			if (tbl[nla_type(nla)].next)
				tbl = tbl[nla_type(nla)].next;
			nlattr_set(nla, val, tbl);
		} else {
			memset(nla_data(nla), val, nla_len(nla));
		}

		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
	}
}

static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, ovs_key_lens);
}

/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as exact match
 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
 * should not include any don't-care bits.
 * @net: Used to determine per-namespace field support.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attribute specifies the mask field of the wildcarded flow.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 */
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
		      const struct nlattr *nla_key,
		      const struct nlattr *nla_mask,
		      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
	if (err)
		return err;

	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
		__be16 tci;

		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
			OVS_NLERR(log, "Invalid Vlan frame.");
			return -EINVAL;
		}

		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
		encap_valid = true;

		if (tci & htons(VLAN_TAG_PRESENT)) {
			err = parse_flow_nlattrs(encap, a, &key_attrs, log);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap)) {
				OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
				return -EINVAL;
			}
		} else {
			OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
			return -EINVAL;
		}
	}

	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
	if (err)
		return err;

	if (match->mask) {
		if (!nla_mask) {
			/* Create an exact match mask. We need to set to 0xff
			 * all the 'match->mask' fields that have been touched
			 * in 'match->key'. We cannot simply memset
			 * 'match->mask', because padding bytes and fields not
			 * specified in 'match->key' should be left to 0.
			 * Instead, we use a stream of netlink attributes,
			 * copied from 'key' and set to 0xff.
			 * ovs_key_from_nlattrs() will take care of filling
			 * 'match->mask' appropriately.
			 */
			newmask = kmemdup(nla_key,
					  nla_total_size(nla_len(nla_key)),
					  GFP_KERNEL);
			if (!newmask)
				return -ENOMEM;

			mask_set_nlattr(newmask, 0xff);

			/* Userspace does not send tunnel attributes that
			 * are 0, but we should not wildcard them nonetheless.
			 */
			if (match->key->tun_key.u.ipv4.dst)
				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
							 0xff, true);

			nla_mask = newmask;
		}

		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
		if (err)
			goto free_newmask;

		/* Always match on tci. */
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
			__be16 eth_type = 0;
			__be16 tci = 0;

			if (!encap_valid) {
				OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
				err = -EINVAL;
				goto free_newmask;
			}

			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a,
							      &mask_attrs, log);
				if (err)
					goto free_newmask;
			} else {
				OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
					  ntohs(eth_type));
				err = -EINVAL;
				goto free_newmask;
			}

			if (a[OVS_KEY_ATTR_VLAN])
				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

			if (!(tci & htons(VLAN_TAG_PRESENT))) {
				OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
					  ntohs(tci));
				err = -EINVAL;
				goto free_newmask;
			}
		}

		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
					   log);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs, log))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}

static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
	size_t len;

	if (!attr)
		return 0;

	len = nla_len(attr);
	if (len < 1 || len > MAX_UFID_LENGTH) {
		OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
			  nla_len(attr), MAX_UFID_LENGTH);
		return 0;
	}

	return len;
}

/* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
 * or false otherwise.
 */
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
		      bool log)
{
	sfid->ufid_len = get_ufid_len(attr, log);
	if (sfid->ufid_len)
		memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);

	return sfid->ufid_len;
}

int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
			   const struct sw_flow_key *key, bool log)
{
	struct sw_flow_key *new_key;

	if (ovs_nla_get_ufid(sfid, ufid, log))
		return 0;

	/* If UFID was not provided, use unmasked key. */
	new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
	if (!new_key)
		return -ENOMEM;
	memcpy(new_key, key, sizeof(*key));
	sfid->unmasked_key = new_key;

	return 0;
}

u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
	return attr ? nla_get_u32(attr) : 0;
}

/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @key: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
			      struct sw_flow_key *key,
			      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct sw_flow_match match;
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	memset(&match, 0, sizeof(match));
	match.key = key;

	memset(&key->ct, 0, sizeof(key->ct));
	key->phy.in_port = DP_MAX_PORTS;

	return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}

static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
			     const struct sw_flow_key *output, bool is_mask,
			     struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
		const void *opts = NULL;

		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);

		if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
				       swkey->tun_opts_len))
			goto nla_put_failure;
	}

	if (swkey->phy.in_port == DP_MAX_PORTS) {
		if (is_mask && (output->phy.in_port == 0xffff))
			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
				goto nla_put_failure;
	} else {
		u16 upper_u16;
		upper_u16 = !is_mask ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	if (ovs_ct_put_key(output, skb))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;

	eth_key = nla_data(nla);
	ether_addr_copy(eth_key->eth_src, output->eth.src);
	ether_addr_copy(eth_key->eth_dst, output->eth.dst);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		__be16 eth_type;
		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2)) {
		/*
		 * Ethertype 802.2 is represented in the netlink with omitted
		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
		 * 0xffff in the mask attribute.  Ethertype can also
		 * be wildcarded.
		 */
		if (is_mask && output->eth.type)
			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
					 output->eth.type))
				goto nla_put_failure;
		goto unencap;
	}

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
		ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
	} else if (eth_p_mpls(swkey->eth.type)) {
		struct ovs_key_mpls *mpls_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
		if (!nla)
			goto nla_put_failure;
		mpls_key = nla_data(nla);
		mpls_key->mpls_lse = output->mpls.top_lse;
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			tcp_key->tcp_src = output->tp.src;
			tcp_key->tcp_dst = output->tp.dst;
			if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
					 output->tp.flags))
				goto nla_put_failure;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			udp_key->udp_src = output->tp.src;
			udp_key->udp_dst = output->tp.dst;
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			sctp_key->sctp_src = output->tp.src;
			sctp_key->sctp_dst = output->tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->tp.src);
			icmp_key->icmp_code = ntohs(output->tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
				ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

int ovs_nla_put_key(const struct sw_flow_key *swkey,
		    const struct sw_flow_key *output, int attr, bool is_mask,
		    struct sk_buff *skb)
{
	int err;
	struct nlattr *nla;

	nla = nla_nest_start(skb, attr);
	if (!nla)
		return -EMSGSIZE;
	err = __ovs_nla_put_key(swkey, output, is_mask, skb);
	if (err)
		return err;
	nla_nest_end(skb, nla);

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
			       flow->id.ufid);

	return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->mask->key,
			       OVS_FLOW_ATTR_MASK, true, skb);
}

#define MAX_ACTIONS_BUFSIZE	(32 * 1024)

static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
{
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE) {
		OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
		return ERR_PTR(-EINVAL);
	}

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}

static void ovs_nla_free_set_action(const struct nlattr *a)
{
	const struct nlattr *ovs_key = nla_data(a);
	struct ovs_tunnel_info *ovs_tun;

	switch (nla_type(ovs_key)) {
	case OVS_KEY_ATTR_TUNNEL_INFO:
		ovs_tun = nla_data(ovs_key);
		dst_release((struct dst_entry *)ovs_tun->tun_dst);
		break;
	}
}

void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
	const struct nlattr *a;
	int rem;

	if (!sf_acts)
		return;

	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_SET:
			ovs_nla_free_set_action(a);
			break;
		case OVS_ACTION_ATTR_CT:
			ovs_ct_free_action(a);
			break;
		}
	}

	kfree(sf_acts);
}

static void __ovs_nla_free_flow_actions(struct rcu_head *head)
{
	ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
}
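
/* Reserve room for an action attribute, doubling the buffer (up to
 * MAX_ACTIONS_BUFSIZE) when the new attribute no longer fits. ksize() is used
 * so that any slack already granted by the allocator is consumed before
 * reallocating.
 */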

static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
				       int attr_len, bool log)
{
	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
					(*sfa)->actions_len;

	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
			return ERR_PTR(-EMSGSIZE);
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = nla_alloc_flow_actions(new_acts_size, log);
	if (IS_ERR(acts))
		return (void *)acts;

	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	acts->orig_len = (*sfa)->orig_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}

static struct nlattr *__add_action(struct sw_flow_actions **sfa,
				   int attrtype, void *data, int len, bool log)
{
	struct nlattr *a;

	a = reserve_sfa_size(sfa, nla_attr_size(len), log);
	if (IS_ERR(a))
		return a;

	a->nla_type = attrtype;
	a->nla_len = nla_attr_size(len);

	if (data)
		memcpy(nla_data(a), data, len);
	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

	return a;
}

int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
		       int len, bool log)
{
	struct nlattr *a;

	a = __add_action(sfa, attrtype, data, len, log);

	return PTR_ERR_OR_ZERO(a);
}
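
/* Nested actions (e.g. sample) are emitted in two steps:
 * add_nested_action_start() reserves a zero-length header attribute and
 * returns its offset, and add_nested_action_end() patches that header's
 * nla_len once the child attributes have been copied in behind it.
 */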

static inline int add_nested_action_start(struct sw_flow_actions **sfa,
					  int attrtype, bool log)
{
	int used = (*sfa)->actions_len;
	int err;

	err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
	if (err)
		return err;

	return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa,
					 int st_offset)
{
	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
							       st_offset);

	a->nla_len = sfa->actions_len - st_offset;
}

static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  int depth, struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log);

static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
				    const struct sw_flow_key *key, int depth,
				    struct sw_flow_actions **sfa,
				    __be16 eth_type, __be16 vlan_tci, bool log)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err, st_acts;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
	if (start < 0)
		return start;
	err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
				 nla_data(probability), sizeof(u32), log);
	if (err)
		return err;
	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
	if (st_acts < 0)
		return st_acts;

	err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
				     eth_type, vlan_tci, log);
	if (err)
		return err;

	add_nested_action_end(*sfa, st_acts);
	add_nested_action_end(*sfa, start);

	return 0;
}

void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}

static int validate_geneve_opts(struct sw_flow_key *key)
{
	struct geneve_opt *option;
	int opts_len = key->tun_opts_len;
	bool crit_opt = false;

	option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
	while (opts_len > 0) {
		int len;

		if (opts_len < sizeof(*option))
			return -EINVAL;

		len = sizeof(*option) + option->length * 4;
		if (len > opts_len)
			return -EINVAL;

		crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);

		option = (struct geneve_opt *)((u8 *)option + len);
		opts_len -= len;
	}

	key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;

	return 0;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
				     struct sw_flow_actions **sfa, bool log)
{
	struct sw_flow_match match;
	struct sw_flow_key key;
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *tun_info;
	struct ovs_tunnel_info *ovs_tun;
	struct nlattr *a;
	int err = 0, start, opts_type;

	ovs_match_init(&match, &key, NULL);
	opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
	if (opts_type < 0)
		return opts_type;

	if (key.tun_opts_len) {
		switch (opts_type) {
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			err = validate_geneve_opts(&key);
			if (err < 0)
				return err;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			break;
		}
	}

	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
	if (start < 0)
		return start;

	tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
	if (!tun_dst)
		return -ENOMEM;

	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
			 sizeof(*ovs_tun), log);
	if (IS_ERR(a)) {
		dst_release((struct dst_entry *)tun_dst);
		return PTR_ERR(a);
	}

	ovs_tun = nla_data(a);
	ovs_tun->tun_dst = tun_dst;

	tun_info = &tun_dst->u.tun_info;
	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->key = key.tun_key;

	/* We need to store the options in the action itself since
	 * everything else will go away after flow setup. We can append
	 * it to tun_info and then point there.
	 */
	ip_tunnel_info_opts_set(tun_info,
				TUN_METADATA_OPTS(&key, key.tun_opts_len),
				key.tun_opts_len);
	add_nested_action_end(*sfa, start);

	return err;
}

/* Return false if there are any non-masked bits set.
 * Mask follows data immediately, before any netlink padding.
 */
static bool validate_masked(u8 *data, int len)
{
	u8 *mask = data + len;

	while (len--)
		if (*data++ & ~*mask++)
			return false;

	return true;
}
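
/* A masked set action carries its key and mask back to back inside a single
 * nested attribute: the first key_len bytes are the value and the next
 * key_len bytes are the mask. validate_masked() above and the key_len /= 2
 * below both rely on this layout.
 */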

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa,
			bool *skip_copy, __be16 eth_type, bool masked, bool log)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	size_t key_len;

	/* There can be only one key in an action */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	key_len = nla_len(ovs_key);
	if (masked)
		key_len /= 2;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    !check_attr_len(key_len, ovs_key_lens[key_type].len))
		return -EINVAL;

	if (masked && !validate_masked(nla_data(ovs_key), key_len))
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;
	int err;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_SKB_MARK:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_ETHERNET:
		break;

	case OVS_KEY_ATTR_TUNNEL:
		if (eth_p_mpls(eth_type))
			return -EINVAL;

		if (masked)
			return -EINVAL; /* Masked tunnel set not supported. */

		*skip_copy = true;
		err = validate_and_copy_set_tun(a, sfa, log);
		if (err)
			return err;
		break;

	case OVS_KEY_ATTR_IPV4:
		if (eth_type != htons(ETH_P_IP))
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);

		if (masked) {
			const struct ovs_key_ipv4 *mask = ipv4_key + 1;

			/* Non-writeable fields. */
			if (mask->ipv4_proto || mask->ipv4_frag)
				return -EINVAL;
		} else {
			if (ipv4_key->ipv4_proto != flow_key->ip.proto)
				return -EINVAL;

			if (ipv4_key->ipv4_frag != flow_key->ip.frag)
				return -EINVAL;
		}
		break;

	case OVS_KEY_ATTR_IPV6:
		if (eth_type != htons(ETH_P_IPV6))
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);

		if (masked) {
			const struct ovs_key_ipv6 *mask = ipv6_key + 1;

			/* Non-writeable fields. */
			if (mask->ipv6_proto || mask->ipv6_frag)
				return -EINVAL;

			/* Invalid bits in the flow label mask? */
			if (ntohl(mask->ipv6_label) & 0xFFF00000)
				return -EINVAL;
		} else {
			if (ipv6_key->ipv6_proto != flow_key->ip.proto)
				return -EINVAL;

			if (ipv6_key->ipv6_frag != flow_key->ip.frag)
				return -EINVAL;
		}
		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_UDP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_MPLS:
		if (!eth_p_mpls(eth_type))
			return -EINVAL;
		break;

	case OVS_KEY_ATTR_SCTP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_SCTP)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	/* Convert non-masked non-tunnel set actions to masked set actions. */
	if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
		int start, len = key_len * 2;
		struct nlattr *at;

		*skip_copy = true;

		start = add_nested_action_start(sfa,
						OVS_ACTION_ATTR_SET_TO_MASKED,
						log);
		if (start < 0)
			return start;

		at = __add_action(sfa, key_type, NULL, len, log);
		if (IS_ERR(at))
			return PTR_ERR(at);

		memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
		memset(nla_data(at) + key_len, 0xff, key_len);    /* Mask. */
		/* Clear non-writeable bits from otherwise writeable fields. */
		if (key_type == OVS_KEY_ATTR_IPV6) {
			struct ovs_key_ipv6 *mask = nla_data(at) + key_len;

			mask->ipv6_label &= htonl(0x000FFFFF);
		}
		add_nested_action_end(*sfa, start);
	}

	return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
		[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

static int copy_action(const struct nlattr *from,
		       struct sw_flow_actions **sfa, bool log)
{
	int totlen = NLA_ALIGN(from->nla_len);
	struct nlattr *to;

	to = reserve_sfa_size(sfa, from->nla_len, log);
	if (IS_ERR(to))
		return PTR_ERR(to);

	memcpy(to, from, totlen);
	return 0;
}
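
/* Validate (and where necessary transform) one level of actions against the
 * flow key they will apply to. eth_type and vlan_tci are threaded through the
 * walk because push/pop VLAN and MPLS actions change which set actions remain
 * valid further down the list.
 */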

static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  int depth, struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
			[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
			[OVS_ACTION_ATTR_CT] = (u32)-1,
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_HASH: {
			const struct ovs_action_hash *act_hash = nla_data(a);

			switch (act_hash->hash_alg) {
			case OVS_HASH_ALG_L4:
				break;
			default:
				return -EINVAL;
			}

			break;
		}

		case OVS_ACTION_ATTR_POP_VLAN:
			vlan_tci = htons(0);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			vlan_tci = vlan->vlan_tci;
			break;

		case OVS_ACTION_ATTR_RECIRC:
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			const struct ovs_action_push_mpls *mpls = nla_data(a);

			if (!eth_p_mpls(mpls->mpls_ethertype))
				return -EINVAL;
			/* Prohibit push MPLS other than to a white list
			 * for packets that have a known tag order.
			 */
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    (eth_type != htons(ETH_P_IP) &&
			     eth_type != htons(ETH_P_IPV6) &&
			     eth_type != htons(ETH_P_ARP) &&
			     eth_type != htons(ETH_P_RARP) &&
			     !eth_p_mpls(eth_type)))
				return -EINVAL;
			eth_type = mpls->mpls_ethertype;
			break;
		}

		case OVS_ACTION_ATTR_POP_MPLS:
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    !eth_p_mpls(eth_type))
				return -EINVAL;

			/* Disallow subsequent L2.5+ set and mpls_pop actions
			 * as there is no check here to ensure that the new
			 * eth_type is valid and thus set actions could
			 * write off the end of the packet or otherwise
			 * corrupt it.
			 *
			 * Support for these actions is planned using packet
			 * recirculation.
			 */
			eth_type = htons(0);
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key, sfa,
					   &skip_copy, eth_type, false, log);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
			err = validate_set(a, key, sfa,
					   &skip_copy, eth_type, true, log);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_and_copy_sample(net, a, key, depth, sfa,
						       eth_type, vlan_tci, log);
			if (err)
				return err;
			skip_copy = true;
			break;

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_copy_action(net, a, key, sfa, log);
			if (err)
				return err;
			skip_copy = true;
			break;

		default:
			OVS_NLERR(log, "Unknown Action type %d", type);
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa, log);
			if (err)
				return err;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
2315 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2316 const struct sw_flow_key *key,
2317 struct sw_flow_actions **sfa, bool log)
2321 *sfa = nla_alloc_flow_actions(nla_len(attr), log);
2323 return PTR_ERR(*sfa);
2325 (*sfa)->orig_len = nla_len(attr);
2326 err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
2329 ovs_nla_free_flow_actions(*sfa);

static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
	const struct nlattr *a;
	struct nlattr *start;
	int err = 0, rem;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		struct nlattr *st_sample;

		switch (type) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
				    sizeof(u32), nla_data(a)))
				return -EMSGSIZE;
			break;
		case OVS_SAMPLE_ATTR_ACTIONS:
			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
			if (!st_sample)
				return -EMSGSIZE;
			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
			if (err)
				return err;
			nla_nest_end(skb, st_sample);
			break;
		}
	}

	nla_nest_end(skb, start);
	return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_TUNNEL_INFO: {
		struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
		struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;

		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ipv4_tun_to_nlattr(skb, &tun_info->key,
					 tun_info->options_len ?
					     ip_tunnel_info_opts(tun_info) : NULL,
					 tun_info->options_len);
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	}
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}

static int masked_set_action_to_set_action_attr(const struct nlattr *a,
						struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	struct nlattr *nla;
	size_t key_len = nla_len(ovs_key) / 2;

	/* Revert the conversion we did from a non-masked set action to
	 * masked set action.
	 */
	nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}

int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
	const struct nlattr *a;
	int rem, err;

	nla_for_each_attr(a, attr, len, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_ACTION_ATTR_SET:
			err = set_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = masked_set_action_to_set_action_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_action_to_attr(nla_data(a), skb);
			if (err)
				return err;
			break;

		default:
			if (nla_put(skb, type, nla_len(a), nla_data(a)))
				return -EMSGSIZE;
			break;
		}
	}

	return 0;
}