/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
30 #include "openflow/nicira-ext.h"
32 #include "unaligned.h"
36 VLOG_DEFINE_THIS_MODULE(nx_match);
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  For a masked entry, half of the payload is value and half is
 * mask, so only half of the payload length counts as data. */
static int
nxm_field_bytes(uint32_t header)
{
    unsigned int payload = NXM_LENGTH(header);

    if (NXM_HASMASK(header)) {
        return payload / 2;
    }
    return payload;
}
/* Returns the width of the data for a field with the given 'header', in
 * bits. */
static int
nxm_field_bits(uint32_t header)
{
    return 8 * nxm_field_bytes(header);
}
59 /* nx_pull_match() and helpers. */
62 nx_entry_ok(const void *p, unsigned int match_len)
64 unsigned int payload_len;
70 VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
75 memcpy(&header_be, p, 4);
76 header = ntohl(header_be);
78 payload_len = NXM_LENGTH(header);
80 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
84 if (match_len < payload_len + 4) {
85 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len + 4, match_len);
93 /* Given NXM/OXM value 'value' and mask 'mask', each 'width' bytes long,
94 * checks for any 1-bit in the value where there is a 0-bit in the mask. If it
95 * finds one, logs a warning. */
97 check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
99 unsigned int width = mf->n_bytes;
100 const uint8_t *value = p + 4;
101 const uint8_t *mask = p + 4 + width;
104 for (i = 0; i < width; i++) {
105 if (value[i] & ~mask[i]) {
106 if (!VLOG_DROP_WARN(&rl)) {
107 char *s = nx_match_to_string(p, width * 2 + 4);
108 VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
109 "bits wildcarded by the mask. (Future versions "
110 "of OVS may report this as an OpenFlow error.)",
119 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
120 struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
124 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
126 match_init_catchall(match);
128 *cookie = *cookie_mask = htonll(0);
135 (header = nx_entry_ok(p, match_len)) != 0;
136 p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
137 const struct mf_field *mf;
140 mf = mf_from_nxm_header(header);
143 error = OFPERR_OFPBMC_BAD_FIELD;
147 } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
148 error = OFPERR_OFPBMC_BAD_PREREQ;
149 } else if (!mf_is_all_wild(mf, &match->wc)) {
150 error = OFPERR_OFPBMC_DUP_FIELD;
152 unsigned int width = mf->n_bytes;
153 union mf_value value;
155 memcpy(&value, p + 4, width);
156 if (!mf_is_value_valid(mf, &value)) {
157 error = OFPERR_OFPBMC_BAD_VALUE;
158 } else if (!NXM_HASMASK(header)) {
160 mf_set_value(mf, &value, match);
164 memcpy(&mask, p + 4 + width, width);
165 if (!mf_is_mask_valid(mf, &mask)) {
166 error = OFPERR_OFPBMC_BAD_MASK;
169 check_mask_consistency(p, mf);
170 mf_set(mf, &value, &mask, match);
175 /* Check if the match is for a cookie rather than a classifier rule. */
176 if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
178 error = OFPERR_OFPBMC_DUP_FIELD;
180 unsigned int width = sizeof *cookie;
182 memcpy(cookie, p + 4, width);
183 if (NXM_HASMASK(header)) {
184 memcpy(cookie_mask, p + 4 + width, width);
186 *cookie_mask = OVS_BE64_MAX;
193 VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
194 "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
196 NXM_VENDOR(header), NXM_FIELD(header),
197 NXM_HASMASK(header), NXM_LENGTH(header),
198 ofperr_to_string(error));
203 return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
207 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
209 ovs_be64 *cookie, ovs_be64 *cookie_mask)
214 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
216 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
217 "multiple of 8, is longer than space in message (max "
218 "length %"PRIu32")", match_len, ofpbuf_size(b));
219 return OFPERR_OFPBMC_BAD_LEN;
223 return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
226 /* Parses the nx_match formatted match description in 'b' with length
227 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
228 * are valid pointers, then stores the cookie and mask in them if 'b' contains
229 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
231 * Fails with an error upon encountering an unknown NXM header.
233 * Returns 0 if successful, otherwise an OpenFlow error code. */
235 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
236 ovs_be64 *cookie, ovs_be64 *cookie_mask)
238 return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
241 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
242 * instead of failing with an error. */
244 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
246 ovs_be64 *cookie, ovs_be64 *cookie_mask)
248 return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
252 oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
254 struct ofp11_match_header *omh = ofpbuf_data(b);
258 if (ofpbuf_size(b) < sizeof *omh) {
259 return OFPERR_OFPBMC_BAD_LEN;
262 match_len = ntohs(omh->length);
263 if (match_len < sizeof *omh) {
264 return OFPERR_OFPBMC_BAD_LEN;
267 if (omh->type != htons(OFPMT_OXM)) {
268 return OFPERR_OFPBMC_BAD_TYPE;
271 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
273 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
274 "multiple of 8, is longer than space in message (max "
275 "length %"PRIu32")", match_len, ofpbuf_size(b));
276 return OFPERR_OFPBMC_BAD_LEN;
279 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
280 strict, match, NULL, NULL);
283 /* Parses the oxm formatted match description preceded by a struct
284 * ofp11_match_header in 'b'. Stores the result in 'match'.
286 * Fails with an error when encountering unknown OXM headers.
288 * Returns 0 if successful, otherwise an OpenFlow error code. */
290 oxm_pull_match(struct ofpbuf *b, struct match *match)
292 return oxm_pull_match__(b, true, match);
295 /* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
296 * OXM headers instead of failing with an error when they are encountered. */
298 oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
300 return oxm_pull_match__(b, false, match);
303 /* nx_put_match() and helpers.
305 * 'put' functions whose names end in 'w' add a wildcarded field.
306 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
307 * Other 'put' functions add exact-match fields.
311 nxm_put_header(struct ofpbuf *b, uint32_t header)
313 ovs_be32 n_header = htonl(header);
314 ofpbuf_put(b, &n_header, sizeof n_header);
/* Appends an exact-match 8-bit entry: header followed by 'value'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

/* Appends a possibly-masked 8-bit entry.  An all-zero mask appends nothing;
 * an all-ones mask appends an exact match; anything else appends the masked
 * (wildcard) form of the header with value and mask. */
static void
nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
{
    switch (mask) {
    case 0:
        break;

    case UINT8_MAX:
        nxm_put_8(b, header, value);
        break;

    default:
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, &value, sizeof value);
        ofpbuf_put(b, &mask, sizeof mask);
        break;
    }
}
343 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
345 nxm_put_header(b, header);
346 ofpbuf_put(b, &value, sizeof value);
350 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
352 nxm_put_header(b, header);
353 ofpbuf_put(b, &value, sizeof value);
354 ofpbuf_put(b, &mask, sizeof mask);
358 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
365 nxm_put_16(b, header, value);
369 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
375 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
377 nxm_put_header(b, header);
378 ofpbuf_put(b, &value, sizeof value);
382 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
384 nxm_put_header(b, header);
385 ofpbuf_put(b, &value, sizeof value);
386 ofpbuf_put(b, &mask, sizeof mask);
390 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
397 nxm_put_32(b, header, value);
401 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
407 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
409 nxm_put_header(b, header);
410 ofpbuf_put(b, &value, sizeof value);
414 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
416 nxm_put_header(b, header);
417 ofpbuf_put(b, &value, sizeof value);
418 ofpbuf_put(b, &mask, sizeof mask);
422 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
429 nxm_put_64(b, header, value);
433 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
439 nxm_put_eth(struct ofpbuf *b, uint32_t header,
440 const uint8_t value[ETH_ADDR_LEN])
442 nxm_put_header(b, header);
443 ofpbuf_put(b, value, ETH_ADDR_LEN);
447 nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
448 const uint8_t value[ETH_ADDR_LEN],
449 const uint8_t mask[ETH_ADDR_LEN])
451 if (!eth_addr_is_zero(mask)) {
452 if (eth_mask_is_exact(mask)) {
453 nxm_put_eth(b, header, value);
455 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
456 ofpbuf_put(b, value, ETH_ADDR_LEN);
457 ofpbuf_put(b, mask, ETH_ADDR_LEN);
/* Appends a possibly-masked IPv6-address entry: nothing when 'mask' is all
 * zeros, an exact match when it is all ones, otherwise the wildcard form. */
static void
nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
             const struct in6_addr *value, const struct in6_addr *mask)
{
    if (ipv6_mask_is_any(mask)) {
        return;
    }

    if (ipv6_mask_is_exact(mask)) {
        nxm_put_header(b, header);
        ofpbuf_put(b, value, sizeof *value);
    } else {
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, value, sizeof *value);
        ofpbuf_put(b, mask, sizeof *mask);
    }
}
479 nxm_put_frag(struct ofpbuf *b, const struct match *match)
481 uint8_t nw_frag = match->flow.nw_frag;
482 uint8_t nw_frag_mask = match->wc.masks.nw_frag;
484 switch (nw_frag_mask) {
488 case FLOW_NW_FRAG_MASK:
489 nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
493 nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
494 nw_frag_mask & FLOW_NW_FRAG_MASK);
499 /* Appends to 'b' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
502 nxm_put_ip(struct ofpbuf *b, const struct match *match, bool oxm)
504 const struct flow *flow = &match->flow;
506 if (flow->dl_type == htons(ETH_TYPE_IP)) {
507 nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
508 flow->nw_src, match->wc.masks.nw_src);
509 nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
510 flow->nw_dst, match->wc.masks.nw_dst);
512 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
513 &flow->ipv6_src, &match->wc.masks.ipv6_src);
514 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
515 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
518 nxm_put_frag(b, match);
520 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
522 nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2);
524 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
528 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
529 nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
530 flow->nw_tos & IP_ECN_MASK);
533 if (!oxm && match->wc.masks.nw_ttl) {
534 nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
537 nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
538 flow->ipv6_label, match->wc.masks.ipv6_label);
540 if (match->wc.masks.nw_proto) {
541 nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
543 if (flow->nw_proto == IPPROTO_TCP) {
544 nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
545 flow->tp_src, match->wc.masks.tp_src);
546 nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
547 flow->tp_dst, match->wc.masks.tp_dst);
548 nxm_put_16m(b, NXM_NX_TCP_FLAGS,
549 flow->tcp_flags, match->wc.masks.tcp_flags);
550 } else if (flow->nw_proto == IPPROTO_UDP) {
551 nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
552 flow->tp_src, match->wc.masks.tp_src);
553 nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
554 flow->tp_dst, match->wc.masks.tp_dst);
555 } else if (flow->nw_proto == IPPROTO_SCTP) {
556 nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
557 match->wc.masks.tp_src);
558 nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
559 match->wc.masks.tp_dst);
560 } else if (is_icmpv4(flow)) {
561 if (match->wc.masks.tp_src) {
562 nxm_put_8(b, oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
563 ntohs(flow->tp_src));
565 if (match->wc.masks.tp_dst) {
566 nxm_put_8(b, oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE,
567 ntohs(flow->tp_dst));
569 } else if (is_icmpv6(flow)) {
570 if (match->wc.masks.tp_src) {
571 nxm_put_8(b, oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
572 ntohs(flow->tp_src));
574 if (match->wc.masks.tp_dst) {
575 nxm_put_8(b, oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE,
576 ntohs(flow->tp_dst));
578 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
579 flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
580 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
581 &flow->nd_target, &match->wc.masks.nd_target);
582 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
583 uint32_t field = oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL;
584 nxm_put_eth_masked(b, field,
585 flow->arp_sha, match->wc.masks.arp_sha);
587 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
588 uint32_t field = oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL;
589 nxm_put_eth_masked(b, field,
590 flow->arp_tha, match->wc.masks.arp_tha);
597 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
598 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
599 * Otherwise, 'cookie_mask' should be zero.
601 * This function can cause 'b''s data to be reallocated.
603 * Returns the number of bytes appended to 'b', excluding padding.
605 * If 'match' is a catch-all rule that matches every packet, then this function
606 * appends nothing to 'b' and returns 0. */
608 nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
609 ovs_be64 cookie, ovs_be64 cookie_mask)
611 const struct flow *flow = &match->flow;
612 const size_t start_len = ofpbuf_size(b);
616 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
619 if (match->wc.masks.dp_hash) {
621 nxm_put_32m(b, NXM_NX_DP_HASH, htonl(flow->dp_hash),
622 htonl(match->wc.masks.dp_hash));
626 if (match->wc.masks.recirc_id) {
628 nxm_put_32(b, NXM_NX_RECIRC_ID, htonl(flow->recirc_id));
632 if (match->wc.masks.in_port.ofp_port) {
633 ofp_port_t in_port = flow->in_port.ofp_port;
635 nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
637 nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port)));
642 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
643 flow->dl_src, match->wc.masks.dl_src);
644 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
645 flow->dl_dst, match->wc.masks.dl_dst);
646 nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
647 ofputil_dl_type_to_openflow(flow->dl_type),
648 match->wc.masks.dl_type);
652 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
653 ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
654 ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
656 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
657 nxm_put_16(b, OXM_OF_VLAN_VID, vid);
659 nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
662 if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
663 nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
667 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
668 match->wc.masks.vlan_tci);
672 if (eth_type_mpls(flow->dl_type)) {
673 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
674 nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse[0]));
677 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
678 nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse[0]));
681 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
682 nxm_put_32(b, OXM_OF_MPLS_LABEL,
683 htonl(mpls_lse_to_label(flow->mpls_lse[0])));
688 if (is_ip_any(flow)) {
689 nxm_put_ip(b, match, oxm);
690 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
691 flow->dl_type == htons(ETH_TYPE_RARP)) {
693 if (match->wc.masks.nw_proto) {
694 nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
695 htons(flow->nw_proto));
697 nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
698 flow->nw_src, match->wc.masks.nw_src);
699 nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
700 flow->nw_dst, match->wc.masks.nw_dst);
701 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
702 flow->arp_sha, match->wc.masks.arp_sha);
703 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
704 flow->arp_tha, match->wc.masks.arp_tha);
708 nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
709 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
711 /* Other tunnel metadata. */
712 nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC,
713 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
714 nxm_put_32m(b, NXM_NX_TUN_IPV4_DST,
715 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
718 for (i = 0; i < FLOW_N_REGS; i++) {
719 nxm_put_32m(b, NXM_NX_REG(i),
720 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
724 nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
725 htonl(match->wc.masks.pkt_mark));
727 /* OpenFlow 1.1+ Metadata. */
728 nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
731 nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
733 match_len = ofpbuf_size(b) - start_len;
737 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
738 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
739 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
740 * Otherwise, 'cookie_mask' should be zero.
742 * This function can cause 'b''s data to be reallocated.
744 * Returns the number of bytes appended to 'b', excluding padding. The return
745 * value can be zero if it appended nothing at all to 'b' (which happens if
746 * 'cr' is a catch-all rule that matches every packet). */
748 nx_put_match(struct ofpbuf *b, const struct match *match,
749 ovs_be64 cookie, ovs_be64 cookie_mask)
751 int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
753 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
758 /* Appends to 'b' an struct ofp11_match_header followed by the oxm format that
759 * expresses 'cr', plus enough zero bytes to pad the data appended out to a
762 * This function can cause 'b''s data to be reallocated.
764 * Returns the number of bytes appended to 'b', excluding the padding. Never
767 oxm_put_match(struct ofpbuf *b, const struct match *match)
770 struct ofp11_match_header *omh;
771 size_t start_len = ofpbuf_size(b);
772 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
774 ofpbuf_put_uninit(b, sizeof *omh);
775 match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
776 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
778 omh = ofpbuf_at(b, start_len, sizeof *omh);
779 omh->type = htons(OFPMT_OXM);
780 omh->length = htons(match_len);
785 /* nx_match_to_string() and helpers. */
787 static void format_nxm_field_name(struct ds *, uint32_t header);
790 nx_match_to_string(const uint8_t *p, unsigned int match_len)
796 return xstrdup("<any>");
800 while ((header = nx_entry_ok(p, match_len)) != 0) {
801 unsigned int length = NXM_LENGTH(header);
802 unsigned int value_len = nxm_field_bytes(header);
803 const uint8_t *value = p + 4;
804 const uint8_t *mask = value + value_len;
808 ds_put_cstr(&s, ", ");
811 format_nxm_field_name(&s, header);
812 ds_put_char(&s, '(');
814 for (i = 0; i < value_len; i++) {
815 ds_put_format(&s, "%02x", value[i]);
817 if (NXM_HASMASK(header)) {
818 ds_put_char(&s, '/');
819 for (i = 0; i < value_len; i++) {
820 ds_put_format(&s, "%02x", mask[i]);
823 ds_put_char(&s, ')');
826 match_len -= 4 + length;
831 ds_put_cstr(&s, ", ");
834 ds_put_format(&s, "<%u invalid bytes>", match_len);
837 return ds_steal_cstr(&s);
841 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
843 const struct ofp11_match_header *omh = ofpbuf_data(p);
849 if (match_len < sizeof *omh) {
850 ds_put_format(&s, "<match too short: %u>", match_len);
854 if (omh->type != htons(OFPMT_OXM)) {
855 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
859 match_len_ = ntohs(omh->length);
860 if (match_len_ < sizeof *omh) {
861 ds_put_format(&s, "<match length field too short: %u>", match_len_);
865 if (match_len_ != match_len) {
866 ds_put_format(&s, "<match length field incorrect: %u != %u>",
867 match_len_, match_len);
871 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
872 match_len - sizeof *omh);
875 return ds_steal_cstr(&s);
879 format_nxm_field_name(struct ds *s, uint32_t header)
881 const struct mf_field *mf = mf_from_nxm_header(header);
883 ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
884 if (NXM_HASMASK(header)) {
885 ds_put_cstr(s, "_W");
887 } else if (header == NXM_NX_COOKIE) {
888 ds_put_cstr(s, "NXM_NX_COOKIE");
889 } else if (header == NXM_NX_COOKIE_W) {
890 ds_put_cstr(s, "NXM_NX_COOKIE_W");
892 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
897 parse_nxm_field_name(const char *name, int name_len)
902 /* Check whether it's a field name. */
903 wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
908 for (i = 0; i < MFF_N_IDS; i++) {
909 const struct mf_field *mf = mf_from_id(i);
913 !strncmp(mf->nxm_name, name, name_len) &&
914 mf->nxm_name[name_len] == '\0') {
915 header = mf->nxm_header;
916 } else if (mf->oxm_name &&
917 !strncmp(mf->oxm_name, name, name_len) &&
918 mf->oxm_name[name_len] == '\0') {
919 header = mf->oxm_header;
926 } else if (mf->maskable != MFM_NONE) {
927 return NXM_MAKE_WILD_HEADER(header);
931 if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
932 (name_len == strlen("NXM_NX_COOKIE"))) {
934 return NXM_NX_COOKIE;
936 return NXM_NX_COOKIE_W;
940 /* Check whether it's a 32-bit field header value as hex.
941 * (This isn't ordinarily useful except for testing error behavior.) */
943 uint32_t header = hexits_value(name, name_len, NULL);
944 if (header != UINT_MAX) {
952 /* nx_match_from_string(). */
/* Parses 's' as a textual nx_match (as produced by nx_match_to_string()) and
 * appends the binary encoding to 'b'.  Calls ovs_fatal() on parse errors.
 * Returns the number of bytes appended. */
static int
nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = ofpbuf_size(b);

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'ofpbuf_data(b)' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;      /* Skip the name and the '('. */

        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    return ofpbuf_size(b) - start_len;
}
/* Parses 's' as a textual nx_match into 'b', padding the result with zeros to
 * a multiple of 8 bytes.  Returns the unpadded length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len = nx_match_from_string_raw(s, b);

    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
    return match_len;
}
1022 oxm_match_from_string(const char *s, struct ofpbuf *b)
1025 struct ofp11_match_header *omh;
1026 size_t start_len = ofpbuf_size(b);
1028 ofpbuf_put_uninit(b, sizeof *omh);
1029 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1030 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1032 omh = ofpbuf_at(b, start_len, sizeof *omh);
1033 omh->type = htons(OFPMT_OXM);
1034 omh->length = htons(match_len);
1039 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1042 * Returns NULL if successful, otherwise a malloc()'d string describing the
1043 * error. The caller is responsible for freeing the returned string. */
1044 char * WARN_UNUSED_RESULT
1045 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1047 const char *full_s = s;
1050 error = mf_parse_subfield__(&move->src, &s);
1054 if (strncmp(s, "->", 2)) {
1055 return xasprintf("%s: missing `->' following source", full_s);
1058 error = mf_parse_subfield(&move->dst, s);
1063 if (move->src.n_bits != move->dst.n_bits) {
1064 return xasprintf("%s: source field is %d bits wide but destination is "
1065 "%d bits wide", full_s,
1066 move->src.n_bits, move->dst.n_bits);
1071 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1074 * Returns NULL if successful, otherwise a malloc()'d string describing the
1075 * error. The caller is responsible for freeing the returned string. */
1076 char * WARN_UNUSED_RESULT
1077 nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
1079 const char *full_s = s;
1080 uint64_t value = strtoull(s, (char **) &s, 0);
1083 if (strncmp(s, "->", 2)) {
1084 return xasprintf("%s: missing `->' following value", full_s);
1087 error = mf_parse_subfield(&load->dst, s);
1092 if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
1093 return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
1094 full_s, value, load->dst.n_bits);
1097 load->subvalue.be64[0] = htonll(0);
1098 load->subvalue.be64[1] = htonll(value);
1102 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1105 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1107 ds_put_format(s, "move:");
1108 mf_format_subfield(&move->src, s);
1109 ds_put_cstr(s, "->");
1110 mf_format_subfield(&move->dst, s);
1114 nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
1116 ds_put_cstr(s, "load:");
1117 mf_format_subvalue(&load->subvalue, s);
1118 ds_put_cstr(s, "->");
1119 mf_format_subfield(&load->dst, s);
1123 nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
1124 struct ofpbuf *ofpacts)
1126 struct ofpact_reg_move *move;
1128 move = ofpact_put_REG_MOVE(ofpacts);
1129 move->src.field = mf_from_nxm_header(ntohl(narm->src));
1130 move->src.ofs = ntohs(narm->src_ofs);
1131 move->src.n_bits = ntohs(narm->n_bits);
1132 move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
1133 move->dst.ofs = ntohs(narm->dst_ofs);
1134 move->dst.n_bits = ntohs(narm->n_bits);
1136 return nxm_reg_move_check(move, NULL);
1140 nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
1141 struct ofpbuf *ofpacts)
1143 struct ofpact_reg_load *load;
1145 load = ofpact_put_REG_LOAD(ofpacts);
1146 load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
1147 load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
1148 load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
1149 load->subvalue.be64[1] = narl->value;
1151 /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
1153 if (load->dst.n_bits < 64 &&
1154 ntohll(narl->value) >> load->dst.n_bits) {
1155 return OFPERR_OFPBAC_BAD_ARGUMENT;
1158 return nxm_reg_load_check(load, NULL);
1162 nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
1166 error = mf_check_src(&move->src, flow);
1171 return mf_check_dst(&move->dst, NULL);
1175 nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
1177 return mf_check_dst(&load->dst, flow);
1181 nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
1182 struct ofpbuf *openflow)
1184 struct nx_action_reg_move *narm;
1186 narm = ofputil_put_NXAST_REG_MOVE(openflow);
1187 narm->n_bits = htons(move->dst.n_bits);
1188 narm->src_ofs = htons(move->src.ofs);
1189 narm->dst_ofs = htons(move->dst.ofs);
1190 narm->src = htonl(move->src.field->nxm_header);
1191 narm->dst = htonl(move->dst.field->nxm_header);
1195 nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
1196 struct ofpbuf *openflow)
1198 struct nx_action_reg_load *narl;
1200 narl = ofputil_put_NXAST_REG_LOAD(openflow);
1201 narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
1202 narl->dst = htonl(load->dst.field->nxm_header);
1203 narl->value = load->subvalue.be64[1];
1206 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1209 nxm_execute_reg_move(const struct ofpact_reg_move *move,
1210 struct flow *flow, struct flow_wildcards *wc)
1212 union mf_value src_value;
1213 union mf_value dst_value;
1215 mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
1216 mf_mask_field_and_prereqs(move->src.field, &wc->masks);
1218 mf_get_value(move->dst.field, flow, &dst_value);
1219 mf_get_value(move->src.field, flow, &src_value);
1220 bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
1221 &dst_value, move->dst.field->n_bytes, move->dst.ofs,
1223 mf_set_flow_value(move->dst.field, &dst_value, flow);
1227 nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
1228 struct flow_wildcards *wc)
1230 /* Since at the datapath interface we do not have set actions for
1231 * individual fields, but larger sets of fields for a given protocol
1232 * layer, the set action will in practice only ever apply to exactly
1233 * matched flows for the given protocol layer. For example, if the
1234 * reg_load changes the IP TTL, the corresponding datapath action will
1235 * rewrite also the IP addresses and TOS byte. Since these other field
1236 * values may not be explicitly set, they depend on the incoming flow field
1237 * values, and are hence all of them are set in the wildcards masks, when
1238 * the action is committed to the datapath. For the rare case, where the
1239 * reg_load action does not actually change the value, and no other flow
1240 * field values are set (or loaded), the datapath action is skipped, and
1241 * no mask bits are set. Such a datapath flow should, however, be
1242 * dependent on the specific field value, so the corresponding wildcard
1243 * mask bits must be set, lest the datapath flow be applied to packets
1244 * containing some other value in the field and the field value remain
1245 * unchanged regardless of the incoming value.
1247 * We set the masks here for the whole fields, and their prerequisities.
1248 * Even if only the lower byte of a TCP destination port is set,
1249 * we set the mask for the whole field, and also the ip_proto in the IP
1250 * header, so that the kernel flow would not be applied on, e.g., a UDP
1251 * packet, or any other IP protocol in addition to TCP packets.
1253 mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
1254 mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
1258 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1259 struct flow *flow, struct flow_wildcards *wc)
1261 union mf_subvalue src_subvalue;
1262 union mf_subvalue mask_value;
1263 ovs_be64 src_data_be = htonll(src_data);
1265 memset(&mask_value, 0xff, sizeof mask_value);
1266 mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1268 bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1269 &src_subvalue, sizeof src_subvalue, 0,
1270 sizeof src_data_be * 8);
1271 mf_write_subfield_flow(dst, &src_subvalue, flow);
1274 /* nxm_parse_stack_action, works for both push() and pop(). */
1276 /* Parses 's' as a "push" or "pop" action, in the form described in
1277 * ovs-ofctl(8), into '*stack_action'.
1279 * Returns NULL if successful, otherwise a malloc()'d string describing the
1280 * error. The caller is responsible for freeing the returned string. */
1281 char * WARN_UNUSED_RESULT
1282 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1286 error = mf_parse_subfield__(&stack_action->subfield, &s);
1292 return xasprintf("%s: trailing garbage following push or pop", s);
1299 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1301 ds_put_cstr(s, "push:");
1302 mf_format_subfield(&push->subfield, s);
1306 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1308 ds_put_cstr(s, "pop:");
1309 mf_format_subfield(&pop->subfield, s);
1312 /* Common set for both push and pop actions. */
1314 stack_action_from_openflow__(const struct nx_action_stack *nasp,
1315 struct ofpact_stack *stack_action)
1317 stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
1318 stack_action->subfield.ofs = ntohs(nasp->offset);
1319 stack_action->subfield.n_bits = ntohs(nasp->n_bits);
1323 nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
1324 struct nx_action_stack *nasp)
1326 nasp->offset = htons(stack_action->subfield.ofs);
1327 nasp->n_bits = htons(stack_action->subfield.n_bits);
1328 nasp->field = htonl(stack_action->subfield.field->nxm_header);
1332 nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
1333 struct ofpbuf *ofpacts)
1335 struct ofpact_stack *push;
1337 push = ofpact_put_STACK_PUSH(ofpacts);
1338 stack_action_from_openflow__(nasp, push);
1340 return nxm_stack_push_check(push, NULL);
1344 nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
1345 struct ofpbuf *ofpacts)
1347 struct ofpact_stack *pop;
1349 pop = ofpact_put_STACK_POP(ofpacts);
1350 stack_action_from_openflow__(nasp, pop);
1352 return nxm_stack_pop_check(pop, NULL);
1356 nxm_stack_push_check(const struct ofpact_stack *push,
1357 const struct flow *flow)
1359 return mf_check_src(&push->subfield, flow);
1363 nxm_stack_pop_check(const struct ofpact_stack *pop,
1364 const struct flow *flow)
1366 return mf_check_dst(&pop->subfield, flow);
/* Encodes 'stack' as a wire-format NXAST_STACK_PUSH action in 'openflow'. */
static void
nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
                        struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
}

/* Encodes 'stack' as a wire-format NXAST_STACK_POP action in 'openflow'. */
static void
nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
                       struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
}
1383 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
1385 nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
1387 ofpbuf_put(stack, v, sizeof *v);
1390 static union mf_subvalue *
1391 nx_stack_pop(struct ofpbuf *stack)
1393 union mf_subvalue *v = NULL;
1395 if (ofpbuf_size(stack)) {
1397 ofpbuf_set_size(stack, ofpbuf_size(stack) - sizeof *v);
1398 v = (union mf_subvalue *) ofpbuf_tail(stack);
1405 nxm_execute_stack_push(const struct ofpact_stack *push,
1406 const struct flow *flow, struct flow_wildcards *wc,
1407 struct ofpbuf *stack)
1409 union mf_subvalue mask_value;
1410 union mf_subvalue dst_value;
1412 memset(&mask_value, 0xff, sizeof mask_value);
1413 mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
1415 mf_read_subfield(&push->subfield, flow, &dst_value);
1416 nx_stack_push(stack, &dst_value);
1420 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1421 struct flow *flow, struct flow_wildcards *wc,
1422 struct ofpbuf *stack)
1424 union mf_subvalue *src_value;
1426 src_value = nx_stack_pop(stack);
1428 /* Only pop if stack is not empty. Otherwise, give warning. */
1430 union mf_subvalue mask_value;
1432 memset(&mask_value, 0xff, sizeof mask_value);
1433 mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
1434 mf_write_subfield_flow(&pop->subfield, src_value, flow);
1436 if (!VLOG_DROP_WARN(&rl)) {
1437 char *flow_str = flow_to_string(flow);
1438 VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"