OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */
+ OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
+ OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
__OVS_TUNNEL_KEY_ATTR_MAX
};
struct ofpbuf execute_actions;
uint64_t stub[256 / 8];
struct pkt_metadata *md = &packet->md;
+ bool dst_set;
- if (md->tunnel.ip_dst) {
+ dst_set = flow_tnl_dst_is_set(&md->tunnel);
+ if (dst_set) {
/* The Linux kernel datapath throws away the tunnel information
* that we supply as metadata. We have to use a "set" action to
* supply it. */
aux->error = dpif_execute(aux->dpif, &execute);
log_execute_message(aux->dpif, &execute, true, aux->error);
- if (md->tunnel.ip_dst) {
+ if (dst_set) {
ofpbuf_uninit(&execute_actions);
}
break;
* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 34)
+#if (FLOW_WC_SEQ != 35)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
/* Metadata. */
- if (md->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&md->tunnel)) {
miniflow_push_words(mf, tunnel, &md->tunnel,
offsetof(struct flow_tnl, metadata) /
sizeof(uint64_t));
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
match_init_catchall(flow_metadata);
if (flow->tunnel.tun_id != htonll(0)) {
match_set_tun_flags(flow_metadata,
flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
}
- if (flow->tunnel.ip_src != htonl(0)) {
+ if (flow->tunnel.ip_src) {
match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
}
- if (flow->tunnel.ip_dst != htonl(0)) {
+ if (flow->tunnel.ip_dst) {
match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
}
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) {
+ match_set_tun_ipv6_src(flow_metadata, &flow->tunnel.ipv6_src);
+ }
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_dst)) {
+ match_set_tun_ipv6_dst(flow_metadata, &flow->tunnel.ipv6_dst);
+ }
if (flow->tunnel.gbp_id != htons(0)) {
match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
}
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.tun_id);
}
WC_MASK_FIELD(wc, tunnel.ip_src);
WC_MASK_FIELD(wc, tunnel.ip_dst);
+ WC_MASK_FIELD(wc, tunnel.ipv6_src);
+ WC_MASK_FIELD(wc, tunnel.ipv6_dst);
WC_MASK_FIELD(wc, tunnel.flags);
WC_MASK_FIELD(wc, tunnel.ip_tos);
WC_MASK_FIELD(wc, tunnel.ip_ttl);
flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
flowmap_init(map);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
if (flow->tunnel.metadata.present.map) {
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields and dp_hash. */
- BUILD_ASSERT(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT(FLOW_WC_SEQ == 35);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
flow->dp_hash = 0;
/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 34
+#define FLOW_WC_SEQ 35
/* Number of Open vSwitch extension 32-bit registers. */
#define FLOW_N_REGS 8
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
BUILD_ASSERT_DECL(offsetof(struct flow, igmp_group_ip4) + sizeof(uint32_t)
== sizeof(struct flow_tnl) + 216
- && FLOW_WC_SEQ == 34);
+ && FLOW_WC_SEQ == 35);
/* Incremental points at which flow classification may be performed in
* segments.
match->flow.tunnel.ip_dst = dst & mask;
}
+/* Sets an exact match on the tunnel IPv6 source address in 'match':
+ * stores '*src' in the flow and sets the corresponding mask to all-ones. */
+void
+match_set_tun_ipv6_src(struct match *match, const struct in6_addr *src)
+{
+ match->flow.tunnel.ipv6_src = *src;
+ match->wc.masks.tunnel.ipv6_src = in6addr_exact;
+}
+
+/* Sets a masked match on the tunnel IPv6 source address: only the bits set
+ * in '*mask' are significant.  The flow value is stored pre-masked
+ * (src & mask) so that flow and mask stay consistent. */
+void
+match_set_tun_ipv6_src_masked(struct match *match, const struct in6_addr *src,
+ const struct in6_addr *mask)
+{
+ match->flow.tunnel.ipv6_src = ipv6_addr_bitand(src, mask);
+ match->wc.masks.tunnel.ipv6_src = *mask;
+}
+
+/* Sets an exact match on the tunnel IPv6 destination address in 'match':
+ * stores '*dst' in the flow and sets the corresponding mask to all-ones. */
+void
+match_set_tun_ipv6_dst(struct match *match, const struct in6_addr *dst)
+{
+ match->flow.tunnel.ipv6_dst = *dst;
+ match->wc.masks.tunnel.ipv6_dst = in6addr_exact;
+}
+
+/* Sets a masked match on the tunnel IPv6 destination address: only the bits
+ * set in '*mask' are significant.  The flow value is stored pre-masked
+ * (dst & mask) so that flow and mask stay consistent. */
+void
+match_set_tun_ipv6_dst_masked(struct match *match, const struct in6_addr *dst,
+ const struct in6_addr *mask)
+{
+ match->flow.tunnel.ipv6_dst = ipv6_addr_bitand(dst, mask);
+ match->wc.masks.tunnel.ipv6_dst = *mask;
+}
+
void
match_set_tun_ttl(struct match *match, uint8_t ttl)
{
format_be64_masked(s, "tun_id", tnl->tun_id, wc->masks.tunnel.tun_id);
format_ip_netmask(s, "tun_src", tnl->ip_src, wc->masks.tunnel.ip_src);
format_ip_netmask(s, "tun_dst", tnl->ip_dst, wc->masks.tunnel.ip_dst);
+ format_ipv6_netmask(s, "tun_ipv6_src", &tnl->ipv6_src,
+ &wc->masks.tunnel.ipv6_src);
+ format_ipv6_netmask(s, "tun_ipv6_dst", &tnl->ipv6_dst,
+ &wc->masks.tunnel.ipv6_dst);
if (wc->masks.tunnel.gbp_id) {
format_be16_masked(s, "tun_gbp_id", tnl->gbp_id,
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
if (priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", priority);
void match_set_tun_src_masked(struct match *match, ovs_be32 src, ovs_be32 mask);
void match_set_tun_dst(struct match *match, ovs_be32 dst);
void match_set_tun_dst_masked(struct match *match, ovs_be32 dst, ovs_be32 mask);
+void match_set_tun_ipv6_src(struct match *, const struct in6_addr *);
+void match_set_tun_ipv6_src_masked(struct match *, const struct in6_addr *,
+ const struct in6_addr *);
+void match_set_tun_ipv6_dst(struct match *, const struct in6_addr *);
+void match_set_tun_ipv6_dst_masked(struct match *, const struct in6_addr *,
+ const struct in6_addr *);
void match_set_tun_ttl(struct match *match, uint8_t ttl);
void match_set_tun_ttl_masked(struct match *match, uint8_t ttl, uint8_t mask);
void match_set_tun_tos(struct match *match, uint8_t tos);
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
/* Metadata. */
if (match->wc.masks.dp_hash) {
[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = ATTR_LEN_NESTED,
.next = ovs_vxlan_ext_attr_lens ,
.next_max = OVS_VXLAN_EXT_MAX},
+ [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
+ [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
};
static const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
tun->ip_dst = nl_attr_get_be32(a);
break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
+ tun->ipv6_src = nl_attr_get_in6_addr(a);
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
+ tun->ipv6_dst = nl_attr_get_in6_addr(a);
+ break;
case OVS_TUNNEL_KEY_ATTR_TOS:
tun->ip_tos = nl_attr_get_u8(a);
break;
if (tun_key->ip_dst) {
nl_msg_put_be32(a, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ip_dst);
}
+ if (ipv6_addr_is_set(&tun_key->ipv6_src)) {
+ nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, &tun_key->ipv6_src);
+ }
+ if (ipv6_addr_is_set(&tun_key->ipv6_dst)) {
+ nl_msg_put_in6_addr(a, OVS_TUNNEL_KEY_ATTR_IPV6_DST, &tun_key->ipv6_dst);
+ }
if (tun_key->ip_tos) {
nl_msg_put_u8(a, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ip_tos);
}
format_ipv4(ds, "dst", nl_attr_get_be32(a),
ma ? nl_attr_get(ma) : NULL, verbose);
break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
+ struct in6_addr ipv6_src;
+ ipv6_src = nl_attr_get_in6_addr(a);
+ format_in6_addr(ds, "ipv6_src", &ipv6_src,
+ ma ? nl_attr_get(ma) : NULL, verbose);
+ break;
+ }
+ case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
+ struct in6_addr ipv6_dst;
+ ipv6_dst = nl_attr_get_in6_addr(a);
+ format_in6_addr(ds, "ipv6_dst", &ipv6_dst,
+ ma ? nl_attr_get(ma) : NULL, verbose);
+ break;
+ }
case OVS_TUNNEL_KEY_ATTR_TOS:
format_u8x(ds, "tos", nl_attr_get_u8(a),
ma ? nl_attr_get(ma) : NULL, verbose);
SCAN_FIELD_NESTED("tun_id=", ovs_be64, be64, OVS_TUNNEL_KEY_ATTR_ID);
SCAN_FIELD_NESTED("src=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_SRC);
SCAN_FIELD_NESTED("dst=", ovs_be32, ipv4, OVS_TUNNEL_KEY_ATTR_IPV4_DST);
+ SCAN_FIELD_NESTED("ipv6_src=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_SRC);
+ SCAN_FIELD_NESTED("ipv6_dst=", struct in6_addr, in6_addr, OVS_TUNNEL_KEY_ATTR_IPV6_DST);
SCAN_FIELD_NESTED("tos=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TOS);
SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
- if (flow->tunnel.ip_dst || export_mask) {
+ if (flow_tnl_dst_is_set(&flow->tunnel) || export_mask) {
tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
parms->key_buf);
}
{
nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
- if (md->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&md->tunnel)) {
tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL);
}
commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions)
{
- /* A valid IPV4_TUNNEL must have non-zero ip_dst. */
- if (flow->tunnel.ip_dst) {
+ /* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
+ * must have non-zero ipv6_dst. */
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
return;
}
* - OVS_TUNNEL_KEY_ATTR_ID 8 -- 4 12
* - OVS_TUNNEL_KEY_ATTR_IPV4_SRC 4 -- 4 8
* - OVS_TUNNEL_KEY_ATTR_IPV4_DST 4 -- 4 8
+ * - OVS_TUNNEL_KEY_ATTR_IPV6_SRC 16 -- 4 20
+ * - OVS_TUNNEL_KEY_ATTR_IPV6_DST 16 -- 4 20
* - OVS_TUNNEL_KEY_ATTR_TOS 1 3 4 8
* - OVS_TUNNEL_KEY_ATTR_TTL 1 3 4 8
* - OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT 0 -- 4 4
* OVS_KEY_ATTR_ICMPV6 2 2 4 8
* OVS_KEY_ATTR_ND 28 -- 4 32
* ----------------------------------------------------------
- * total 532
+ * total 572
*
* We include some slack space in case the calculation isn't quite right or we
* add another field and forget to adjust this value.
*/
-#define ODPUTIL_FLOW_KEY_BYTES 576
-BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+#define ODPUTIL_FLOW_KEY_BYTES 640
+BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
/* A buffer with sufficient size and alignment to hold an nlattr-formatted flow
* key. An array of "struct nlattr" might not, in theory, be sufficiently
void
ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
/* Initialize most of wc. */
flow_wildcards_init_catchall(wc);
const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
const struct in6_addr in6addr_all_hosts = IN6ADDR_ALL_HOSTS_INIT;
+/* Returns the tunnel destination address of 'tnl' as an in6_addr.  If the
+ * IPv4 'ip_dst' is nonzero it takes precedence and is returned as an
+ * IPv4-mapped IPv6 address; otherwise 'ipv6_dst' is returned (which may be
+ * all-zeros if no destination is set).
+ * NOTE(review): assumes in6_addr_set_mapped_ipv4() initializes all 16 bytes
+ * of 'addr' — confirm, since 'addr' is not zeroed here. */
+struct in6_addr
+flow_tnl_dst(const struct flow_tnl *tnl)
+{
+ struct in6_addr addr;
+ if (tnl->ip_dst) {
+ in6_addr_set_mapped_ipv4(&addr, tnl->ip_dst);
+ return addr;
+ }
+ return tnl->ipv6_dst;
+}
+
+/* Returns the tunnel source address of 'tnl' as an in6_addr.  If the IPv4
+ * 'ip_src' is nonzero it takes precedence and is returned as an IPv4-mapped
+ * IPv6 address; otherwise 'ipv6_src' is returned (which may be all-zeros if
+ * no source is set).
+ * NOTE(review): assumes in6_addr_set_mapped_ipv4() initializes all 16 bytes
+ * of 'addr' — confirm, since 'addr' is not zeroed here. */
+struct in6_addr
+flow_tnl_src(const struct flow_tnl *tnl)
+{
+ struct in6_addr addr;
+ if (tnl->ip_src) {
+ in6_addr_set_mapped_ipv4(&addr, tnl->ip_src);
+ return addr;
+ }
+ return tnl->ipv6_src;
+}
+
/* Parses 's' as a 16-digit hexadecimal number representing a datapath ID. On
* success stores the dpid into '*dpidp' and returns true, on failure stores 0
* into '*dpidp' and returns false.
/* Tunnel information used in flow key and metadata. */
struct flow_tnl {
ovs_be32 ip_dst;
+ struct in6_addr ipv6_dst;
ovs_be32 ip_src;
+ struct in6_addr ipv6_src;
ovs_be64 tun_id;
uint16_t flags;
uint8_t ip_tos;
/* Tunnel information is in userspace datapath format. */
#define FLOW_TNL_F_UDPIF (1 << 4)
+static inline bool ipv6_addr_is_set(const struct in6_addr *addr);
+
+/* Returns true if 'tnl' has a tunnel destination set, either a nonzero
+ * IPv4 'ip_dst' or a nonzero IPv6 'ipv6_dst'.  This replaces the former
+ * IPv4-only "tunnel.ip_dst != 0" presence test throughout the tree. */
+static inline bool
+flow_tnl_dst_is_set(const struct flow_tnl *tnl)
+{
+ return tnl->ip_dst || ipv6_addr_is_set(&tnl->ipv6_dst);
+}
+
+struct in6_addr flow_tnl_dst(const struct flow_tnl *tnl);
+struct in6_addr flow_tnl_src(const struct flow_tnl *tnl);
+
/* Returns an offset to 'src' covering all the meaningful fields in 'src'. */
static inline size_t
flow_tnl_size(const struct flow_tnl *src)
{
- if (!src->ip_dst) {
- /* Covers ip_dst only. */
+ if (!flow_tnl_dst_is_set(src)) {
+ /* Covers ip_dst and ipv6_dst only. */
return offsetof(struct flow_tnl, ip_src);
}
if (src->flags & FLOW_TNL_F_UDPIF) {
* looked at. */
memset(md, 0, offsetof(struct pkt_metadata, in_port));
md->tunnel.ip_dst = 0;
+ md->tunnel.ipv6_dst = in6addr_any;
md->in_port.odp_port = port;
}
hash = hash_pointer(state->ofproto, 0);
hash = hash_int(state->table_id, hash);
- if (state->metadata.tunnel->ip_dst) {
+ if (flow_tnl_dst_is_set(state->metadata.tunnel)) {
/* We may leave remainder bytes unhashed, but that is unlikely as
* the tunnel is not in the datapath format. */
hash = hash_words64((const uint64_t *) state->metadata.tunnel,
{
struct flow_tnl tunnel;
tunnel.ip_dst = htonl(0);
+ tunnel.ipv6_dst = in6addr_any;
struct recirc_state state = {
.table_id = TBL_INTERNAL,
.ofproto = ofproto,
/* Metadata for restoring pipeline context after recirculation. Helpers
* are inlined below to keep them together with the definition for easier
* updates. */
-BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
struct recirc_metadata {
/* Metadata in struct flow. */
recirc_metadata_to_flow(const struct recirc_metadata *md,
struct flow *flow)
{
- if (md->tunnel && md->tunnel->ip_dst) {
+ if (md->tunnel && flow_tnl_dst_is_set(md->tunnel)) {
flow->tunnel = *md->tunnel;
} else {
memset(&flow->tunnel, 0, sizeof flow->tunnel);
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
memset(&flow_tnl, 0, sizeof flow_tnl);
if (!xport) {
{
if (tnl_port_should_receive(flow)) {
wc->masks.tunnel.tun_id = OVS_BE64_MAX;
- wc->masks.tunnel.ip_src = OVS_BE32_MAX;
- wc->masks.tunnel.ip_dst = OVS_BE32_MAX;
+ if (flow->tunnel.ip_dst) {
+ wc->masks.tunnel.ip_src = OVS_BE32_MAX;
+ wc->masks.tunnel.ip_dst = OVS_BE32_MAX;
+ } else {
+ wc->masks.tunnel.ipv6_src = in6addr_exact;
+ wc->masks.tunnel.ipv6_dst = in6addr_exact;
+ }
wc->masks.tunnel.flags = (FLOW_TNL_F_DONT_FRAGMENT |
FLOW_TNL_F_CSUM |
FLOW_TNL_F_KEY);
if (!cfg->ip_src_flow) {
flow->tunnel.ip_src = in6_addr_get_mapped_ipv4(&tnl_port->match.ipv6_src);
+ if (!flow->tunnel.ip_src) {
+ flow->tunnel.ipv6_src = tnl_port->match.ipv6_src;
+ }
}
if (!cfg->ip_dst_flow) {
flow->tunnel.ip_dst = in6_addr_get_mapped_ipv4(&tnl_port->match.ipv6_dst);
+ if (!flow->tunnel.ip_dst) {
+ flow->tunnel.ipv6_dst = tnl_port->match.ipv6_dst;
+ }
}
flow->pkt_mark = tnl_port->match.pkt_mark;
* here as a description of how to treat received
* packets. */
match.in_key = in_key_flow ? 0 : flow->tunnel.tun_id;
- if (ip_src == IP_SRC_CFG && flow->tunnel.ip_dst) {
- in6_addr_set_mapped_ipv4(&match.ipv6_src, flow->tunnel.ip_dst);
+ if (ip_src == IP_SRC_CFG) {
+ match.ipv6_src = flow_tnl_dst(&flow->tunnel);
}
- if (!ip_dst_flow && flow->tunnel.ip_src) {
- in6_addr_set_mapped_ipv4(&match.ipv6_dst, flow->tunnel.ip_src);
+ if (!ip_dst_flow) {
+ match.ipv6_dst = flow_tnl_src(&flow->tunnel);
}
match.odp_port = flow->in_port.odp_port;
match.pkt_mark = flow->pkt_mark;
+/* Returns true if 'flow' carries tunnel metadata, i.e. the packet was
+ * received over a tunnel (IPv4 or IPv6 destination set). */
static inline bool
tnl_port_should_receive(const struct flow *flow)
{
- return flow->tunnel.ip_dst != 0;
+ return flow_tnl_dst_is_set(&flow->tunnel);
}
int tnl_port_build_header(const struct ofport_dpif *ofport,