/* These fields are accessed by readers who care about wildcarding. */
const tag_type tag; /* Tag generated from mask for partitioning. */
const uint8_t n_indices; /* How many indices to use. */
- const struct miniflow index_maps[CLS_MAX_INDICES + 1]; /* Stage maps. */
+ const struct flowmap index_maps[CLS_MAX_INDICES + 1]; /* Stage maps. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'
* (runtime configurable). */
const int ports_mask_len;
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
const uint64_t *flow_u64 = (const uint64_t *)flow;
const uint64_t *p = mask_values;
- uint32_t hash;
- size_t idx;
+ uint32_t hash = basis;
+ map_t map;
- hash = basis;
- MAP_FOR_EACH_INDEX(idx, mask->masks.tnl_map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
- }
- flow_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, mask->masks.pkt_map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ size_t idx;
+
+ MAP_FOR_EACH_INDEX (idx, map) {
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
+ }
+ flow_u64 += MAP_T_BITS;
}
return hash_finish(hash, (p - mask_values) * 8);
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
const uint64_t *p = mask_values;
uint32_t hash = basis;
- uint64_t flow_u64;
+ uint64_t value;
- MINIFLOW_FOR_EACH_IN_TNL_MAP(flow_u64, flow, mask->masks) {
- hash = hash_add64(hash, flow_u64 & *p++);
- }
- MINIFLOW_FOR_EACH_IN_PKT_MAP(flow_u64, flow, mask->masks) {
- hash = hash_add64(hash, flow_u64 & *p++);
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, mask->masks.map) {
+ hash = hash_add64(hash, value & *p++);
}
return hash_finish(hash, (p - mask_values) * 8);
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
const struct minimask *mask,
- const struct miniflow *range,
+ const struct flowmap range,
unsigned int *offset,
uint32_t *basis)
{
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
- const uint64_t *p = mask_values + *offset;
const uint64_t *flow_u64 = (const uint64_t *)flow;
+ const uint64_t *p = mask_values + *offset;
uint32_t hash = *basis;
- size_t idx;
+ map_t map;
- MAP_FOR_EACH_INDEX(idx, range->tnl_map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
- }
- flow_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, range->pkt_map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
+ FLOWMAP_FOR_EACH_MAP (map, range) {
+ size_t idx;
+
+ MAP_FOR_EACH_INDEX (idx, map) {
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
+ }
+ flow_u64 += MAP_T_BITS;
}
*basis = hash; /* Allow continuation from the unfinished value. */
}
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask for bits in
- * 'map'. The 1-bits in 'map' correspond to the first 1-bits in 'mask''s
- * map. */
+ * 'fmap'. 1-bits in 'fmap' are a subset of 1-bits in 'mask''s map. */
static inline void
flow_wildcards_fold_minimask_in_map(struct flow_wildcards *wc,
const struct minimask *mask,
- const struct miniflow *map)
+ const struct flowmap fmap)
{
- const uint64_t *p = miniflow_get_values(&mask->masks);
- uint64_t *dst_u64 = (uint64_t *)&wc->masks;
- size_t idx;
-
- if (map->tnl_map) {
- MAP_FOR_EACH_INDEX(idx, map->tnl_map) {
- dst_u64[idx] |= *p++;
- }
- }
- dst_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, map->pkt_map) {
- dst_u64[idx] |= *p++;
- }
+ flow_union_with_miniflow_subset(&wc->masks, &mask->masks, fmap);
}
/* Returns a hash value for 'mask', given 'basis'. */
for (size_t i = 0; i < n_values; i++) {
hash = hash_add64(hash, *p++);
}
- hash = hash_add64(hash, mask->masks.tnl_map);
- hash = hash_add64(hash, mask->masks.pkt_map);
+
+ map_t map;
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ hash = hash_add64(hash, map);
+ }
return hash_finish(hash, n_values);
}
* flow_hash_in_minimask_range(), only the form of the arguments differ. */
static inline uint32_t
minimatch_hash_range(const struct minimatch *match,
- const struct miniflow *range, unsigned int *offset,
+ const struct flowmap range, unsigned int *offset,
uint32_t *basis)
{
- const uint64_t *p = miniflow_get_values(match->flow);
- const uint64_t *q = miniflow_get_values(&match->mask->masks);
+ const uint64_t *p = miniflow_get_values(match->flow) + *offset;
+ const uint64_t *q = miniflow_get_values(&match->mask->masks) + *offset;
+ unsigned int n = flowmap_n_1bits(range);
uint32_t hash = *basis;
- int n, i;
-
- n = miniflow_n_values(range);
-
- q += *offset;
- p += *offset;
- for (i = 0; i < n; i++) {
+ for (unsigned int i = 0; i < n; i++) {
hash = hash_add64(hash, p[i] & q[i]);
}
*basis = hash; /* Allow continuation from the unfinished value. */
basis = 0;
mask_offset = 0;
for (i = 0; i < subtable->n_indices; i++) {
- ihash[i] = minimatch_hash_range(&rule->match, &subtable->index_maps[i],
+ ihash[i] = minimatch_hash_range(&rule->match, subtable->index_maps[i],
&mask_offset, &basis);
}
- hash = minimatch_hash_range(&rule->match, &subtable->index_maps[i],
+ hash = minimatch_hash_range(&rule->match, subtable->index_maps[i],
&mask_offset, &basis);
head = find_equal(subtable, rule->match.flow, hash);
mask_offset = 0;
for (i = 0; i < subtable->n_indices; i++) {
ihash[i] = minimatch_hash_range(&cls_rule->match,
- &subtable->index_maps[i],
+ subtable->index_maps[i],
&mask_offset, &basis);
}
- hash = minimatch_hash_range(&cls_rule->match, &subtable->index_maps[i],
+ hash = minimatch_hash_range(&cls_rule->match, subtable->index_maps[i],
&mask_offset, &basis);
head = find_equal(subtable, cls_rule->match.flow, hash);
-/* Initializes 'map' with a subset of 'miniflow''s maps that includes only the
- * portions with u64-offset 'i' such that 'start' <= i < 'end'. Does not copy
- * any data from 'miniflow' to 'map'. */
+/* Returns a copy of 'miniflow''s maps restricted to only the portions with
+ * u64-offset 'i' such that 'start' <= i < 'end'.  Does not copy any data from
+ * 'miniflow'. */
-static void
-miniflow_get_map_in_range(const struct miniflow *miniflow,
- uint8_t start, uint8_t end, struct miniflow *map)
+static struct flowmap
+miniflow_get_map_in_range(const struct miniflow *miniflow, uint8_t start,
+ uint8_t end)
{
- *map = *miniflow; /* Copy maps. */
-
- if (start >= FLOW_TNL_U64S) {
- map->tnl_map = 0;
- if (start > FLOW_TNL_U64S) {
- /* Clear 'start - FLOW_TNL_U64S' LSBs from pkt_map. */
- start -= FLOW_TNL_U64S;
- uint64_t msk = (UINT64_C(1) << start) - 1;
+ struct flowmap map;
+ size_t ofs = 0;
- map->pkt_map &= ~msk;
- }
- } else if (start > 0) {
- /* Clear 'start' LSBs from tnl_map. */
- uint64_t msk = (UINT64_C(1) << start) - 1;
+ map = miniflow->map;
- map->tnl_map &= ~msk;
+ /* Clear the bits before 'start'. */
+ while (start >= MAP_T_BITS) {
+ start -= MAP_T_BITS;
+ ofs += MAP_T_BITS;
+ map.bits[start / MAP_T_BITS] = 0;
+ }
+ if (start > 0) {
+ flowmap_clear(&map, ofs, start);
}
- if (end <= FLOW_TNL_U64S) {
- map->pkt_map = 0;
- if (end < FLOW_TNL_U64S) {
- /* Keep 'end' LSBs in tnl_map. */
- map->tnl_map &= (UINT64_C(1) << end) - 1;
- }
- } else {
- if (end < FLOW_U64S) {
- /* Keep 'end - FLOW_TNL_U64S' LSBs in pkt_map. */
- map->pkt_map &= (UINT64_C(1) << (end - FLOW_TNL_U64S)) - 1;
- }
+ /* Clear the bits starting at 'end'. */
+ if (end < FLOW_U64S) {
+ /* flowmap_clear() can handle at most MAP_T_BITS at a time. */
+ ovs_assert(FLOW_U64S - end <= MAP_T_BITS);
+ flowmap_clear(&map, end, FLOW_U64S - end);
}
+ return map;
}
/* The new subtable will be visible to the readers only after this. */
uint32_t hash = minimask_hash(mask, 0);
struct cls_subtable *subtable;
int i, index = 0;
- struct minimask stage_mask;
+ struct flowmap stage_map;
uint8_t prev;
size_t count = miniflow_n_values(&mask->masks);
/* Init indices for segmented lookup, if any. */
prev = 0;
for (i = 0; i < cls->n_flow_segments; i++) {
- miniflow_get_map_in_range(&mask->masks, prev, cls->flow_segments[i],
- &stage_mask.masks);
+ stage_map = miniflow_get_map_in_range(&mask->masks, prev,
+ cls->flow_segments[i]);
/* Add an index if it adds mask bits. */
- if (!minimask_is_catchall(&stage_mask)) {
+ if (!flowmap_is_empty(stage_map)) {
cmap_init(&subtable->indices[index]);
- *CONST_CAST(struct miniflow *, &subtable->index_maps[index])
- = stage_mask.masks;
+ *CONST_CAST(struct flowmap *, &subtable->index_maps[index])
+ = stage_map;
index++;
}
prev = cls->flow_segments[i];
}
/* Map for the final stage. */
- miniflow_get_map_in_range(
- &mask->masks, prev, FLOW_U64S,
- CONST_CAST(struct miniflow *, &subtable->index_maps[index]));
+ *CONST_CAST(struct flowmap *, &subtable->index_maps[index])
+ = miniflow_get_map_in_range(&mask->masks, prev, FLOW_U64S);
/* Check if the final stage adds any bits,
* and remove the last index if it doesn't. */
if (index > 0) {
- if (miniflow_equal_maps(&subtable->index_maps[index],
- &subtable->index_maps[index - 1])) {
+ if (flowmap_equal(subtable->index_maps[index],
+ subtable->index_maps[index - 1])) {
--index;
cmap_destroy(&subtable->indices[index]);
}
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
const unsigned int field_plen[CLS_MAX_TRIES],
- const struct miniflow *range_map, const struct flow *flow,
+ const struct flowmap range_map, const struct flow *flow,
struct flow_wildcards *wc)
{
int j;
* fields using the prefix tries. The trie checks are done only as
* needed to avoid folding in additional bits to the wildcards mask. */
for (j = 0; j < n_tries; j++) {
- /* Is the trie field relevant for this subtable? */
- if (field_plen[j]) {
+ /* Is the trie field relevant for this subtable, and
+ is the trie field within the current range of fields? */
+ if (field_plen[j] &&
+ flowmap_is_set(&range_map, trie_ctx[j].be32ofs / 2)) {
struct trie_ctx *ctx = &trie_ctx[j];
- uint8_t be32ofs = ctx->be32ofs;
- uint8_t be64ofs = be32ofs / 2;
-
- /* Is the trie field within the current range of fields? */
- if (MINIFLOW_IN_MAP(range_map, be64ofs)) {
- /* On-demand trie lookup. */
- if (!ctx->lookup_done) {
- memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
- ctx->maskbits = trie_lookup(ctx->trie, flow,
- &ctx->match_plens);
- ctx->lookup_done = true;
+
+ /* On-demand trie lookup. */
+ if (!ctx->lookup_done) {
+ memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
+ ctx->maskbits = trie_lookup(ctx->trie, flow, &ctx->match_plens);
+ ctx->lookup_done = true;
+ }
+ /* Possible to skip the rest of the subtable if subtable's
+ * prefix on the field is not included in the lookup result. */
+ if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
+ /* We want the trie lookup to never result in unwildcarding
+ * any bits that would not be unwildcarded otherwise.
+ * Since the trie is shared by the whole classifier, it is
+ * possible that the 'maskbits' contain bits that are
+ * irrelevant for the partition relevant for the current
+ * packet. Hence the checks below. */
+
+ /* Check that the trie result will not unwildcard more bits
+ * than this subtable would otherwise. */
+ if (ctx->maskbits <= field_plen[j]) {
+ /* Unwildcard the bits and skip the rest. */
+ mask_set_prefix_bits(wc, ctx->be32ofs, ctx->maskbits);
+ /* Note: Prerequisite already unwildcarded, as the only
+ * prerequisite of the supported trie lookup fields is
+ * the ethertype, which is always unwildcarded. */
+ return true;
}
- /* Possible to skip the rest of the subtable if subtable's
- * prefix on the field is not included in the lookup result. */
- if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
- /* We want the trie lookup to never result in unwildcarding
- * any bits that would not be unwildcarded otherwise.
- * Since the trie is shared by the whole classifier, it is
- * possible that the 'maskbits' contain bits that are
- * irrelevant for the partition relevant for the current
- * packet. Hence the checks below. */
-
- /* Check that the trie result will not unwildcard more bits
- * than this subtable would otherwise. */
- if (ctx->maskbits <= field_plen[j]) {
- /* Unwildcard the bits and skip the rest. */
- mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
- /* Note: Prerequisite already unwildcarded, as the only
- * prerequisite of the supported trie lookup fields is
- * the ethertype, which is always unwildcarded. */
- return true;
- }
- /* Can skip if the field is already unwildcarded. */
- if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
- return true;
- }
+ /* Can skip if the field is already unwildcarded. */
+ if (mask_prefix_bits_set(wc, ctx->be32ofs, ctx->maskbits)) {
+ return true;
}
}
}
const uint64_t *flowp = miniflow_get_values(flow);
const uint64_t *maskp = miniflow_get_values(&mask->masks);
const uint64_t *target_u64 = (const uint64_t *)target;
- size_t idx;
+ map_t map;
- MAP_FOR_EACH_INDEX(idx, mask->masks.tnl_map) {
- if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
- return false;
- }
- }
- target_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, mask->masks.pkt_map) {
- if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
- return false;
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ size_t idx;
+
+ MAP_FOR_EACH_INDEX (idx, map) {
+ if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
+ return false;
+ }
}
+ target_u64 += MAP_T_BITS;
}
-
return true;
}
uint64_t *wc_u64 = (uint64_t *)&wc->masks;
uint64_t diff;
size_t idx;
+ map_t map;
- MAP_FOR_EACH_INDEX(idx, mask->masks.tnl_map) {
- uint64_t msk = *maskp++;
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ MAP_FOR_EACH_INDEX(idx, map) {
+ uint64_t msk = *maskp++;
- diff = (*flowp++ ^ target_u64[idx]) & msk;
- if (diff) {
- goto out;
- }
-
- /* Fill in the bits that were looked at. */
- wc_u64[idx] |= msk;
- }
- target_u64 += FLOW_TNL_U64S;
- wc_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, mask->masks.pkt_map) {
- uint64_t msk = *maskp++;
+ diff = (*flowp++ ^ target_u64[idx]) & msk;
+ if (diff) {
+ goto out;
+ }
- diff = (*flowp++ ^ target_u64[idx]) & msk;
- if (diff) {
- goto out;
+ /* Fill in the bits that were looked at. */
+ wc_u64[idx] |= msk;
}
-
- /* Fill in the bits that were looked at. */
- wc_u64[idx] |= msk;
+ target_u64 += MAP_T_BITS;
+ wc_u64 += MAP_T_BITS;
}
-
return true;
out:
const struct flow *flow, struct trie_ctx trie_ctx[CLS_MAX_TRIES],
unsigned int n_tries, struct flow_wildcards *wc)
{
- uint32_t basis = 0, hash;
- const struct cls_match *rule = NULL;
- int i;
- struct miniflow stages_map;
- unsigned int mask_offset = 0;
-
if (OVS_UNLIKELY(!wc)) {
return find_match(subtable, version, flow,
flow_hash_in_minimask(flow, &subtable->mask, 0));
}
- memset(&stages_map, 0, sizeof stages_map);
+ uint32_t basis = 0, hash;
+ const struct cls_match *rule = NULL;
+ struct flowmap stages_map = FLOWMAP_EMPTY_INITIALIZER;
+ unsigned int mask_offset = 0;
+ int i;
+
/* Try to finish early by checking fields in segments. */
for (i = 0; i < subtable->n_indices; i++) {
const struct cmap_node *inode;
if (check_tries(trie_ctx, n_tries, subtable->trie_plen,
- &subtable->index_maps[i], flow, wc)) {
+ subtable->index_maps[i], flow, wc)) {
/* 'wc' bits for the trie field set, now unwildcard the preceding
* bits used so far. */
goto no_match;
}
/* Accumulate the map used so far. */
- stages_map.tnl_map |= subtable->index_maps[i].tnl_map;
- stages_map.pkt_map |= subtable->index_maps[i].pkt_map;
+ stages_map = flowmap_or(stages_map, subtable->index_maps[i]);
hash = flow_hash_in_minimask_range(flow, &subtable->mask,
- &subtable->index_maps[i],
+ subtable->index_maps[i],
&mask_offset, &basis);
inode = cmap_find(&subtable->indices[i], hash);
}
/* Trie check for the final range. */
if (check_tries(trie_ctx, n_tries, subtable->trie_plen,
- &subtable->index_maps[i], flow, wc)) {
+ subtable->index_maps[i], flow, wc)) {
goto no_match;
}
hash = flow_hash_in_minimask_range(flow, &subtable->mask,
- &subtable->index_maps[i],
+ subtable->index_maps[i],
&mask_offset, &basis);
rule = find_match(subtable, version, flow, hash);
if (!rule && subtable->ports_mask_len) {
no_match:
/* Unwildcard the bits in stages so far, as they were used in determining
* there is no match. */
- flow_wildcards_fold_minimask_in_map(wc, &subtable->mask, &stages_map);
+ flow_wildcards_fold_minimask_in_map(wc, &subtable->mask, stages_map);
return NULL;
}
{
int i;
- BUILD_ASSERT(sizeof(struct miniflow) == 2 * sizeof(uint64_t));
-
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
flow_cache->entries[i].key.len = sizeof(struct miniflow);
- flow_cache->entries[i].key.mf.tnl_map = 0;
- flow_cache->entries[i].key.mf.pkt_map = 0;
+ flowmap_init(&flow_cache->entries[i].key.mf.map);
}
}
* miniflow_extract(), if the map is different the miniflow is different.
* Therefore we can be faster by comparing the map and the miniflow in a
* single memcmp().
- * - These functions can be inlined by the compiler.
- *
- * The following assertions make sure that what we're doing with miniflow is
- * safe.
- */
-BUILD_ASSERT_DECL(sizeof(struct miniflow) == 2 * sizeof(uint64_t));
+ * - These functions can be inlined by the compiler. */
/* Given the number of bits set in miniflow's maps, returns the size of the
* 'netdev_flow_key.mf' */
netdev_flow_mask_init(struct netdev_flow_key *mask,
const struct match *match)
{
- const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks;
uint64_t *dst = miniflow_values(&mask->mf);
- struct miniflow maps;
- uint64_t map;
+ struct flowmap fmap;
uint32_t hash = 0;
- int n;
+ size_t idx;
/* Only check masks that make sense for the flow. */
- flow_wc_map(&match->flow, &maps);
- memset(&mask->mf, 0, sizeof mask->mf); /* Clear maps. */
+ flow_wc_map(&match->flow, &fmap);
+ flowmap_init(&mask->mf.map);
- map = maps.tnl_map;
- while (map) {
- uint64_t rm1bit = rightmost_1bit(map);
- int i = raw_ctz(map);
+ FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
+ uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
- if (mask_u64[i]) {
- mask->mf.tnl_map |= rm1bit;
- *dst++ = mask_u64[i];
- hash = hash_add64(hash, mask_u64[i]);
+ if (mask_u64) {
+ flowmap_set(&mask->mf.map, idx, 1);
+ *dst++ = mask_u64;
+ hash = hash_add64(hash, mask_u64);
}
- map -= rm1bit;
}
- mask_u64 += FLOW_TNL_U64S;
- map = maps.pkt_map;
- while (map) {
- uint64_t rm1bit = rightmost_1bit(map);
- int i = raw_ctz(map);
- if (mask_u64[i]) {
- mask->mf.pkt_map |= rm1bit;
- *dst++ = mask_u64[i];
- hash = hash_add64(hash, mask_u64[i]);
- }
- map -= rm1bit;
- }
+ map_t map;
- hash = hash_add64(hash, mask->mf.tnl_map);
- hash = hash_add64(hash, mask->mf.pkt_map);
+ FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
+ hash = hash_add64(hash, map);
+ }
- n = dst - miniflow_get_values(&mask->mf);
+ size_t n = dst - miniflow_get_values(&mask->mf);
mask->hash = hash_finish(hash, n * 8);
mask->len = netdev_flow_key_size(n);
dst->len = mask->len;
dst->mf = mask->mf; /* Copy maps. */
- FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf) {
+ FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
*dst_u64 = value & *mask_u64++;
hash = hash_add64(hash, *dst_u64++);
}
(dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
-/* Iterate through netdev_flow_key TNL u64 values specified by 'MAPS'. */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(VALUE, KEY, MAPS) \
- MINIFLOW_FOR_EACH_IN_TNL_MAP(VALUE, &(KEY)->mf, MAPS)
-
-/* Iterate through netdev_flow_key PKT u64 values specified by 'MAPS'. */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(VALUE, KEY, MAPS) \
- MINIFLOW_FOR_EACH_IN_PKT_MAP(VALUE, &(KEY)->mf, MAPS)
+/* Iterate through netdev_flow_key u64 values specified by 'FLOWMAP'. */
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
/* Returns a hash value for the bits of 'key' where there are 1-bits in
* 'mask'. */
{
const uint64_t *p = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
- uint64_t key_u64;
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(key_u64, key, mask->mf) {
- hash = hash_add64(hash, key_u64 & *p++);
- }
- NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(key_u64, key, mask->mf) {
- hash = hash_add64(hash, key_u64 & *p++);
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
+ hash = hash_add64(hash, value & *p++);
}
return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
netdev_flow_mask_init(&mask, match);
/* Make sure wc does not have metadata. */
- ovs_assert(!(mask.mf.pkt_map
- & (MINIFLOW_PKT_MAP(metadata) | MINIFLOW_PKT_MAP(regs))));
+ ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
+ && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
/* Do not allocate extra space. */
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
{
const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
- uint64_t target_u64;
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_TNL_MAP(target_u64, target, rule->flow.mf) {
- if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
- return false;
- }
- }
- NETDEV_FLOW_KEY_FOR_EACH_IN_PKT_MAP(target_u64, target, rule->flow.mf) {
- if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
+ if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
return false;
}
}
/* Context for pushing data to a miniflow. */
struct mf_ctx {
- struct miniflow maps;
+ struct flowmap map;
uint64_t *data;
uint64_t * const end;
};
#define MINIFLOW_ASSERT(X)
#endif
-#define miniflow_set_map(MF, OFS) \
+/* Asserts that bit 'IDX' and all higher bits are unset in flowmap 'FM'. */
+#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
- unsigned int ofs = (OFS); \
- \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(!(MF.maps.tnl_map & (UINT64_MAX << ofs)) \
- && !MF.maps.pkt_map); \
- MF.maps.tnl_map |= UINT64_C(1) << ofs; \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs))); \
- MF.maps.pkt_map |= UINT64_C(1) << ofs; \
+ MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
+ (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
+ MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
}
-#define miniflow_assert_in_map(MF, OFS) \
-{ \
- unsigned int ofs = (OFS); \
- \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(MF.maps.tnl_map & UINT64_C(1) << ofs \
- && !(MF.maps.tnl_map & UINT64_MAX << (ofs + 1)) \
- && !MF.maps.pkt_map); \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(MF.maps.pkt_map & UINT64_C(1) << ofs \
- && !(MF.maps.pkt_map & UINT64_MAX << (ofs + 1))); \
- } \
+#define miniflow_set_map(MF, OFS) \
+ { \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS)); \
+ flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_push_uint64_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
- *MF.data++ = VALUE; \
- miniflow_set_map(MF, OFS / 8); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
+
+#define miniflow_push_uint64_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
+ *MF.data++ = VALUE; \
+ miniflow_set_map(MF, OFS / 8); \
}
-#define miniflow_push_be64_(MF, OFS, VALUE) \
+#define miniflow_push_be64_(MF, OFS, VALUE) \
miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
-#define miniflow_push_uint32_(MF, OFS, VALUE) \
- { \
- MINIFLOW_ASSERT(MF.data < MF.end); \
- \
- if ((OFS) % 8 == 0) { \
- miniflow_set_map(MF, OFS / 8); \
- *(uint32_t *)MF.data = VALUE; \
- } else if ((OFS) % 8 == 4) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint32_t *)MF.data + 1) = VALUE; \
- MF.data++; \
- } \
+#define miniflow_push_uint32_(MF, OFS, VALUE) \
+ { \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint32_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint32_t *)MF.data + 1) = VALUE; \
+ MF.data++; \
+ } \
}
#define miniflow_push_be32_(MF, OFS, VALUE) \
miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
-#define miniflow_push_uint16_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end); \
- \
- if ((OFS) % 8 == 0) { \
- miniflow_set_map(MF, OFS / 8); \
- *(uint16_t *)MF.data = VALUE; \
- } else if ((OFS) % 8 == 2) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 1) = VALUE; \
- } else if ((OFS) % 8 == 4) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 2) = VALUE; \
- } else if ((OFS) % 8 == 6) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 3) = VALUE; \
- MF.data++; \
- } \
-}
-
-#define miniflow_pad_to_64_(MF, OFS) \
-{ \
- MINIFLOW_ASSERT((OFS) % 8 != 0); \
- miniflow_assert_in_map(MF, OFS / 8); \
- \
- memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
- MF.data++; \
+#define miniflow_push_uint16_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint16_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 2) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 1) = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 2) = VALUE; \
+ } else if ((OFS) % 8 == 6) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 3) = VALUE; \
+ MF.data++; \
+ } \
+}
+
+#define miniflow_pad_to_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ \
+ memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
+ MF.data++; \
}
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
-#define miniflow_set_maps(MF, OFS, N_WORDS) \
-{ \
- unsigned int ofs = (OFS); \
- unsigned int n_words = (N_WORDS); \
- uint64_t n_words_mask = UINT64_MAX >> (64 - n_words); \
- \
- MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(!(MF.maps.tnl_map & UINT64_MAX << ofs) \
- && !MF.maps.pkt_map); \
- MF.maps.tnl_map |= n_words_mask << ofs; \
- if (n_words > FLOW_TNL_U64S - ofs) { \
- MF.maps.pkt_map |= n_words_mask >> (FLOW_TNL_U64S - ofs); \
- } \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs))); \
- MF.maps.pkt_map |= n_words_mask << ofs; \
- } \
+#define miniflow_set_maps(MF, OFS, N_WORDS) \
+{ \
+ size_t ofs = (OFS); \
+ size_t n_words = (N_WORDS); \
+ \
+ MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, ofs); \
+ flowmap_set(&MF.map, ofs, n_words); \
}
/* Data at 'valuep' may be unaligned. */
const void *data = dp_packet_data(packet);
size_t size = dp_packet_size(packet);
uint64_t *values = miniflow_values(dst);
- struct mf_ctx mf = { { 0, 0 }, values, values + FLOW_U64S };
+ struct mf_ctx mf = { FLOWMAP_EMPTY_INITIALIZER, values,
+ values + FLOW_U64S };
const char *l2;
ovs_be16 dl_type;
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
}
}
out:
- *dst = mf.maps;
+ dst->map = mf.map;
}
/* For every bit of a field that is wildcarded in 'wildcards', sets the
*
* This is a less precise version of flow_wildcards_init_for_packet() above. */
void
-flow_wc_map(const struct flow *flow, struct miniflow *map)
+flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
- map->tnl_map = 0;
+ flowmap_init(map);
+
if (flow->tunnel.ip_dst) {
- map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel,
- offsetof(struct flow_tnl, metadata));
+ FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
if (flow->tunnel.metadata.present.map) {
- map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata);
+ FLOWMAP_SET(map, tunnel.metadata);
}
} else {
- map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata.present.len);
- map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel.metadata.opts.gnv,
- flow->tunnel.metadata.present.len);
+ FLOWMAP_SET(map, tunnel.metadata.present.len);
+ FLOWMAP_SET__(map, tunnel.metadata.opts.gnv,
+ flow->tunnel.metadata.present.len);
}
}
/* Metadata fields that can appear on packet input. */
- map->pkt_map = MINIFLOW_PKT_MAP(skb_priority) | MINIFLOW_PKT_MAP(pkt_mark)
- | MINIFLOW_PKT_MAP(recirc_id) | MINIFLOW_PKT_MAP(dp_hash)
- | MINIFLOW_PKT_MAP(in_port)
- | MINIFLOW_PKT_MAP(dl_dst) | MINIFLOW_PKT_MAP(dl_src)
- | MINIFLOW_PKT_MAP(dl_type) | MINIFLOW_PKT_MAP(vlan_tci);
+ FLOWMAP_SET(map, skb_priority);
+ FLOWMAP_SET(map, pkt_mark);
+ FLOWMAP_SET(map, recirc_id);
+ FLOWMAP_SET(map, dp_hash);
+ FLOWMAP_SET(map, in_port);
+ FLOWMAP_SET(map, dl_dst);
+ FLOWMAP_SET(map, dl_src);
+ FLOWMAP_SET(map, dl_type);
+ FLOWMAP_SET(map, vlan_tci);
/* Ethertype-dependent fields. */
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
- | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
- | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
+ FLOWMAP_SET(map, nw_src);
+ FLOWMAP_SET(map, nw_dst);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, nw_frag);
+ FLOWMAP_SET(map, nw_tos);
+ FLOWMAP_SET(map, nw_ttl);
+
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(igmp_group_ip4);
+ FLOWMAP_SET(map, igmp_group_ip4);
} else {
- map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
- | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
+ FLOWMAP_SET(map, tcp_flags);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
}
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(ipv6_src) | MINIFLOW_PKT_MAP(ipv6_dst)
- | MINIFLOW_PKT_MAP(ipv6_label)
- | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
- | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
+ FLOWMAP_SET(map, ipv6_src);
+ FLOWMAP_SET(map, ipv6_dst);
+ FLOWMAP_SET(map, ipv6_label);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, nw_frag);
+ FLOWMAP_SET(map, nw_tos);
+ FLOWMAP_SET(map, nw_ttl);
+
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nd_target)
- | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
+ FLOWMAP_SET(map, nd_target);
+ FLOWMAP_SET(map, arp_sha);
+ FLOWMAP_SET(map, arp_tha);
} else {
- map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
- | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
+ FLOWMAP_SET(map, tcp_flags);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
}
} else if (eth_type_mpls(flow->dl_type)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(mpls_lse);
+ FLOWMAP_SET(map, mpls_lse);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
- | MINIFLOW_PKT_MAP(nw_proto)
- | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
+ FLOWMAP_SET(map, nw_src);
+ FLOWMAP_SET(map, nw_dst);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, arp_sha);
+ FLOWMAP_SET(map, arp_tha);
}
}
/* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
- struct miniflow maps = { 0, MINIFLOW_PKT_MAP(ipv6_src)
- | MINIFLOW_PKT_MAP(ipv6_dst) };
+ struct flowmap map = FLOWMAP_EMPTY_INITIALIZER;
uint64_t value;
- MINIFLOW_FOR_EACH_IN_PKT_MAP(value, flow, maps) {
+ FLOWMAP_SET(&map, ipv6_src);
+ FLOWMAP_SET(&map, ipv6_dst);
+
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, map) {
hash = hash_add64(hash, value);
}
} else {
/* Compressed flow. */
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
- * the caller. The caller must have already computed 'dst->tnl_map' and
- * 'dst->pkt_map' properly to indicate the significant uint64_t elements of
- * 'src'.
+ * the caller. The caller must have already computed 'dst->map' properly to
+ * indicate the significant uint64_t elements of 'src'.
*
* Normally the significant elements are the ones that are non-zero. However,
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
- const uint64_t *src_u64 = (const uint64_t *) src;
uint64_t *dst_u64 = miniflow_values(dst);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, *dst) {
- *dst_u64++ = src_u64[idx];
+ FLOWMAP_FOR_EACH_INDEX(idx, dst->map) {
+ *dst_u64++ = flow_u64_value(src, idx);
}
}
void
miniflow_map_init(struct miniflow *flow, const struct flow *src)
{
- const uint64_t *src_u64 = (const uint64_t *) src;
- int i;
-
/* Initialize map, counting the number of nonzero elements. */
- flow->tnl_map = 0;
- for (i = 0; i < FLOW_TNL_U64S; i++) {
- if (src_u64[i]) {
- flow->tnl_map |= UINT64_C(1) << i;
- }
- }
- src_u64 += FLOW_TNL_U64S;
- flow->pkt_map = 0;
- for (i = 0; i < FLOW_U64S - FLOW_TNL_U64S; i++) {
- if (src_u64[i]) {
- flow->pkt_map |= UINT64_C(1) << i;
+ flowmap_init(&flow->map);
+ for (size_t i = 0; i < FLOW_U64S; i++) {
+ if (flow_u64_value(src, i)) {
+ flowmap_set(&flow->map, i, 1);
}
}
}
size_t n_values = miniflow_n_values(src);
size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
- unsigned int i;
+ size_t i;
COVERAGE_INC(miniflow_malloc);
const uint64_t *ap = miniflow_get_values(a);
const uint64_t *bp = miniflow_get_values(b);
- if (OVS_LIKELY(miniflow_equal_maps(a, b))) {
+ /* This is mostly called after a matching hash, so it is highly likely that
+ * the maps are equal as well. */
+ if (OVS_LIKELY(flowmap_equal(a->map, b->map))) {
return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
} else {
- uint64_t map;
-
- map = a->tnl_map | b->tnl_map;
- for (; map; map = zero_rightmost_1bit(map)) {
- uint64_t bit = rightmost_1bit(map);
-
- if ((a->tnl_map & bit ? *ap++ : 0)
- != (b->tnl_map & bit ? *bp++ : 0)) {
- return false;
- }
- }
- map = a->pkt_map | b->pkt_map;
- for (; map; map = zero_rightmost_1bit(map)) {
- uint64_t bit = rightmost_1bit(map);
+ size_t idx;
- if ((a->pkt_map & bit ? *ap++ : 0)
- != (b->pkt_map & bit ? *bp++ : 0)) {
+ FLOWMAP_FOR_EACH_INDEX (idx, flowmap_or(a->map, b->map)) {
+ if ((flowmap_is_set(&a->map, idx) ? *ap++ : 0)
+ != (flowmap_is_set(&b->map, idx) ? *bp++ : 0)) {
return false;
}
}
const uint64_t *p = miniflow_get_values(&mask->masks);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, mask->masks) {
+ FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
return false;
}
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
const struct minimask *mask)
{
- const uint64_t *b_u64 = (const uint64_t *) b;
const uint64_t *p = miniflow_get_values(&mask->masks);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, mask->masks) {
- if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
+ FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ if ((miniflow_get(a, idx) ^ flow_u64_value(b, idx)) & *p++) {
return false;
}
}
uint64_t *dst_values = storage;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- const uint64_t *ap = miniflow_get_values(a);
- const uint64_t *bp = miniflow_get_values(b);
size_t idx;
- dst->tnl_map = 0;
- MAP_FOR_EACH_INDEX(idx, a->tnl_map & b->tnl_map) {
- /* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint64_t mask = *miniflow_values_get__(ap, a->tnl_map, idx)
- & *miniflow_values_get__(bp, b->tnl_map, idx);
+ flowmap_init(&dst->map);
- if (mask) {
- dst->tnl_map |= UINT64_C(1) << idx;
- *dst_values++ = mask;
- }
- }
- dst->pkt_map = 0;
- ap += count_1bits(a->tnl_map); /* Skip tnl_map values. */
- bp += count_1bits(b->tnl_map); /* Skip tnl_map values. */
- MAP_FOR_EACH_INDEX(idx, a->pkt_map & b->pkt_map) {
+ FLOWMAP_FOR_EACH_INDEX(idx, flowmap_and(a->map, b->map)) {
/* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint64_t mask = *miniflow_values_get__(ap, a->pkt_map, idx)
- & *miniflow_values_get__(bp, b->pkt_map, idx);
+ uint64_t mask = *miniflow_get__(a, idx) & *miniflow_get__(b, idx);
if (mask) {
- dst->pkt_map |= UINT64_C(1) << idx;
+ flowmap_set(&dst->map, idx, 1);
*dst_values++ = mask;
}
}
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
- return a->masks.tnl_map == b->masks.tnl_map
- && a->masks.pkt_map == b->masks.pkt_map &&
- !memcmp(miniflow_get_values(&a->masks), miniflow_get_values(&b->masks),
- MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
+ return !memcmp(a, b, sizeof *a
+ + MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
}
/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
- const uint64_t *ap = miniflow_get_values(&a->masks);
const uint64_t *bp = miniflow_get_values(&b->masks);
size_t idx;
- MAP_FOR_EACH_INDEX(idx, b->masks.tnl_map) {
+ FLOWMAP_FOR_EACH_INDEX(idx, b->masks.map) {
uint64_t b_u64 = *bp++;
/* 'b_u64' is non-zero, check if the data in 'a' is either zero
* or misses some of the bits in 'b_u64'. */
- if (!(a->masks.tnl_map & (UINT64_C(1) << idx))
- || ((*miniflow_values_get__(ap, a->masks.tnl_map, idx) & b_u64)
- != b_u64)) {
- return true; /* 'a' wildcards some bits 'b' doesn't. */
- }
- }
- ap += count_1bits(a->masks.tnl_map); /* Skip tnl_map values. */
- MAP_FOR_EACH_INDEX(idx, b->masks.pkt_map) {
- uint64_t b_u64 = *bp++;
-
- if (!(a->masks.pkt_map & (UINT64_C(1) << idx))
- || ((*miniflow_values_get__(ap, a->masks.pkt_map, idx) & b_u64)
- != b_u64)) {
+ if (!MINIFLOW_IN_MAP(&a->masks, idx)
+ || ((*miniflow_get__(&a->masks, idx) & b_u64) != b_u64)) {
return true; /* 'a' wildcards some bits 'b' doesn't. */
}
}
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
+#include "bitmap.h"
#include "byte-order.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
};
BUILD_ASSERT_DECL(sizeof(struct flow) % sizeof(uint64_t) == 0);
BUILD_ASSERT_DECL(sizeof(struct flow_tnl) % sizeof(uint64_t) == 0);
-/* Number of uint64_t units in flow tunnel metadata. */
-#define FLOW_TNL_U64S (sizeof(struct flow_tnl) / sizeof(uint64_t))
#define FLOW_U64S (sizeof(struct flow) / sizeof(uint64_t))
extern const uint8_t flow_segment_u64s[];
+#define FLOW_U64_OFFSET(FIELD) \
+ (offsetof(struct flow, FIELD) / sizeof(uint64_t))
+#define FLOW_U64_OFFREM(FIELD) \
+ (offsetof(struct flow, FIELD) % sizeof(uint64_t))
+
+/* Number of 64-bit units spanned by a 'FIELD'. */
+#define FLOW_U64_SIZE(FIELD) \
+ DIV_ROUND_UP(FLOW_U64_OFFREM(FIELD) + MEMBER_SIZEOF(struct flow, FIELD), \
+ sizeof(uint64_t))
+
void flow_extract(struct dp_packet *, struct flow *);
void flow_zero_wildcards(struct flow *, const struct flow_wildcards *);
bool flow_equal_except(const struct flow *a, const struct flow *b,
const struct flow_wildcards *);
\f
-/* Compressed flow. */
+/* Bitmap for flow values. For each 1-bit the corresponding flow value is
+ * explicitly specified, other values are zeroes.
+ *
+ * map_t must be wide enough to hold any member of struct flow. */
+typedef unsigned long long map_t;
+#define MAP_T_BITS (sizeof(map_t) * CHAR_BIT)
+#define MAP_1 (map_t)1
+#define MAP_MAX TYPE_MAXIMUM(map_t)
+
+#define MAP_IS_SET(MAP, IDX) ((MAP) & (MAP_1 << (IDX)))
+
+/* Iterate through the indices of all 1-bits in 'MAP'. */
+#define MAP_FOR_EACH_INDEX(IDX, MAP) \
+ ULLONG_FOR_EACH_1(IDX, MAP)
+
+#define FLOWMAP_UNITS DIV_ROUND_UP(FLOW_U64S, MAP_T_BITS)
+
+struct flowmap {
+ map_t bits[FLOWMAP_UNITS];
+};
+
+#define FLOWMAP_EMPTY_INITIALIZER { { 0 } }
+
+static inline void flowmap_init(struct flowmap *);
+static inline bool flowmap_equal(struct flowmap, struct flowmap);
+static inline bool flowmap_is_set(const struct flowmap *, size_t idx);
+static inline bool flowmap_are_set(const struct flowmap *, size_t idx,
+ unsigned int n_bits);
+static inline void flowmap_set(struct flowmap *, size_t idx,
+ unsigned int n_bits);
+static inline void flowmap_clear(struct flowmap *, size_t idx,
+ unsigned int n_bits);
+static inline struct flowmap flowmap_or(struct flowmap, struct flowmap);
+static inline struct flowmap flowmap_and(struct flowmap, struct flowmap);
+static inline bool flowmap_is_empty(struct flowmap);
+static inline unsigned int flowmap_n_1bits(struct flowmap);
+
+#define FLOWMAP_HAS_FIELD(FM, FIELD) \
+ flowmap_are_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
+
+#define FLOWMAP_SET(FM, FIELD) \
+ flowmap_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
+
+#define FLOWMAP_SET__(FM, FIELD, SIZE) \
+ flowmap_set(FM, FLOW_U64_OFFSET(FIELD), \
+ DIV_ROUND_UP(SIZE, sizeof(uint64_t)))
+
+/* XXX: Only works for full 64-bit units. */
+#define FLOWMAP_CLEAR(FM, FIELD) \
+ BUILD_ASSERT_DECL(FLOW_U64_OFFREM(FIELD) == 0); \
+ BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->FIELD) % sizeof(uint64_t) == 0); \
+ flowmap_clear(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
+
+/* Iterate through all flowmap unit indices. */
+#define FLOWMAP_FOR_EACH_UNIT(UNIT) \
+ for ((UNIT) = 0; (UNIT) < FLOWMAP_UNITS; (UNIT)++)
+
+/* Iterate through all map units in 'FLOWMAP'. */
+#define FLOWMAP_FOR_EACH_MAP(MAP, FLOWMAP) \
+ for (size_t unit__ = 0; \
+ unit__ < FLOWMAP_UNITS && ((MAP) = (FLOWMAP).bits[unit__], true); \
+ unit__++)
+
+struct flowmap_aux;
+static inline bool flowmap_next_index(struct flowmap_aux *, size_t *idx);
+
+#define FLOWMAP_AUX_INITIALIZER(FLOWMAP) { .unit = 0, .map = (FLOWMAP) }
+
+/* Iterate through all struct flow u64 indices specified by 'MAP'. This is a
+ * slower but easier version of the FLOWMAP_FOR_EACH_MAP() &
+ * MAP_FOR_EACH_INDEX() combination. */
+#define FLOWMAP_FOR_EACH_INDEX(IDX, MAP) \
+ for (struct flowmap_aux aux__ = FLOWMAP_AUX_INITIALIZER(MAP); \
+ flowmap_next_index(&aux__, &(IDX));)
+
+/* Flowmap inline implementations. */
+static inline void
+flowmap_init(struct flowmap *fm)
+{
+ memset(fm, 0, sizeof *fm);
+}
+
+static inline bool
+flowmap_equal(struct flowmap a, struct flowmap b)
+{
+ return !memcmp(&a, &b, sizeof a);
+}
+
+static inline bool
+flowmap_is_set(const struct flowmap *fm, size_t idx)
+{
+ return (fm->bits[idx / MAP_T_BITS] & (MAP_1 << (idx % MAP_T_BITS))) != 0;
+}
+
+/* Returns 'true' if any of the 'n_bits' bits starting at 'idx' are set in
+ * 'fm'. 'n_bits' can be at most MAP_T_BITS. */
+static inline bool
+flowmap_are_set(const struct flowmap *fm, size_t idx, unsigned int n_bits)
+{
+ map_t n_bits_mask = (MAP_1 << n_bits) - 1;
+ size_t unit = idx / MAP_T_BITS;
+
+ idx %= MAP_T_BITS;
-/* Check that all tunnel fields fit into a single map. */
-BUILD_ASSERT_DECL(FLOW_TNL_U64S <= 64);
+ if (fm->bits[unit] & (n_bits_mask << idx)) {
+ return true;
+ }
+ if (idx + n_bits > MAP_T_BITS) {
+ /* Check the remaining bits from the next unit. */
+ return fm->bits[unit + 1] & (n_bits_mask >> (MAP_T_BITS - idx));
+ }
+ return false;
+}
-/* Check that all non-tunnel fields fit into a single map. */
-BUILD_ASSERT_DECL(FLOW_U64S - FLOW_TNL_U64S <= 64);
+/* Sets the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
+ * 'n_bits' can be at most MAP_T_BITS. */
+static inline void
+flowmap_set(struct flowmap *fm, size_t idx, unsigned int n_bits)
+{
+ map_t n_bits_mask = (MAP_1 << n_bits) - 1;
+ size_t unit = idx / MAP_T_BITS;
+
+ idx %= MAP_T_BITS;
+
+ fm->bits[unit] |= n_bits_mask << idx;
+ if (idx + n_bits > MAP_T_BITS) {
+ /* 'MAP_T_BITS - idx' bits were set on 'unit', set the remaining
+ * bits from the next unit. */
+ fm->bits[unit + 1] |= n_bits_mask >> (MAP_T_BITS - idx);
+ }
+}
+
+/* Clears the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
+ * 'n_bits' can be at most MAP_T_BITS. */
+static inline void
+flowmap_clear(struct flowmap *fm, size_t idx, unsigned int n_bits)
+{
+ map_t n_bits_mask = (MAP_1 << n_bits) - 1;
+ size_t unit = idx / MAP_T_BITS;
+
+ idx %= MAP_T_BITS;
+
+ fm->bits[unit] &= ~(n_bits_mask << idx);
+ if (idx + n_bits > MAP_T_BITS) {
+ /* 'MAP_T_BITS - idx' bits were cleared on 'unit', clear the
+ * remaining bits from the next unit. */
+ fm->bits[unit + 1] &= ~(n_bits_mask >> (MAP_T_BITS - idx));
+ }
+}
+
+/* OR the bits in the flowmaps. */
+static inline struct flowmap
+flowmap_or(struct flowmap a, const struct flowmap b)
+{
+ struct flowmap map;
+ size_t unit;
+
+ FLOWMAP_FOR_EACH_UNIT (unit) {
+ map.bits[unit] = a.bits[unit] | b.bits[unit];
+ }
+ return map;
+}
+
+/* AND the bits in the flowmaps. */
+static inline struct flowmap
+flowmap_and(struct flowmap a, const struct flowmap b)
+{
+ struct flowmap map;
+ size_t unit;
+
+ FLOWMAP_FOR_EACH_UNIT (unit) {
+ map.bits[unit] = a.bits[unit] & b.bits[unit];
+ }
+ return map;
+}
+
+static inline bool
+flowmap_is_empty(const struct flowmap fm)
+{
+ map_t map;
+
+ FLOWMAP_FOR_EACH_MAP (map, fm) {
+ if (map) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static inline unsigned int
+flowmap_n_1bits(struct flowmap fm)
+{
+ unsigned int n_1bits = 0;
+ size_t unit;
+
+ FLOWMAP_FOR_EACH_UNIT (unit) {
+ n_1bits += count_1bits(fm.bits[unit]);
+ }
+ return n_1bits;
+}
+
+struct flowmap_aux {
+ size_t unit;
+ struct flowmap map;
+};
+
+static inline bool
+flowmap_next_index(struct flowmap_aux *aux, size_t *idx)
+{
+ for (;;) {
+ map_t *map = &aux->map.bits[aux->unit];
+ if (*map) {
+ *idx = aux->unit * MAP_T_BITS + raw_ctz(*map);
+ *map = zero_rightmost_1bit(*map);
+ return true;
+ }
+ if (++aux->unit >= FLOWMAP_UNITS) {
+ return false;
+ }
+ }
+}
+
+\f
+/* Compressed flow. */
/* A sparse representation of a "struct flow".
*
* importantly, minimizes the number of accessed cache lines. Second, it saves
* time when the goal is to iterate over only the nonzero parts of the struct.
*
- * The map members hold one bit for each uint64_t in a "struct flow". Each
+ * The map member holds one bit for each uint64_t in a "struct flow". Each
* 0-bit indicates that the corresponding uint64_t is zero, each 1-bit that it
* *may* be nonzero (see below how this applies to minimasks).
*
- * The values indicated by 'tnl_map' and 'pkt_map' always follow the miniflow
- * in memory. The user of the miniflow is responsible for always having enough
- * storage after the struct miniflow corresponding to the number of 1-bits in
- * maps.
+ * The values indicated by 'map' always follow the miniflow in memory. The
+ * user of the miniflow is responsible for always having enough storage after
+ * the struct miniflow corresponding to the number of 1-bits in the map.
*
* Elements in values array are allowed to be zero. This is useful for "struct
* minimatch", for which ensuring that the miniflow and minimask members have
* A miniflow is always dynamically allocated so that the maps are followed by
* at least as many elements as there are 1-bits in maps. */
struct miniflow {
- uint64_t tnl_map;
- uint64_t pkt_map;
+ struct flowmap map;
/* Followed by:
* uint64_t values[n];
* where 'n' is miniflow_n_values(miniflow). */
};
-BUILD_ASSERT_DECL(sizeof(struct miniflow) == 2 * sizeof(uint64_t));
+BUILD_ASSERT_DECL(sizeof(struct miniflow) % sizeof(uint64_t) == 0);
#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint64_t))
* were extracted. */
void miniflow_extract(struct dp_packet *packet, struct miniflow *dst);
void miniflow_map_init(struct miniflow *, const struct flow *);
-void flow_wc_map(const struct flow *, struct miniflow *);
+void flow_wc_map(const struct flow *, struct flowmap *);
size_t miniflow_alloc(struct miniflow *dsts[], size_t n,
const struct miniflow *src);
void miniflow_init(struct miniflow *, const struct flow *);
static inline size_t
miniflow_n_values(const struct miniflow *flow)
{
- return count_1bits(flow->tnl_map) + count_1bits(flow->pkt_map);
+ return flowmap_n_1bits(flow->map);
}
struct flow_for_each_in_maps_aux {
- const uint64_t *values;
- struct miniflow maps;
+ const struct flow *flow;
+ struct flowmap_aux map_aux;
};
-static inline uint64_t
-flow_values_get_next_in_map(const uint64_t *values, uint64_t *map)
-{
- uint64_t value = values[raw_ctz(*map)];
-
- *map = zero_rightmost_1bit(*map);
-
- return value;
-}
-
static inline bool
flow_values_get_next_in_maps(struct flow_for_each_in_maps_aux *aux,
uint64_t *value)
{
- if (aux->maps.tnl_map) {
- *value = flow_values_get_next_in_map(aux->values, &aux->maps.tnl_map);
- return true;
- }
- if (aux->maps.pkt_map) {
- *value = flow_values_get_next_in_map(aux->values + FLOW_TNL_U64S,
- &aux->maps.pkt_map);
+ size_t idx;
+
+ if (flowmap_next_index(&aux->map_aux, &idx)) {
+ *value = flow_u64_value(aux->flow, idx);
return true;
}
return false;
}
-/* Iterate through all flow tunnel u64 values specified by 'MAPS'. */
+/* Iterate through all flow u64 values specified by 'MAPS'. */
#define FLOW_FOR_EACH_IN_MAPS(VALUE, FLOW, MAPS) \
for (struct flow_for_each_in_maps_aux aux__ \
- = { (const uint64_t *)(FLOW), (MAPS) }; \
+ = { (FLOW), FLOWMAP_AUX_INITIALIZER(MAPS) }; \
flow_values_get_next_in_maps(&aux__, &(VALUE));)
-/* Iterate through all struct flow u64 indices specified by 'MAP'. */
-#define MAP_FOR_EACH_INDEX(U64IDX, MAP) \
- for (uint64_t map__ = (MAP); \
- map__ && ((U64IDX) = raw_ctz(map__), true); \
- map__ = zero_rightmost_1bit(map__))
-
-/* Iterate through all struct flow u64 indices specified by 'MAPS'. */
-#define MAPS_FOR_EACH_INDEX(U64IDX, MAPS) \
- for (struct miniflow maps__ = (MAPS); \
- maps__.tnl_map \
- ? ((U64IDX) = raw_ctz(maps__.tnl_map), \
- maps__.tnl_map = zero_rightmost_1bit(maps__.tnl_map), \
- true) \
- : (maps__.pkt_map && \
- ((U64IDX) = FLOW_TNL_U64S + raw_ctz(maps__.pkt_map), \
- maps__.pkt_map = zero_rightmost_1bit(maps__.pkt_map), \
- true));)
-
-#define FLOW_U64_SIZE(FIELD) \
- DIV_ROUND_UP(sizeof(((struct flow *)0)->FIELD), sizeof(uint64_t))
-
-#define MINIFLOW_TNL_MAP__(FIELD, LEN) \
- (((UINT64_C(1) << DIV_ROUND_UP(LEN, sizeof(uint64_t))) - 1) \
- << (offsetof(struct flow, FIELD) / sizeof(uint64_t)))
-
-#define MINIFLOW_TNL_MAP(FIELD) \
- MINIFLOW_TNL_MAP__(FIELD, sizeof(((struct flow *)0)->FIELD))
-#define MINIFLOW_PKT_MAP(FIELD) \
- (((UINT64_C(1) << FLOW_U64_SIZE(FIELD)) - 1) \
- << ((offsetof(struct flow, FIELD) / sizeof(uint64_t)) - FLOW_TNL_U64S))
-
struct mf_for_each_in_map_aux {
+ size_t unit;
+ struct flowmap fmap;
+ struct flowmap map;
const uint64_t *values;
- uint64_t fmap;
- uint64_t map;
};
static inline bool
mf_get_next_in_map(struct mf_for_each_in_map_aux *aux,
uint64_t *value)
{
- if (aux->map) {
- uint64_t rm1bit = rightmost_1bit(aux->map);
+ map_t *map, *fmap;
+ map_t rm1bit;
+
+ while (OVS_UNLIKELY(!*(map = &aux->map.bits[aux->unit]))) {
+ /* Skip remaining data in the previous unit. */
+ aux->values += count_1bits(aux->fmap.bits[aux->unit]);
+ if (++aux->unit == FLOWMAP_UNITS) {
+ return false;
+ }
+ }
- aux->map -= rm1bit;
+ rm1bit = rightmost_1bit(*map);
+ *map -= rm1bit;
+ fmap = &aux->fmap.bits[aux->unit];
- if (aux->fmap & rm1bit) {
- uint64_t trash = aux->fmap & (rm1bit - 1);
+ if (OVS_LIKELY(*fmap & rm1bit)) {
+ map_t trash = *fmap & (rm1bit - 1);
- aux->fmap -= trash;
- /* count_1bits() is fast for systems where speed matters (e.g.,
- * DPDK), so we don't try avoid using it.
- * Advance 'aux->values' to point to the value for 'rm1bit'. */
- aux->values += count_1bits(trash);
+ *fmap -= trash;
+ /* count_1bits() is fast for systems where speed matters (e.g.,
+ * DPDK), so we don't try to avoid using it.
+ * Advance 'aux->values' to point to the value for 'rm1bit'. */
+ aux->values += count_1bits(trash);
- *value = *aux->values;
- } else {
- *value = 0;
- }
- return true;
+ *value = *aux->values;
+ } else {
+ *value = 0;
}
- return false;
+ return true;
}
-/* Iterate through miniflow TNL u64 values specified by 'MAPS'. */
-#define MINIFLOW_FOR_EACH_IN_TNL_MAP(VALUE, FLOW, MAPS) \
- for (struct mf_for_each_in_map_aux aux__ = \
- { miniflow_get_values(FLOW), (FLOW)->tnl_map, (MAPS).tnl_map }; \
- mf_get_next_in_map(&aux__, &(VALUE));)
-
-/* Iterate through miniflow PKT u64 values specified by 'MAPS'. */
-#define MINIFLOW_FOR_EACH_IN_PKT_MAP(VALUE, FLOW, MAPS) \
+/* Iterate through miniflow u64 values specified by 'FLOWMAP'. */
+#define MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, FLOW, FLOWMAP) \
for (struct mf_for_each_in_map_aux aux__ = \
- { miniflow_get_values(FLOW) + count_1bits((FLOW)->tnl_map), \
- (FLOW)->pkt_map, (MAPS).pkt_map }; \
+ { 0, (FLOW)->map, (FLOWMAP), miniflow_get_values(FLOW) }; \
mf_get_next_in_map(&aux__, &(VALUE));)
-/* This can be used when it is known that 'u64_idx' is set in 'map'. */
+/* This can be used when it is known that 'idx' is set in 'map'. */
static inline const uint64_t *
-miniflow_values_get__(const uint64_t *values, uint64_t map, size_t u64_idx)
+miniflow_values_get__(const uint64_t *values, map_t map, size_t idx)
{
- return values + count_1bits(map & ((UINT64_C(1) << u64_idx) - 1));
+ return values + count_1bits(map & ((MAP_1 << idx) - 1));
}
/* This can be used when it is known that 'u64_idx' is set in
* the map of 'mf'. */
static inline const uint64_t *
-miniflow_get__(const struct miniflow *mf, size_t u64_idx)
-{
- return OVS_LIKELY(u64_idx >= FLOW_TNL_U64S)
- ? miniflow_values_get__(miniflow_get_values(mf)
- + count_1bits(mf->tnl_map),
- mf->pkt_map, u64_idx - FLOW_TNL_U64S)
- : miniflow_values_get__(miniflow_get_values(mf), mf->tnl_map, u64_idx);
-}
-
-#define MINIFLOW_IN_MAP(MF, U64_IDX) \
- (OVS_LIKELY(U64_IDX >= FLOW_TNL_U64S) \
- ? (MF)->pkt_map & (UINT64_C(1) << ((U64_IDX) - FLOW_TNL_U64S)) \
- : (MF)->tnl_map & (UINT64_C(1) << (U64_IDX)))
-
-/* Get the value of 'FIELD' of an up to 8 byte wide integer type 'TYPE' of
- * a miniflow. */
-#define MINIFLOW_GET_TYPE(MF, TYPE, OFS) \
- (MINIFLOW_IN_MAP(MF, (OFS) / sizeof(uint64_t)) \
- ? ((OVS_FORCE const TYPE *)miniflow_get__(MF, (OFS) / sizeof(uint64_t))) \
- [(OFS) % sizeof(uint64_t) / sizeof(TYPE)] \
+miniflow_get__(const struct miniflow *mf, size_t idx)
+{
+ const uint64_t *values = miniflow_get_values(mf);
+ const map_t *map = mf->map.bits;
+
+ while (idx >= MAP_T_BITS) {
+ idx -= MAP_T_BITS;
+ values += count_1bits(*map++);
+ }
+ return miniflow_values_get__(values, *map, idx);
+}
+
+#define MINIFLOW_IN_MAP(MF, IDX) flowmap_is_set(&(MF)->map, IDX)
+
+/* Get the value of the struct flow 'FIELD' as up to 8 byte wide integer type
+ * 'TYPE' from miniflow 'MF'. */
+#define MINIFLOW_GET_TYPE(MF, TYPE, FIELD) \
+ (MINIFLOW_IN_MAP(MF, FLOW_U64_OFFSET(FIELD)) \
+ ? ((OVS_FORCE const TYPE *)miniflow_get__(MF, FLOW_U64_OFFSET(FIELD))) \
+ [FLOW_U64_OFFREM(FIELD) / sizeof(TYPE)] \
: 0)
-#define MINIFLOW_GET_U8(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, uint8_t, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_U16(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, uint16_t, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_BE16(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, ovs_be16, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_U32(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, uint32_t, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_BE32(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, ovs_be32, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_U64(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, uint64_t, offsetof(struct flow, FIELD))
-#define MINIFLOW_GET_BE64(FLOW, FIELD) \
- MINIFLOW_GET_TYPE(FLOW, ovs_be64, offsetof(struct flow, FIELD))
+#define MINIFLOW_GET_U8(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, uint8_t, FIELD)
+#define MINIFLOW_GET_U16(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, uint16_t, FIELD)
+#define MINIFLOW_GET_BE16(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, ovs_be16, FIELD)
+#define MINIFLOW_GET_U32(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, uint32_t, FIELD)
+#define MINIFLOW_GET_BE32(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, ovs_be32, FIELD)
+#define MINIFLOW_GET_U64(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, uint64_t, FIELD)
+#define MINIFLOW_GET_BE64(FLOW, FIELD) \
+ MINIFLOW_GET_TYPE(FLOW, ovs_be64, FIELD)
static inline uint64_t miniflow_get(const struct miniflow *,
unsigned int u64_ofs);
static inline uint16_t miniflow_get_vid(const struct miniflow *);
static inline uint16_t miniflow_get_tcp_flags(const struct miniflow *);
static inline ovs_be64 miniflow_get_metadata(const struct miniflow *);
-static inline bool miniflow_equal_maps(const struct miniflow *a,
- const struct miniflow *b);
bool miniflow_equal(const struct miniflow *a, const struct miniflow *b);
bool miniflow_equal_in_minimask(const struct miniflow *a,
/* For every 1-bit in mask's map, the corresponding value is non-zero,
* so the only way the mask can not fix any bits or fields is for the
* map the be zero. */
- return mask->masks.tnl_map == 0 && mask->masks.pkt_map == 0;
+ return flowmap_is_empty(mask->masks.map);
}
/* Returns the uint64_t that would be at byte offset '8 * u64_ofs' if 'flow'
static inline uint64_t miniflow_get(const struct miniflow *flow,
unsigned int u64_ofs)
{
- return MINIFLOW_IN_MAP(flow, u64_ofs)
- ? *miniflow_get__(flow, u64_ofs) : 0;
+ return MINIFLOW_IN_MAP(flow, u64_ofs) ? *miniflow_get__(flow, u64_ofs) : 0;
}
static inline uint32_t miniflow_get_u32(const struct miniflow *flow,
return MINIFLOW_GET_BE64(flow, metadata);
}
-static inline bool
-miniflow_equal_maps(const struct miniflow *a, const struct miniflow *b)
-{
- return a->tnl_map == b->tnl_map && a->pkt_map == b->pkt_map;
-}
-
/* Returns the mask for the OpenFlow 1.1+ "metadata" field in 'mask'.
*
* The return value is all-1-bits if 'mask' matches on the whole value of the
return MINIFLOW_GET_BE64(&mask->masks, metadata);
}
-/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
- * fields in 'dst', storing the result in 'dst'. */
+/* Perform a bitwise OR of miniflow 'src' flow data specified in 'subset' with
+ * the equivalent fields in 'dst', storing the result in 'dst'. 'subset' must
+ * be a subset of 'src's map. */
static inline void
-flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
+flow_union_with_miniflow_subset(struct flow *dst, const struct miniflow *src,
+ struct flowmap subset)
{
uint64_t *dst_u64 = (uint64_t *) dst;
const uint64_t *p = miniflow_get_values(src);
- size_t idx;
+ map_t map;
- MAP_FOR_EACH_INDEX(idx, src->tnl_map) {
- dst_u64[idx] |= *p++;
- }
- dst_u64 += FLOW_TNL_U64S;
- MAP_FOR_EACH_INDEX(idx, src->pkt_map) {
- dst_u64[idx] |= *p++;
+ FLOWMAP_FOR_EACH_MAP (map, subset) {
+ size_t idx;
+
+ MAP_FOR_EACH_INDEX(idx, map) {
+ dst_u64[idx] |= *p++;
+ }
+ dst_u64 += MAP_T_BITS;
}
}
+/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
+ * fields in 'dst', storing the result in 'dst'. */
+static inline void
+flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
+{
+ flow_union_with_miniflow_subset(dst, src, src->map);
+}
+
static inline void
pkt_metadata_from_flow(struct pkt_metadata *md, const struct flow *flow)
{
minimatch_matches_flow(const struct minimatch *match,
const struct flow *target)
{
- const uint64_t *target_u64 = (const uint64_t *) target;
const uint64_t *flowp = miniflow_get_values(match->flow);
const uint64_t *maskp = miniflow_get_values(&match->mask->masks);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, *match->flow) {
- if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
+ FLOWMAP_FOR_EACH_INDEX(idx, match->flow->map) {
+ if ((*flowp++ ^ flow_u64_value(target, idx)) & *maskp++) {
return false;
}
}
static inline uint32_t
miniflow_hash__(const struct miniflow *flow, uint32_t basis)
{
- const uint64_t *values = miniflow_get_values(flow);
- const uint64_t *p = values;
+ const uint64_t *p = miniflow_get_values(flow);
+ size_t n_values = miniflow_n_values(flow);
+ struct flowmap hash_map = FLOWMAP_EMPTY_INITIALIZER;
uint32_t hash = basis;
- uint64_t hash_tnl_map = 0, hash_pkt_map = 0;
- uint64_t map;
+ size_t idx;
- for (map = flow->tnl_map; map; map = zero_rightmost_1bit(map)) {
- if (*p) {
- hash = hash_add64(hash, *p);
- hash_tnl_map |= rightmost_1bit(map);
+ FLOWMAP_FOR_EACH_INDEX(idx, flow->map) {
+ uint64_t value = *p++;
+
+ if (value) {
+ hash = hash_add64(hash, value);
+ flowmap_set(&hash_map, idx, 1);
}
- p++;
}
- for (map = flow->pkt_map; map; map = zero_rightmost_1bit(map)) {
- if (*p) {
- hash = hash_add64(hash, *p);
- hash_pkt_map |= rightmost_1bit(map);
- }
- p++;
+ map_t map;
+ FLOWMAP_FOR_EACH_MAP (map, hash_map) {
+ hash = hash_add64(hash, map);
}
- hash = hash_add64(hash, hash_tnl_map);
- hash = hash_add64(hash, hash_pkt_map);
- return hash_finish(hash, p - values);
+ return hash_finish(hash, n_values);
}
static void