flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
uint32_t basis)
{
- const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *mask_values = mask->masks.values;
const uint64_t *flow_u64 = (const uint64_t *)flow;
const uint64_t *p = mask_values;
uint32_t hash;
miniflow_hash_in_minimask(const struct miniflow *flow,
const struct minimask *mask, uint32_t basis)
{
- const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *mask_values = mask->masks.values;
const uint64_t *p = mask_values;
uint32_t hash = basis;
uint64_t flow_u64;
const struct minimask *mask,
uint8_t start, uint8_t end, uint32_t *basis)
{
- const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *mask_values = mask->masks.values;
const uint64_t *flow_u64 = (const uint64_t *)flow;
unsigned int offset;
uint64_t map;
int idx;
map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
- p = miniflow_get_values(&mask->masks) + offset;
+ p = mask->masks.values + offset;
MAP_FOR_EACH_INDEX(idx, map) {
dst_u64[idx] |= *p++;
}
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
- const uint64_t *values = miniflow_get_values(flow);
+ const uint64_t *values = flow->values;
const uint64_t *p = values;
uint32_t hash = basis;
uint64_t hash_map = 0;
static inline uint32_t
minimatch_hash(const struct minimatch *match, uint32_t basis)
{
- return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
+ return miniflow_hash(match->flow, minimask_hash(match->mask, basis));
}
/* Returns a hash value for the bits of range [start, end) in 'minimatch',
uint32_t hash = *basis;
int n, i;
- n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
+ n = count_1bits(miniflow_get_map_in_range(&match->mask->masks, start, end,
&offset));
- q = miniflow_get_values(&match->mask.masks) + offset;
- p = miniflow_get_values(&match->flow) + offset;
+ q = match->mask->masks.values + offset;
+ p = match->flow->values + offset;
for (i = 0; i < n; i++) {
hash = hash_add64(hash, p[i] & q[i]);
cls_match_alloc(const struct cls_rule *rule, cls_version_t version,
const struct cls_conjunction conj[], size_t n)
{
- int count = count_1bits(rule->match.flow.map);
+ int count = count_1bits(rule->match.flow->map);
struct cls_match *cls_match
- = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
- + MINIFLOW_VALUES_SIZE(count));
+ = xmalloc(sizeof *cls_match + MINIFLOW_VALUES_SIZE(count));
ovsrcu_init(&cls_match->next, NULL);
*CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
atomic_init(&cls_match->remove_version, version); /* Initially
* invisible. */
miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
- &rule->match.flow, count);
+ rule->match.flow, count);
ovsrcu_set_hidden(&cls_match->conj_set,
cls_conjunction_set_alloc(cls_match, conj, n));
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
- return minimask_is_catchall(&rule->match.mask);
+ return minimask_is_catchall(rule->match.mask);
}
/* Makes rule invisible after 'version'. Once that version is made invisible
static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
/* Could optimize to use the same map if needed for fast path. */
- return MINIFLOW_GET_BE32(&match->flow, tp_src)
- & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
+ return MINIFLOW_GET_BE32(match->flow, tp_src)
+ & MINIFLOW_GET_BE32(&match->mask->masks, tp_src);
}
static void
CONST_CAST(struct cls_rule *, rule)->cls_match = new;
- subtable = find_subtable(cls, &rule->match.mask);
+ subtable = find_subtable(cls, rule->match.mask);
if (!subtable) {
- subtable = insert_subtable(cls, &rule->match.mask);
+ subtable = insert_subtable(cls, rule->match.mask);
}
/* Compute hashes in segments. */
}
hash = minimatch_hash_range(&rule->match, prev_be64ofs, FLOW_U64S, &basis);
- head = find_equal(subtable, &rule->match.flow, hash);
+ head = find_equal(subtable, rule->match.flow, hash);
if (!head) {
/* Add rule to tries.
*
* Concurrent readers might miss seeing the rule until this update,
* which might require being fixed up by revalidation later. */
new->partition = NULL;
- if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
- ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
+ if (minimask_get_metadata_mask(rule->match.mask) == OVS_BE64_MAX) {
+ ovs_be64 metadata = miniflow_get_metadata(rule->match.flow);
new->partition = create_partition(cls, subtable, metadata);
}
/* Remove 'cls_rule' from the subtable's rules list. */
rculist_remove(CONST_CAST(struct rculist *, &cls_rule->node));
- subtable = find_subtable(cls, &cls_rule->match.mask);
+ subtable = find_subtable(cls, cls_rule->match.mask);
ovs_assert(subtable);
for (i = 0; i < subtable->n_indices; i++) {
hash = minimatch_hash_range(&cls_rule->match, prev_be64ofs, FLOW_U64S,
&basis);
- head = find_equal(subtable, &cls_rule->match.flow, hash);
+ head = find_equal(subtable, cls_rule->match.flow, hash);
/* Check if the rule is not the head rule. */
if (rule != head) {
const struct cls_match *head, *rule;
const struct cls_subtable *subtable;
- subtable = find_subtable(cls, &target->match.mask);
+ subtable = find_subtable(cls, target->match.mask);
if (!subtable) {
return NULL;
}
- head = find_equal(subtable, &target->match.flow,
- miniflow_hash_in_minimask(&target->match.flow,
- &target->match.mask, 0));
+ head = find_equal(subtable, target->match.flow,
+ miniflow_hash_in_minimask(target->match.flow,
+ target->match.mask, 0));
if (!head) {
return NULL;
}
/* Iterate subtables in the descending max priority order. */
PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
sizeof(struct cls_subtable), &cls->subtables) {
- uint64_t storage[FLOW_U64S];
- struct minimask mask;
+ struct {
+ struct minimask mask;
+ uint64_t storage[FLOW_U64S];
+ } m;
const struct cls_rule *rule;
- minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
+ minimask_combine(&m.mask, target->match.mask, &subtable->mask,
+ m.storage);
RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
if (rule->priority == target->priority
- && miniflow_equal_in_minimask(&target->match.flow,
- &rule->match.flow, &mask)
+ && miniflow_equal_in_minimask(target->match.flow,
+ rule->match.flow, &m.mask)
&& cls_match_visible_in_version(rule->cls_match, version)) {
return true;
}
cls_rule_is_loose_match(const struct cls_rule *rule,
const struct minimatch *criteria)
{
- return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
- && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
- &criteria->mask));
+ return (!minimask_has_extra(rule->match.mask, criteria->mask)
+ && miniflow_equal_in_minimask(rule->match.flow, criteria->flow,
+ criteria->mask));
}
\f
/* Iteration. */
{
/* Rule may only match a target if it is visible in target's version. */
return cls_match_visible_in_version(rule->cls_match, version)
- && (!target || miniflow_equal_in_minimask(&rule->match.flow,
- &target->match.flow,
- &target->match.mask));
+ && (!target || miniflow_equal_in_minimask(rule->match.flow,
+ target->match.flow,
+ target->match.mask));
}
static const struct cls_rule *
struct cls_cursor *cursor)
{
if (!cursor->target
- || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
+ || !minimask_has_extra(&subtable->mask, cursor->target->match.mask)) {
const struct cls_rule *rule;
RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
uint8_t prev;
int count = count_1bits(mask->masks.map);
- subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
- + MINIFLOW_VALUES_SIZE(count));
+ subtable = xzalloc(sizeof *subtable + MINIFLOW_VALUES_SIZE(count));
cmap_init(&subtable->rules);
miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
&mask->masks, count);
const struct minimask *mask,
const struct flow *target)
{
- const uint64_t *flowp = miniflow_get_values(flow);
- const uint64_t *maskp = miniflow_get_values(&mask->masks);
+ const uint64_t *flowp = flow->values;
+ const uint64_t *maskp = mask->masks.values;
int idx;
MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
const struct flow *target,
struct flow_wildcards *wc)
{
- const uint64_t *flowp = miniflow_get_values(flow);
- const uint64_t *maskp = miniflow_get_values(&mask->masks);
+ const uint64_t *flowp = flow->values;
+ const uint64_t *maskp = mask->masks.values;
int idx;
MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
return (OVS_FORCE const ovs_be32 *)
- (miniflow_get_values(&match->flow)
- + count_1bits(match->flow.map &
+ (match->flow->values
+ + count_1bits(match->flow->map &
((UINT64_C(1) << mf->flow_be32ofs / 2) - 1)))
+ (mf->flow_be32ofs & 1);
}
uint32_t hash; /* Hash function differs for different users. */
uint32_t len; /* Length of the following miniflow (incl. map). */
struct miniflow mf;
- uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE];
+ uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* Exact match cache for frequently used flows
{
int i;
- BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));
+ BUILD_ASSERT(offsetof(struct miniflow, values) == sizeof(uint64_t));
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
flow_cache->entries[i].key.len
- = offsetof(struct miniflow, inline_values);
- miniflow_initialize(&flow_cache->entries[i].key.mf,
- flow_cache->entries[i].key.buf);
+ = offsetof(struct miniflow, values);
+ flow_cache->entries[i].key.mf.map = 0;
}
}
* The following assertions make sure that what we're doing with miniflow is
* safe
*/
-BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
- == sizeof(uint64_t));
+BUILD_ASSERT_DECL(offsetof(struct miniflow, values) == sizeof(uint64_t));
/* Given the number of bits set in the miniflow map, returns the size of the
* 'netdev_flow_key.mf' */
static inline uint32_t
netdev_flow_key_size(uint32_t flow_u32s)
{
- return offsetof(struct miniflow, inline_values) +
- MINIFLOW_VALUES_SIZE(flow_u32s);
+ return offsetof(struct miniflow, values) + MINIFLOW_VALUES_SIZE(flow_u32s);
}
static inline bool
struct dp_packet packet;
uint64_t buf_stub[512 / 8];
- miniflow_initialize(&dst->mf, dst->buf);
-
dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
pkt_metadata_from_flow(&packet.md, src);
flow_compose(&packet, src);
const struct match *match)
{
const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks;
- uint64_t *dst = mask->mf.inline_values;
+ uint64_t *dst = mask->mf.values;
uint64_t map, mask_map = 0;
uint32_t hash = 0;
int n;
map -= rm1bit;
}
- mask->mf.values_inline = true;
mask->mf.map = mask_map;
hash = hash_add64(hash, mask_map);
- n = dst - mask->mf.inline_values;
+ n = dst - mask->mf.values;
mask->hash = hash_finish(hash, n * 8);
mask->len = netdev_flow_key_size(n);
const struct flow *flow,
const struct netdev_flow_key *mask)
{
- uint64_t *dst_u64 = dst->mf.inline_values;
- const uint64_t *mask_u64 = mask->mf.inline_values;
+ uint64_t *dst_u64 = dst->mf.values;
+ const uint64_t *mask_u64 = mask->mf.values;
uint32_t hash = 0;
uint64_t value;
dst->len = mask->len;
- dst->mf.values_inline = true;
dst->mf.map = mask->mf.map;
FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) {
*dst_u64 = value & *mask_u64++;
hash = hash_add64(hash, *dst_u64++);
}
- dst->hash = hash_finish(hash, (dst_u64 - dst->mf.inline_values) * 8);
+ dst->hash = hash_finish(hash, (dst_u64 - dst->mf.values) * 8);
}
/* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
- for (struct mf_for_each_in_map_aux aux__ \
- = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
- mf_get_next_in_map(&aux__, &(VALUE)); \
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
+ for (struct mf_for_each_in_map_aux aux__ \
+ = { (KEY)->mf.values, (KEY)->mf.map, MAP }; \
+ mf_get_next_in_map(&aux__, &(VALUE)); \
)
/* Returns a hash value for the bits of 'key' where there are 1-bits in
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
const struct netdev_flow_key *mask)
{
- const uint64_t *p = mask->mf.inline_values;
+ const uint64_t *p = mask->mf.values;
uint32_t hash = 0;
uint64_t key_u64;
hash = hash_add64(hash, key_u64 & *p++);
}
- return hash_finish(hash, (p - mask->mf.inline_values) * 8);
+ return hash_finish(hash, (p - mask->mf.values) * 8);
}
static inline bool
struct netdev_flow_key key;
size_t i, notfound_cnt = 0;
- miniflow_initialize(&key.mf, key.buf);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *flow;
dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target)
{
- const uint64_t *keyp = rule->flow.mf.inline_values;
- const uint64_t *maskp = rule->mask->mf.inline_values;
+ const uint64_t *keyp = rule->flow.mf.values;
+ const uint64_t *maskp = rule->mask->mf.values;
uint64_t target_u64;
NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
COVERAGE_INC(flow_extract);
- miniflow_initialize(&m.mf, m.buf);
miniflow_extract(packet, &m.mf);
miniflow_expand(&m.mf, flow);
}
const struct pkt_metadata *md = &packet->md;
const void *data = dp_packet_data(packet);
size_t size = dp_packet_size(packet);
- uint64_t *values = miniflow_values(dst);
+ uint64_t *values = dst->values;
struct mf_ctx mf = { 0, values, values + FLOW_U64S };
const char *l2;
ovs_be16 dl_type;
return count_1bits(flow->map);
}
-static uint64_t *
-miniflow_alloc_values(struct miniflow *flow, int n)
-{
- int size = MINIFLOW_VALUES_SIZE(n);
-
- if (size <= sizeof flow->inline_values) {
- flow->values_inline = true;
- return flow->inline_values;
- } else {
- COVERAGE_INC(miniflow_malloc);
- flow->values_inline = false;
- flow->offline_values = xmalloc(size);
- return flow->offline_values;
- }
-}
-
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
- * the caller. The caller must have already initialized 'dst->map' properly
- * to indicate the significant uint64_t elements of 'src'. 'n' must be the
- * number of 1-bits in 'dst->map'.
+ * the caller. The caller must have already computed 'map' properly
+ * to indicate the significant uint64_t elements of 'src'.
*
* Normally the significant elements are the ones that are non-zero. However,
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
* so that the flow and mask always have the same maps.
*
- * This function initializes values (either inline if possible or with
- * malloc() otherwise) and copies the uint64_t elements of 'src' indicated by
- * 'dst->map' into it. */
-static void
-miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
+ * This function always dynamically allocates a miniflow with the correct
+ * amount of inline storage and copies the uint64_t elements of 'src' indicated
+ * by 'map' into it. */
+static struct miniflow *
+miniflow_init__(const struct flow *src, uint64_t map)
{
const uint64_t *src_u64 = (const uint64_t *) src;
- uint64_t *dst_u64 = miniflow_alloc_values(dst, n);
+ struct miniflow *dst = xmalloc(sizeof *dst
+ + MINIFLOW_VALUES_SIZE(count_1bits(map)));
+ uint64_t *dst_u64 = dst->values;
int idx;
- MAP_FOR_EACH_INDEX(idx, dst->map) {
+ COVERAGE_INC(miniflow_malloc);
+
+ dst->map = map;
+ MAP_FOR_EACH_INDEX(idx, map) {
*dst_u64++ = src_u64[idx];
}
+ return dst;
}
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with miniflow_destroy().
- * Always allocates offline storage. */
-void
-miniflow_init(struct miniflow *dst, const struct flow *src)
+/* Returns a miniflow copy of 'src'. The caller must eventually free the
+ * returned miniflow with free(). */
+struct miniflow *
+miniflow_create(const struct flow *src)
{
const uint64_t *src_u64 = (const uint64_t *) src;
+ uint64_t map;
unsigned int i;
- int n;
/* Initialize dst->map, counting the number of nonzero elements. */
- n = 0;
- dst->map = 0;
+ map = 0;
for (i = 0; i < FLOW_U64S; i++) {
if (src_u64[i]) {
- dst->map |= UINT64_C(1) << i;
- n++;
+ map |= UINT64_C(1) << i;
}
}
- miniflow_init__(dst, src, n);
+ return miniflow_init__(src, map);
}
-/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
- * caller must eventually free 'dst' with miniflow_destroy(). */
-void
-miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
- const struct minimask *mask)
+/* Returns a copy of 'src', using 'mask->map'. The caller must eventually free
+ * the returned miniflow with free(). */
+struct miniflow *
+miniflow_create_with_minimask(const struct flow *src,
+ const struct minimask *mask)
{
- dst->map = mask->masks.map;
- miniflow_init__(dst, src, miniflow_n_values(dst));
+ return miniflow_init__(src, mask->masks.map);
}
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with miniflow_destroy(). */
-void
-miniflow_clone(struct miniflow *dst, const struct miniflow *src)
+/* Returns a copy of 'src'. The caller must eventually free the returned
+ * miniflow with free(). */
+struct miniflow *
+miniflow_clone(const struct miniflow *src)
{
- int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
- uint64_t *values;
+ struct miniflow *dst;
+ int n = miniflow_n_values(src);
- dst->map = src->map;
- if (size <= sizeof dst->inline_values) {
- dst->values_inline = true;
- values = dst->inline_values;
- } else {
- dst->values_inline = false;
- COVERAGE_INC(miniflow_malloc);
- dst->offline_values = xmalloc(size);
- values = dst->offline_values;
- }
- memcpy(values, miniflow_get_values(src), size);
+ COVERAGE_INC(miniflow_malloc);
+ dst = xmalloc(sizeof *dst + MINIFLOW_VALUES_SIZE(n));
+ miniflow_clone_inline(dst, src, n);
+ return dst;
}
/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
- * 'dst' to have inline space all data in 'src'. */
+ * 'dst' to have inline space for 'n_values' data in 'src'. */
void
miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
size_t n_values)
{
- dst->values_inline = true;
- dst->map = src->map;
- memcpy(dst->inline_values, miniflow_get_values(src),
- MINIFLOW_VALUES_SIZE(n_values));
-}
-
-/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with miniflow_destroy().
- * 'dst' must be regularly sized miniflow, but 'src' can have
- * storage for more than the default MINI_N_INLINE inline
- * values. */
-void
-miniflow_move(struct miniflow *dst, struct miniflow *src)
-{
- int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
-
dst->map = src->map;
- if (size <= sizeof dst->inline_values) {
- dst->values_inline = true;
- memcpy(dst->inline_values, miniflow_get_values(src), size);
- miniflow_destroy(src);
- } else if (src->values_inline) {
- dst->values_inline = false;
- COVERAGE_INC(miniflow_malloc);
- dst->offline_values = xmalloc(size);
- memcpy(dst->offline_values, src->inline_values, size);
- } else {
- dst->values_inline = false;
- dst->offline_values = src->offline_values;
- }
-}
-
-/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
- * itself resides; the caller is responsible for that. */
-void
-miniflow_destroy(struct miniflow *flow)
-{
- if (!flow->values_inline) {
- free(flow->offline_values);
- }
+ memcpy(dst->values, src->values, MINIFLOW_VALUES_SIZE(n_values));
}
/* Initializes 'dst' as a copy of 'src'. */
flow_union_with_miniflow(dst, src);
}
-/* Returns true if 'a' and 'b' are the equal miniflow, false otherwise. */
+/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
- const uint64_t *ap = miniflow_get_values(a);
- const uint64_t *bp = miniflow_get_values(b);
+ const uint64_t *ap = a->values;
+ const uint64_t *bp = b->values;
if (OVS_LIKELY(a->map == b->map)) {
int count = miniflow_n_values(a);
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
const struct minimask *mask)
{
- const uint64_t *p = miniflow_get_values(&mask->masks);
+ const uint64_t *p = mask->masks.values;
int idx;
MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
const struct minimask *mask)
{
const uint64_t *b_u64 = (const uint64_t *) b;
- const uint64_t *p = miniflow_get_values(&mask->masks);
+ const uint64_t *p = mask->masks.values;
int idx;
MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
}
\f
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with minimask_destroy(). */
-void
-minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
-{
- miniflow_init(&mask->masks, &wc->masks);
-}
-
-/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
- * with minimask_destroy(). */
-void
-minimask_clone(struct minimask *dst, const struct minimask *src)
+/* Returns a minimask copy of 'wc'. The caller must eventually free the
+ * returned minimask with free(). */
+struct minimask *
+minimask_create(const struct flow_wildcards *wc)
{
- miniflow_clone(&dst->masks, &src->masks);
+ return (struct minimask *)miniflow_create(&wc->masks);
}
-/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with minimask_destroy(). */
-void
-minimask_move(struct minimask *dst, struct minimask *src)
+/* Returns a copy of 'src'. The caller must eventually free the returned
+ * minimask with free(). */
+struct minimask *
+minimask_clone(const struct minimask *src)
{
- miniflow_move(&dst->masks, &src->masks);
+ return (struct minimask *)miniflow_clone(&src->masks);
}
/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
*
- * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', for use
- * by 'dst_'. The caller must *not* free 'dst_' with minimask_destroy(). */
+ * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', which
+ * must follow '*dst_' in memory, for use by 'dst_'. The caller must *not*
+ * free 'dst_' with free(). */
void
minimask_combine(struct minimask *dst_,
const struct minimask *a_, const struct minimask *b_,
const struct miniflow *b = &b_->masks;
int idx;
- dst->values_inline = false;
- dst->offline_values = storage;
-
dst->map = 0;
MAP_FOR_EACH_INDEX(idx, a->map & b->map) {
/* Both 'a' and 'b' have non-zero data at 'idx'. */
}
}
-/* Frees any memory owned by 'mask'. Does not free the storage in which 'mask'
- * itself resides; the caller is responsible for that. */
-void
-minimask_destroy(struct minimask *mask)
-{
- miniflow_destroy(&mask->masks);
-}
-
-/* Initializes 'dst' as a copy of 'src'. */
+/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
minimask_equal(const struct minimask *a, const struct minimask *b)
{
return a->masks.map == b->masks.map &&
- !memcmp(miniflow_get_values(&a->masks),
- miniflow_get_values(&b->masks),
- count_1bits(a->masks.map) * sizeof *a->masks.inline_values);
+ !memcmp(a->masks.values, b->masks.values,
+ count_1bits(a->masks.map) * sizeof *a->masks.values);
}
/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
- const uint64_t *ap = miniflow_get_values(&a->masks);
- const uint64_t *bp = miniflow_get_values(&b->masks);
+ const uint64_t *ap = a->masks.values;
+ const uint64_t *bp = b->masks.values;
int idx;
MAP_FOR_EACH_INDEX(idx, b->masks.map) {
\f
/* Compressed flow. */
-/* Number of 64-bit words present in struct miniflow. */
-#define MINI_N_INLINE 4
-
/* Maximum number of 64-bit words supported. */
-BUILD_ASSERT_DECL(FLOW_U64S <= 63);
+BUILD_ASSERT_DECL(FLOW_U64S <= 64);
/* A sparse representation of a "struct flow".
*
* A "struct flow" is fairly large and tends to be mostly zeros. Sparse
- * representation has two advantages. First, it saves memory. Second, it
- * saves time when the goal is to iterate over only the nonzero parts of the
- * struct.
+ * representation has two advantages. First, it saves memory and, more
+ * importantly, minimizes the number of accessed cache lines. Second, it saves
+ * time when the goal is to iterate over only the nonzero parts of the struct.
*
* The 'map' member holds one bit for each uint64_t in a "struct flow". Each
* 0-bit indicates that the corresponding uint64_t is zero, each 1-bit that it
* *may* be nonzero (see below how this applies to minimasks).
*
- * The 'values_inline' boolean member indicates that the values are at
- * 'inline_values'. If 'values_inline' is zero, then the values are
- * offline at 'offline_values'. In either case, values is an array that has
- * one element for each 1-bit in 'map'. The least-numbered 1-bit is in
- * the first element of the values array, the next 1-bit is in the next array
- * element, and so on.
- *
- * MINI_N_INLINE is the default number of inline words. When a miniflow is
- * dynamically allocated the actual amount of inline storage may be different.
- * In that case 'inline_values' contains storage at least for the number
- * of words indicated by 'map' (one uint64_t for each 1-bit in the map).
- *
* Elements in values array are allowed to be zero. This is useful for "struct
* minimatch", for which ensuring that the miniflow and minimask members have
* same 'map' allows optimization. This allowance applies only to a miniflow
* that is not a mask. That is, a minimask may NOT have zero elements in
* its 'values'.
- */
+ *
+ * A miniflow is always dynamically allocated so that the 'values' array has as
+ * many elements as there are 1-bits in 'map'. */
struct miniflow {
- uint64_t map:63;
- uint64_t values_inline:1;
- union {
- uint64_t *offline_values;
- uint64_t inline_values[MINI_N_INLINE]; /* Minimum inline size. */
- };
+ uint64_t map;
+ uint64_t values[0];
};
-BUILD_ASSERT_DECL(sizeof(struct miniflow)
- == sizeof(uint64_t) + MINI_N_INLINE * sizeof(uint64_t));
+BUILD_ASSERT_DECL(sizeof(struct miniflow) == sizeof(uint64_t));
#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint64_t))
-static inline uint64_t *miniflow_values(struct miniflow *mf)
-{
- return OVS_LIKELY(mf->values_inline)
- ? mf->inline_values : mf->offline_values;
-}
-
-static inline const uint64_t *miniflow_get_values(const struct miniflow *mf)
-{
- return OVS_LIKELY(mf->values_inline)
- ? mf->inline_values : mf->offline_values;
-}
-
-/* This is useful for initializing a miniflow for a miniflow_extract() call. */
-static inline void miniflow_initialize(struct miniflow *mf,
- uint64_t buf[FLOW_U64S])
-{
- mf->map = 0;
- mf->values_inline = (buf == (uint64_t *)(mf + 1));
- if (!mf->values_inline) {
- mf->offline_values = buf;
- }
-}
-
struct pkt_metadata;
-/* The 'dst->values' must be initialized with a buffer with space for
- * FLOW_U64S. 'dst->map' is ignored on input and set on output to
- * indicate which fields were extracted. */
+/* The 'dst' must be followed by buffer space for FLOW_U64S 64-bit units.
+ * 'dst->map' is ignored on input and set on output to indicate which fields
+ * were extracted. */
void miniflow_extract(struct dp_packet *packet, struct miniflow *dst);
-void miniflow_init(struct miniflow *, const struct flow *);
-void miniflow_init_with_minimask(struct miniflow *, const struct flow *,
- const struct minimask *);
-void miniflow_clone(struct miniflow *, const struct miniflow *);
+struct miniflow * miniflow_create(const struct flow *);
+struct miniflow * miniflow_create_with_minimask(const struct flow *,
+ const struct minimask *);
+struct miniflow * miniflow_clone(const struct miniflow *);
void miniflow_clone_inline(struct miniflow *, const struct miniflow *,
size_t n_values);
-void miniflow_move(struct miniflow *dst, struct miniflow *);
-void miniflow_destroy(struct miniflow *);
void miniflow_expand(const struct miniflow *, struct flow *);
}
/* Iterate through all miniflow u64 values specified by 'MAP'. */
-#define MINIFLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP) \
- for (struct mf_for_each_in_map_aux aux__ \
- = { miniflow_get_values(FLOW), (FLOW)->map, MAP }; \
- mf_get_next_in_map(&aux__, &(VALUE)); \
+#define MINIFLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP) \
+ for (struct mf_for_each_in_map_aux aux__ \
+ = { (FLOW)->values, (FLOW)->map, MAP }; \
+ mf_get_next_in_map(&aux__, &(VALUE)); \
)
/* This can be used when it is known that 'u64_idx' is set in 'map'. */
static inline uint64_t
miniflow_get__(const struct miniflow *mf, int u64_idx)
{
- return miniflow_values_get__(miniflow_get_values(mf), mf->map, u64_idx);
+ return miniflow_values_get__(mf->values, mf->map, u64_idx);
}
/* Get the value of 'FIELD' of an up to 8 byte wide integer type 'TYPE' of
#define MINIFLOW_GET_TYPE(MF, TYPE, OFS) \
(((MF)->map & (UINT64_C(1) << (OFS) / sizeof(uint64_t))) \
? ((OVS_FORCE const TYPE *) \
- (miniflow_get_values(MF) \
+ ((MF)->values \
+ count_1bits((MF)->map & \
((UINT64_C(1) << (OFS) / sizeof(uint64_t)) - 1)))) \
[(OFS) % sizeof(uint64_t) / sizeof(TYPE)] \
struct miniflow masks;
};
-void minimask_init(struct minimask *, const struct flow_wildcards *);
-void minimask_clone(struct minimask *, const struct minimask *);
-void minimask_move(struct minimask *dst, struct minimask *src);
+struct minimask * minimask_create(const struct flow_wildcards *);
+struct minimask * minimask_clone(const struct minimask *);
void minimask_combine(struct minimask *dst,
const struct minimask *a, const struct minimask *b,
uint64_t storage[FLOW_U64S]);
-void minimask_destroy(struct minimask *);
void minimask_expand(const struct minimask *, struct flow_wildcards *);
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
uint64_t *dst_u64 = (uint64_t *) dst;
- const uint64_t *p = miniflow_get_values(src);
+ const uint64_t *p = src->values;
int idx;
MAP_FOR_EACH_INDEX(idx, src->map) {
void
minimatch_init(struct minimatch *dst, const struct match *src)
{
- minimask_init(&dst->mask, &src->wc);
- miniflow_init_with_minimask(&dst->flow, &src->flow, &dst->mask);
+ dst->mask = minimask_create(&src->wc);
+ dst->flow = miniflow_create_with_minimask(&src->flow, dst->mask);
}
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
void
minimatch_clone(struct minimatch *dst, const struct minimatch *src)
{
- miniflow_clone(&dst->flow, &src->flow);
- minimask_clone(&dst->mask, &src->mask);
+ dst->flow = miniflow_clone(src->flow);
+ dst->mask = minimask_clone(src->mask);
}
/* Initializes 'dst' with the data in 'src', destroying 'src'. The caller must
void
minimatch_move(struct minimatch *dst, struct minimatch *src)
{
- miniflow_move(&dst->flow, &src->flow);
- minimask_move(&dst->mask, &src->mask);
+ dst->flow = src->flow;
+ dst->mask = src->mask;
}
/* Frees any memory owned by 'match'. Does not free the storage in which
void
minimatch_destroy(struct minimatch *match)
{
- miniflow_destroy(&match->flow);
- minimask_destroy(&match->mask);
+ free(match->flow);
+ free(match->mask);
}
/* Initializes 'dst' as a copy of 'src'. */
void
minimatch_expand(const struct minimatch *src, struct match *dst)
{
- miniflow_expand(&src->flow, &dst->flow);
- minimask_expand(&src->mask, &dst->wc);
+ miniflow_expand(src->flow, &dst->flow);
+ minimask_expand(src->mask, &dst->wc);
memset(&dst->tun_md, 0, sizeof dst->tun_md);
}
bool
minimatch_equal(const struct minimatch *a, const struct minimatch *b)
{
- return (miniflow_equal(&a->flow, &b->flow)
- && minimask_equal(&a->mask, &b->mask));
+ return minimask_equal(a->mask, b->mask)
+ && miniflow_equal(a->flow, b->flow);
}
/* Returns true if 'target' satisifies 'match', that is, if each bit for which
const struct flow *target)
{
const uint64_t *target_u64 = (const uint64_t *) target;
- const uint64_t *flowp = miniflow_get_values(&match->flow);
- const uint64_t *maskp = miniflow_get_values(&match->mask.masks);
+ const uint64_t *flowp = match->flow->values;
+ const uint64_t *maskp = match->mask->masks.values;
int idx;
- MAP_FOR_EACH_INDEX(idx, match->flow.map) {
+ MAP_FOR_EACH_INDEX(idx, match->flow->map) {
if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
return false;
}
* 'values', which makes minimatch_matches_flow() faster.
*/
struct minimatch {
- struct miniflow flow;
- struct minimask mask;
+ struct miniflow *flow;
+ struct minimask *mask;
};
void minimatch_init(struct minimatch *, const struct match *);
};
ds_put_format(&ds, "%s (%"PRIu32") : ", p->dev_name, p->portno);
- minimask_expand(&p->cr.match.mask, &wc);
- miniflow_expand(&p->cr.match.flow, &flow);
+ minimask_expand(p->cr.match.mask, &wc);
+ miniflow_expand(p->cr.match.flow, &flow);
/* Key. */
odp_parms.odp_in_port = flow.in_port.odp_port;
if (old_rule) {
ovsrcu_postpone(remove_rule_rcu, old_rule);
} else {
- if (minimask_get_vid_mask(&new_rule->cr.match.mask) == VLAN_VID_MASK) {
+ if (minimask_get_vid_mask(new_rule->cr.match.mask) == VLAN_VID_MASK) {
if (ofproto->vlan_bitmap) {
- uint16_t vid = miniflow_get_vid(&new_rule->cr.match.flow);
+ uint16_t vid = miniflow_get_vid(new_rule->cr.match.flow);
if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
bitmap_set1(ofproto->vlan_bitmap, vid);
uint32_t hash;
hash = table->eviction_group_id_basis;
- miniflow_expand(&rule->cr.match.flow, &flow);
+ miniflow_expand(rule->cr.match.flow, &flow);
for (sf = table->eviction_fields;
sf < &table->eviction_fields[table->n_eviction_fields];
sf++)
CLS_FOR_EACH_TARGET (rule, cr, &oftable->cls, &target,
CLS_MAX_VERSION) {
- if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
- uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
+ if (minimask_get_vid_mask(rule->cr.match.mask) == VLAN_VID_MASK) {
+ uint16_t vid = miniflow_get_vid(rule->cr.match.flow);
bitmap_set1(vlan_bitmap, vid);
bitmap_set1(ofproto->vlan_bitmap, vid);
for (i = 0; i < cls->n_rules; ) {
struct test_rule *pos = cls->rules[i];
- if (!minimask_has_extra(&pos->cls_rule.match.mask,
- &target->match.mask)) {
+ if (!minimask_has_extra(pos->cls_rule.match.mask,
+ target->match.mask)) {
struct flow flow;
- miniflow_expand(&pos->cls_rule.match.flow, &flow);
+ miniflow_expand(pos->cls_rule.match.flow, &flow);
if (match(target, &flow)) {
tcls_remove(cls, pos);
continue;
random_set_seed(0xb3faca38);
for (idx = 0; next_random_flow(&flow, idx); idx++) {
const uint64_t *flow_u64 = (const uint64_t *) &flow;
- struct miniflow miniflow, miniflow2, miniflow3;
+ struct miniflow *miniflow, *miniflow2, *miniflow3;
struct flow flow2, flow3;
struct flow_wildcards mask;
- struct minimask minimask;
+ struct minimask *minimask;
int i;
/* Convert flow to miniflow. */
- miniflow_init(&miniflow, &flow);
+ miniflow = miniflow_create(&flow);
/* Check that the flow equals its miniflow. */
- assert(miniflow_get_vid(&miniflow) == vlan_tci_to_vid(flow.vlan_tci));
+ assert(miniflow_get_vid(miniflow) == vlan_tci_to_vid(flow.vlan_tci));
for (i = 0; i < FLOW_U64S; i++) {
- assert(miniflow_get(&miniflow, i) == flow_u64[i]);
+ assert(miniflow_get(miniflow, i) == flow_u64[i]);
}
/* Check that the miniflow equals itself. */
- assert(miniflow_equal(&miniflow, &miniflow));
+ assert(miniflow_equal(miniflow, miniflow));
/* Convert miniflow back to flow and verify that it's the same. */
- miniflow_expand(&miniflow, &flow2);
+ miniflow_expand(miniflow, &flow2);
assert(flow_equal(&flow, &flow2));
/* Check that copying a miniflow works properly. */
- miniflow_clone(&miniflow2, &miniflow);
- assert(miniflow_equal(&miniflow, &miniflow2));
- assert(miniflow_hash(&miniflow, 0) == miniflow_hash(&miniflow2, 0));
- miniflow_expand(&miniflow2, &flow3);
+ miniflow2 = miniflow_clone(miniflow);
+ assert(miniflow_equal(miniflow, miniflow2));
+ assert(miniflow_hash(miniflow, 0) == miniflow_hash(miniflow2, 0));
+ miniflow_expand(miniflow2, &flow3);
assert(flow_equal(&flow, &flow3));
/* Check that masked matches work as expected for identical flows and
do {
next_random_flow(&mask.masks, 1);
} while (flow_wildcards_is_catchall(&mask));
- minimask_init(&minimask, &mask);
- assert(minimask_is_catchall(&minimask)
+ minimask = minimask_create(&mask);
+ assert(minimask_is_catchall(minimask)
== flow_wildcards_is_catchall(&mask));
- assert(miniflow_equal_in_minimask(&miniflow, &miniflow2, &minimask));
- assert(miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
- assert(miniflow_hash_in_minimask(&miniflow, &minimask, 0x12345678) ==
- flow_hash_in_minimask(&flow, &minimask, 0x12345678));
+ assert(miniflow_equal_in_minimask(miniflow, miniflow2, minimask));
+ assert(miniflow_equal_flow_in_minimask(miniflow, &flow2, minimask));
+ assert(miniflow_hash_in_minimask(miniflow, minimask, 0x12345678) ==
+ flow_hash_in_minimask(&flow, minimask, 0x12345678));
/* Check that masked matches work as expected for differing flows and
* miniflows. */
toggle_masked_flow_bits(&flow2, &mask);
- assert(!miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
- miniflow_init(&miniflow3, &flow2);
- assert(!miniflow_equal_in_minimask(&miniflow, &miniflow3, &minimask));
+ assert(!miniflow_equal_flow_in_minimask(miniflow, &flow2, minimask));
+ miniflow3 = miniflow_create(&flow2);
+ assert(!miniflow_equal_in_minimask(miniflow, miniflow3, minimask));
/* Clean up. */
- miniflow_destroy(&miniflow);
- miniflow_destroy(&miniflow2);
- miniflow_destroy(&miniflow3);
- minimask_destroy(&minimask);
+ free(miniflow);
+ free(miniflow2);
+ free(miniflow3);
+ free(minimask);
}
}
test_minimask_has_extra(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
struct flow_wildcards catchall;
- struct minimask minicatchall;
+ struct minimask *minicatchall;
struct flow flow;
unsigned int idx;
flow_wildcards_init_catchall(&catchall);
- minimask_init(&minicatchall, &catchall);
- assert(minimask_is_catchall(&minicatchall));
+ minicatchall = minimask_create(&catchall);
+ assert(minimask_is_catchall(minicatchall));
random_set_seed(0x2ec7905b);
for (idx = 0; next_random_flow(&flow, idx); idx++) {
struct flow_wildcards mask;
- struct minimask minimask;
+ struct minimask *minimask;
mask.masks = flow;
- minimask_init(&minimask, &mask);
- assert(!minimask_has_extra(&minimask, &minimask));
- assert(minimask_has_extra(&minicatchall, &minimask)
- == !minimask_is_catchall(&minimask));
- if (!minimask_is_catchall(&minimask)) {
- struct minimask minimask2;
+ minimask = minimask_create(&mask);
+ assert(!minimask_has_extra(minimask, minimask));
+ assert(minimask_has_extra(minicatchall, minimask)
+ == !minimask_is_catchall(minimask));
+ if (!minimask_is_catchall(minimask)) {
+ struct minimask *minimask2;
wildcard_extra_bits(&mask);
- minimask_init(&minimask2, &mask);
- assert(minimask_has_extra(&minimask2, &minimask));
- assert(!minimask_has_extra(&minimask, &minimask2));
- minimask_destroy(&minimask2);
+ minimask2 = minimask_create(&mask);
+ assert(minimask_has_extra(minimask2, minimask));
+ assert(!minimask_has_extra(minimask, minimask2));
+ free(minimask2);
}
- minimask_destroy(&minimask);
+ free(minimask);
}
- minimask_destroy(&minicatchall);
+ free(minicatchall);
}
static void
test_minimask_combine(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
struct flow_wildcards catchall;
- struct minimask minicatchall;
+ struct minimask *minicatchall;
struct flow flow;
unsigned int idx;
flow_wildcards_init_catchall(&catchall);
- minimask_init(&minicatchall, &catchall);
- assert(minimask_is_catchall(&minicatchall));
+ minicatchall = minimask_create(&catchall);
+ assert(minimask_is_catchall(minicatchall));
random_set_seed(0x181bf0cd);
for (idx = 0; next_random_flow(&flow, idx); idx++) {
- struct minimask minimask, minimask2, minicombined;
+ struct minimask *minimask, *minimask2;
struct flow_wildcards mask, mask2, combined, combined2;
- uint64_t storage[FLOW_U64S];
+ struct {
+ struct minimask minicombined;
+ uint64_t storage[FLOW_U64S];
+ } m;
struct flow flow2;
mask.masks = flow;
- minimask_init(&minimask, &mask);
+ minimask = minimask_create(&mask);
- minimask_combine(&minicombined, &minimask, &minicatchall, storage);
- assert(minimask_is_catchall(&minicombined));
+ minimask_combine(&m.minicombined, minimask, minicatchall, m.storage);
+ assert(minimask_is_catchall(&m.minicombined));
any_random_flow(&flow2);
mask2.masks = flow2;
- minimask_init(&minimask2, &mask2);
+ minimask2 = minimask_create(&mask2);
- minimask_combine(&minicombined, &minimask, &minimask2, storage);
+ minimask_combine(&m.minicombined, minimask, minimask2, m.storage);
flow_wildcards_and(&combined, &mask, &mask2);
- minimask_expand(&minicombined, &combined2);
+ minimask_expand(&m.minicombined, &combined2);
assert(flow_wildcards_equal(&combined, &combined2));
- minimask_destroy(&minimask);
- minimask_destroy(&minimask2);
+ free(minimask);
+ free(minimask2);
}
- minimask_destroy(&minicatchall);
+ free(minicatchall);
}
\f
static const struct ovs_cmdl_command commands[] = {