cls_match_alloc(const struct cls_rule *rule, cls_version_t version,
const struct cls_conjunction conj[], size_t n)
{
- int count = count_1bits(rule->match.flow.map);
+ size_t count = miniflow_n_values(rule->match.flow);
struct cls_match *cls_match
- = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
- + MINIFLOW_VALUES_SIZE(count));
+ = xmalloc(sizeof *cls_match + MINIFLOW_VALUES_SIZE(count));
ovsrcu_init(&cls_match->next, NULL);
*CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
*CONST_CAST(cls_version_t *, &cls_match->add_version) = version;
atomic_init(&cls_match->remove_version, version); /* Initially
* invisible. */
- miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
- &rule->match.flow, count);
+ miniflow_clone(CONST_CAST(struct miniflow *, &cls_match->flow),
+ rule->match.flow, count);
ovsrcu_set_hidden(&cls_match->conj_set,
cls_conjunction_set_alloc(cls_match, conj, n));
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
- return minimask_is_catchall(&rule->match.mask);
+ return minimask_is_catchall(rule->match.mask);
}
-/* Makes rule invisible after 'version'. Once that version is made invisible
- * (by changing the version parameter used in lookups), the rule should be
- * actually removed via ovsrcu_postpone().
+/* Makes 'rule' invisible in 'remove_version'. Once that version is used in
+ * lookups, the caller should remove 'rule' via ovsrcu_postpone().
*
- * 'rule_' must be in a classifier. */
+ * 'rule' must be in a classifier. */
void
cls_rule_make_invisible_in_version(const struct cls_rule *rule,
cls_version_t remove_version)
static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
/* Could optimize to use the same map if needed for fast path. */
- return MINIFLOW_GET_BE32(&match->flow, tp_src)
- & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
+ return MINIFLOW_GET_BE32(match->flow, tp_src)
+ & MINIFLOW_GET_BE32(&match->mask->masks, tp_src);
}
static void
CONST_CAST(struct cls_rule *, rule)->cls_match = new;
- subtable = find_subtable(cls, &rule->match.mask);
+ subtable = find_subtable(cls, rule->match.mask);
if (!subtable) {
- subtable = insert_subtable(cls, &rule->match.mask);
+ subtable = insert_subtable(cls, rule->match.mask);
}
/* Compute hashes in segments. */
}
hash = minimatch_hash_range(&rule->match, prev_be64ofs, FLOW_U64S, &basis);
- head = find_equal(subtable, &rule->match.flow, hash);
+ head = find_equal(subtable, rule->match.flow, hash);
if (!head) {
/* Add rule to tries.
*
* Concurrent readers might miss seeing the rule until this update,
* which might require being fixed up by revalidation later. */
new->partition = NULL;
- if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
- ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
+ if (minimask_get_metadata_mask(rule->match.mask) == OVS_BE64_MAX) {
+ ovs_be64 metadata = miniflow_get_metadata(rule->match.flow);
new->partition = create_partition(cls, subtable, metadata);
}
/* Remove 'cls_rule' from the subtable's rules list. */
rculist_remove(CONST_CAST(struct rculist *, &cls_rule->node));
- subtable = find_subtable(cls, &cls_rule->match.mask);
+ subtable = find_subtable(cls, cls_rule->match.mask);
ovs_assert(subtable);
for (i = 0; i < subtable->n_indices; i++) {
hash = minimatch_hash_range(&cls_rule->match, prev_be64ofs, FLOW_U64S,
&basis);
- head = find_equal(subtable, &cls_rule->match.flow, hash);
+ head = find_equal(subtable, cls_rule->match.flow, hash);
/* Check if the rule is not the head rule. */
if (rule != head) {
const struct cls_match *head, *rule;
const struct cls_subtable *subtable;
- subtable = find_subtable(cls, &target->match.mask);
+ subtable = find_subtable(cls, target->match.mask);
if (!subtable) {
return NULL;
}
- head = find_equal(subtable, &target->match.flow,
- miniflow_hash_in_minimask(&target->match.flow,
- &target->match.mask, 0));
+ head = find_equal(subtable, target->match.flow,
+ miniflow_hash_in_minimask(target->match.flow,
+ target->match.mask, 0));
if (!head) {
return NULL;
}
/* Iterate subtables in the descending max priority order. */
PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
sizeof(struct cls_subtable), &cls->subtables) {
- uint64_t storage[FLOW_U64S];
- struct minimask mask;
+ struct {
+ struct minimask mask;
+ uint64_t storage[FLOW_U64S];
+ } m;
const struct cls_rule *rule;
- minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
+ minimask_combine(&m.mask, target->match.mask, &subtable->mask,
+ m.storage);
RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
if (rule->priority == target->priority
- && miniflow_equal_in_minimask(&target->match.flow,
- &rule->match.flow, &mask)
+ && miniflow_equal_in_minimask(target->match.flow,
+ rule->match.flow, &m.mask)
&& cls_match_visible_in_version(rule->cls_match, version)) {
return true;
}
cls_rule_is_loose_match(const struct cls_rule *rule,
const struct minimatch *criteria)
{
- return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
- && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
- &criteria->mask));
+ return (!minimask_has_extra(rule->match.mask, criteria->mask)
+ && miniflow_equal_in_minimask(rule->match.flow, criteria->flow,
+ criteria->mask));
}
\f
/* Iteration. */
{
/* Rule may only match a target if it is visible in target's version. */
return cls_match_visible_in_version(rule->cls_match, version)
- && (!target || miniflow_equal_in_minimask(&rule->match.flow,
- &target->match.flow,
- &target->match.mask));
+ && (!target || miniflow_equal_in_minimask(rule->match.flow,
+ target->match.flow,
+ target->match.mask));
}
static const struct cls_rule *
struct cls_cursor *cursor)
{
if (!cursor->target
- || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
+ || !minimask_has_extra(&subtable->mask, cursor->target->match.mask)) {
const struct cls_rule *rule;
RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
int i, index = 0;
struct flow_wildcards old, new;
uint8_t prev;
- int count = count_1bits(mask->masks.map);
+ size_t count = miniflow_n_values(&mask->masks);
- subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
- + MINIFLOW_VALUES_SIZE(count));
+ subtable = xzalloc(sizeof *subtable + MINIFLOW_VALUES_SIZE(count));
cmap_init(&subtable->rules);
- miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
- &mask->masks, count);
+ miniflow_clone(CONST_CAST(struct miniflow *, &subtable->mask.masks),
+ &mask->masks, count);
/* Init indices for segmented lookup, if any. */
flow_wildcards_init_catchall(&new);
{
const uint64_t *flowp = miniflow_get_values(flow);
const uint64_t *maskp = miniflow_get_values(&mask->masks);
- int idx;
-
- MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
- uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & *maskp++;
+ const uint64_t *target_u64 = (const uint64_t *)target;
+ size_t idx;
- if (diff) {
+ MAP_FOR_EACH_INDEX(idx, mask->masks.tnl_map) {
+ if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
+ return false;
+ }
+ }
+ target_u64 += FLOW_TNL_U64S;
+ MAP_FOR_EACH_INDEX(idx, mask->masks.pkt_map) {
+ if ((*flowp++ ^ target_u64[idx]) & *maskp++) {
return false;
}
}
{
const uint64_t *flowp = miniflow_get_values(flow);
const uint64_t *maskp = miniflow_get_values(&mask->masks);
- int idx;
+ const uint64_t *target_u64 = (const uint64_t *)target;
+ uint64_t *wc_u64 = (uint64_t *)&wc->masks;
+ uint64_t diff;
+ size_t idx;
- MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
- uint64_t mask = *maskp++;
- uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & mask;
+ MAP_FOR_EACH_INDEX(idx, mask->masks.tnl_map) {
+ uint64_t msk = *maskp++;
+ diff = (*flowp++ ^ target_u64[idx]) & msk;
if (diff) {
- /* Only unwildcard if none of the differing bits is already
- * exact-matched. */
- if (!(flow_u64_value(&wc->masks, idx) & diff)) {
- /* Keep one bit of the difference. The selected bit may be
- * different in big-endian v.s. little-endian systems. */
- *flow_u64_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
- }
- return false;
+ goto out;
}
+
/* Fill in the bits that were looked at. */
- *flow_u64_lvalue(&wc->masks, idx) |= mask;
+ wc_u64[idx] |= msk;
+ }
+ target_u64 += FLOW_TNL_U64S;
+ wc_u64 += FLOW_TNL_U64S;
+ MAP_FOR_EACH_INDEX(idx, mask->masks.pkt_map) {
+ uint64_t msk = *maskp++;
+
+ diff = (*flowp++ ^ target_u64[idx]) & msk;
+ if (diff) {
+ goto out;
+ }
+
+ /* Fill in the bits that were looked at. */
+ wc_u64[idx] |= msk;
}
return true;
+
+out:
+ /* Only unwildcard if none of the differing bits is already
+ * exact-matched. */
+ if (!(wc_u64[idx] & diff)) {
+ /* Keep one bit of the difference. The selected bit may be
+ * different in big-endian v.s. little-endian systems. */
+ wc_u64[idx] |= rightmost_1bit(diff);
+ }
+ return false;
}
/* Unwildcard the fields looked up so far, if any. */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
- return (OVS_FORCE const ovs_be32 *)
- (miniflow_get_values(&match->flow)
- + count_1bits(match->flow.map &
- ((UINT64_C(1) << mf->flow_be32ofs / 2) - 1)))
+ size_t u64_ofs = mf->flow_be32ofs / 2;
+
+ return (OVS_FORCE const ovs_be32 *)miniflow_get__(match->flow, u64_ofs)
+ + (mf->flow_be32ofs & 1);
}