/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
OVS_EXCLUDED(cls->mutex)
{
if (cls) {
- struct cls_partition *partition, *next_partition;
- struct cls_subtable *subtable, *next_subtable;
+ struct cls_partition *partition;
+ struct cls_subtable *subtable;
int i;
ovs_mutex_lock(&cls->mutex);
trie_destroy(&cls->tries[i].root);
}
- CMAP_FOR_EACH_SAFE (subtable, next_subtable, cmap_node,
- &cls->subtables_map) {
+ CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
- CMAP_FOR_EACH_SAFE (partition, next_partition, cmap_node,
- &cls->partitions) {
+ CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
ovsrcu_postpone(free, partition);
}
cmap_destroy(&cls->partitions);
* such that cls_rule_is_loose_match(rule, target) returns true.
*
* Ignores target->priority. */
-struct cls_cursor cls_cursor_init(const struct classifier *cls,
- const struct cls_rule *target,
- void **pnode, const void *offset, bool safe)
+struct cls_cursor cls_cursor_start(const struct classifier *cls,
+ const struct cls_rule *target,
+ bool safe)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct cls_cursor cursor;
struct cls_subtable *subtable;
- struct cls_rule *cls_rule = NULL;
cursor.safe = safe;
cursor.cls = cls;
cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
+ cursor.rule = NULL;
/* Find first rule. */
ovs_mutex_lock(&cursor.cls->mutex);
if (rule) {
cursor.subtable = subtable;
- cls_rule = rule->cls_rule;
+ cursor.rule = rule->cls_rule;
break;
}
}
- *pnode = (char *)cls_rule + (ptrdiff_t)offset;
/* Leave locked if requested and have a rule. */
- if (safe || !cls_rule) {
+ if (safe || !cursor.rule) {
ovs_mutex_unlock(&cursor.cls->mutex);
}
return cursor;
OVS_NO_THREAD_SAFETY_ANALYSIS
{
/* Release the mutex if no rule, or 'safe' mode. */
+ cursor->rule = rule;
if (!rule || cursor->safe) {
ovs_mutex_unlock(&cursor->cls->mutex);
}
}
-/* Returns the next matching cls_rule in 'cursor''s iteration, or a null
- * pointer if there are no more matches. */
-struct cls_rule *
-cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
+/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
+ * or to null if all matching rules have been visited. */
+void
+cls_cursor_advance(struct cls_cursor *cursor)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
- struct cls_match *rule = CONST_CAST(struct cls_match *, rule_->cls_match);
+ struct cls_match *rule = cursor->rule->cls_match;
const struct cls_subtable *subtable;
struct cls_match *next;
next = next_rule_in_list__(rule);
if (next->priority < rule->priority) {
cls_cursor_next_unlock(cursor, next->cls_rule);
- return next->cls_rule;
+ return;
}
/* 'next' is the head of the list, that is, the rule that is included in
CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
if (rule_matches(rule, cursor->target)) {
cls_cursor_next_unlock(cursor, rule->cls_rule);
- return rule->cls_rule;
+ return;
}
}
if (rule) {
cursor->subtable = subtable;
cls_cursor_next_unlock(cursor, rule->cls_rule);
- return rule->cls_rule;
+ return;
}
}
ovs_mutex_unlock(&cursor->cls->mutex);
- return NULL;
+ cursor->rule = NULL;
}
\f
static struct cls_subtable *
/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
const struct cls_rule *target;
struct cmap_cursor subtables;
struct cmap_cursor rules;
+ struct cls_rule *rule;
bool safe;
};
/* Iteration requires mutual exclusion of writers. We do this by taking
* a mutex for the duration of the iteration, except for the
* 'SAFE' variant, where we release the mutex for the body of the loop. */
-struct cls_cursor cls_cursor_init(const struct classifier *cls,
- const struct cls_rule *target,
- void **pnode, const void *offset, bool safe);
+struct cls_cursor cls_cursor_start(const struct classifier *cls,
+ const struct cls_rule *target,
+ bool safe);
-struct cls_rule *cls_cursor_next(struct cls_cursor *cursor,
- const struct cls_rule *);
-
-#define CLS_CURSOR_START(RULE, MEMBER, CLS, TARGET) \
- cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
- OBJECT_CONTAINING(NULL, RULE, MEMBER), false)
-
-#define CLS_CURSOR_START_SAFE(RULE, MEMBER, CLS, TARGET) \
- cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
- OBJECT_CONTAINING(NULL, RULE, MEMBER), true)
-
-#define CLS_FOR_EACH(RULE, MEMBER, CLS) \
- for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
- NULL); \
- RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
- ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
- MEMBER))
+void cls_cursor_advance(struct cls_cursor *);
+#define CLS_FOR_EACH(RULE, MEMBER, CLS) \
+ CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, NULL)
#define CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, TARGET) \
- for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
- TARGET); \
- RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
- ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
- MEMBER))
-
-/* This form allows classifier_remove() to be called within the loop. */
-#define CLS_FOR_EACH_SAFE(RULE, NEXT, MEMBER, CLS) \
- for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
- CLS, NULL); \
- (RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
- &(RULE)->MEMBER), \
- MEMBER), true \
+ for (struct cls_cursor cursor__ = cls_cursor_start(CLS, TARGET, false); \
+ (cursor__.rule \
+ ? (ASSIGN_CONTAINER(RULE, cursor__.rule, MEMBER), \
+ true) \
: false); \
- (RULE) = (NEXT))
-
-/* This form allows classifier_remove() to be called within the loop. */
-#define CLS_FOR_EACH_TARGET_SAFE(RULE, NEXT, MEMBER, CLS, TARGET) \
- for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
- CLS, TARGET); \
- (RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
- &(RULE)->MEMBER), \
- MEMBER), true \
+ cls_cursor_advance(&cursor__))
+
+/* These forms allow classifier_remove() to be called within the loop. */
+#define CLS_FOR_EACH_SAFE(RULE, MEMBER, CLS) \
+ CLS_FOR_EACH_TARGET_SAFE(RULE, MEMBER, CLS, NULL)
+#define CLS_FOR_EACH_TARGET_SAFE(RULE, MEMBER, CLS, TARGET) \
+ for (struct cls_cursor cursor__ = cls_cursor_start(CLS, TARGET, true); \
+ (cursor__.rule \
+ ? (ASSIGN_CONTAINER(RULE, cursor__.rule, MEMBER), \
+ cls_cursor_advance(&cursor__), \
+ true) \
: false); \
- (RULE) = (NEXT))
-
+ )
#ifdef __cplusplus
}
return new;
}
-/* Initializes 'cursor' for iterating through 'cmap'.
- *
- * Use via CMAP_FOR_EACH. */
-void
-cmap_cursor_init(struct cmap_cursor *cursor, const struct cmap *cmap)
+struct cmap_cursor
+cmap_cursor_start(const struct cmap *cmap)
{
- cursor->impl = cmap_get_impl(cmap);
- cursor->bucket_idx = 0;
- cursor->entry_idx = 0;
+ struct cmap_cursor cursor;
+
+ cursor.impl = cmap_get_impl(cmap);
+ cursor.bucket_idx = 0;
+ cursor.entry_idx = 0;
+ cursor.node = NULL;
+ cmap_cursor_advance(&cursor);
+
+ return cursor;
}
-/* Returns the next node for 'cursor' to visit, following 'node', or NULL if
- * the last node has been visited.
- *
- * Use via CMAP_FOR_EACH. */
-struct cmap_node *
-cmap_cursor_next(struct cmap_cursor *cursor, const struct cmap_node *node)
+void
+cmap_cursor_advance(struct cmap_cursor *cursor)
{
const struct cmap_impl *impl = cursor->impl;
- if (node) {
- struct cmap_node *next = cmap_node_next(node);
-
- if (next) {
- return next;
+ if (cursor->node) {
+ cursor->node = cmap_node_next(cursor->node);
+ if (cursor->node) {
+ return;
}
}
const struct cmap_bucket *b = &impl->buckets[cursor->bucket_idx];
while (cursor->entry_idx < CMAP_K) {
- struct cmap_node *node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
-
- if (node) {
- return node;
+ cursor->node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
+ if (cursor->node) {
+ return;
}
}
cursor->bucket_idx++;
cursor->entry_idx = 0;
}
-
- return NULL;
}
/* Returns the next node in 'cmap' in hash order, or NULL if no nodes remain in
* has already expired.
*/
-#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
- for ((cmap_cursor_init(CURSOR, CMAP), \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER))
-
-#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CURSOR, CMAP) \
- for ((cmap_cursor_init(CURSOR, CMAP), \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
- (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER), true \
- : false); \
- (NODE) = (NEXT))
-
-#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
- for (ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER))
+#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
+ for (*(CURSOR) = cmap_cursor_start(CMAP); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(CURSOR))
+
+#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP) \
+ for (*(CURSOR) = cmap_cursor_start(CMAP); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
+ cmap_cursor_advance(CURSOR), \
+ true) \
+ : false); \
+ )
+
+#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
+ for (cmap_cursor_advance(CURSOR); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(CURSOR))
struct cmap_cursor {
const struct cmap_impl *impl;
uint32_t bucket_idx;
int entry_idx;
+ struct cmap_node *node;
};
-void cmap_cursor_init(struct cmap_cursor *, const struct cmap *);
-struct cmap_node *cmap_cursor_next(struct cmap_cursor *,
- const struct cmap_node *);
-
-
-static inline struct cmap_cursor cmap_cursor_start(const struct cmap *cmap,
- void **pnode,
- const void *offset)
-{
- struct cmap_cursor cursor;
-
- cmap_cursor_init(&cursor, cmap);
- *pnode = (char *)cmap_cursor_next(&cursor, NULL) + (ptrdiff_t)offset;
-
- return cursor;
-}
-
-#define CMAP_CURSOR_START(NODE, MEMBER, CMAP) \
- cmap_cursor_start(CMAP, (void **)&(NODE), \
- OBJECT_CONTAINING(NULL, NODE, MEMBER))
+struct cmap_cursor cmap_cursor_start(const struct cmap *);
+void cmap_cursor_advance(struct cmap_cursor *);
#define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
- for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
- MEMBER))
-
-#define CMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CMAP) \
- for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
- (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, \
- cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
- MEMBER), true \
+ for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
+ (cursor__.node \
+ ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(&cursor__))
+
+#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP) \
+ for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
+ (cursor__.node \
+ ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), \
+ cmap_cursor_advance(&cursor__), \
+ true) \
: false); \
- (NODE) = (NEXT))
+ )
static inline struct cmap_node *cmap_first(const struct cmap *);
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
- struct dp_netdev_flow *netdev_flow, *next;
+ struct dp_netdev_flow *netdev_flow;
ovs_mutex_lock(&dp->flow_mutex);
- CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
+ CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
ovs_mutex_unlock(&dp->flow_mutex);
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct rule_dpif *rule, *next_rule;
struct ofproto_packet_in *pin, *next_pin;
+ struct rule_dpif *rule;
struct oftable *table;
struct list pins;
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- CLS_FOR_EACH_SAFE (rule, next_rule, up.cr, &table->cls) {
+ CLS_FOR_EACH_SAFE (rule, up.cr, &table->cls) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}
ovs_mutex_lock(&ofproto_mutex);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- struct rule *rule, *next_rule;
+ struct rule *rule;
if (table->flags & OFTABLE_HIDDEN) {
continue;
}
- CLS_FOR_EACH_SAFE (rule, next_rule, cr, &table->cls) {
+ CLS_FOR_EACH_SAFE (rule, cr, &table->cls) {
ofproto_rule_delete__(rule, OFPRR_DELETE);
}
}
static void
destroy_classifier(struct classifier *cls)
{
- struct test_rule *rule, *next_rule;
+ struct test_rule *rule;
- CLS_FOR_EACH_SAFE (rule, next_rule, cls_rule, cls) {
+ CLS_FOR_EACH_SAFE (rule, cls_rule, cls) {
classifier_remove(cls, &rule->cls_rule);
free_rule(rule);
}
}
while (!classifier_is_empty(&cls)) {
- struct test_rule *rule, *next_rule;
struct test_rule *target;
+ struct test_rule *rule;
target = clone_rule(tcls.rules[random_range(tcls.n_rules)]);
- CLS_FOR_EACH_TARGET_SAFE (rule, next_rule, cls_rule, &cls,
+ CLS_FOR_EACH_TARGET_SAFE (rule, cls_rule, &cls,
&target->cls_rule) {
classifier_remove(&cls, &rule->cls_rule);
free_rule(rule);
static void
fte_free_all(struct classifier *cls)
{
- struct fte *fte, *next;
+ struct fte *fte;
- CLS_FOR_EACH_SAFE (fte, next, rule, cls) {
+ CLS_FOR_EACH_SAFE (fte, rule, cls) {
classifier_remove(cls, &fte->rule);
fte_free(fte);
}