#ifndef OFPROTO_OFPROTO_PROVIDER_H
#define OFPROTO_OFPROTO_PROVIDER_H 1
-/* Definitions for use within ofproto. */
+/* Definitions for use within ofproto.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * Lots of ofproto data structures are only accessed from a single thread.
+ * Those data structures are generally not thread-safe.
+ *
+ * The ofproto-dpif ofproto implementation accesses the flow table from
+ * multiple threads, including modifying the flow table from multiple threads
+ * via the "learn" action, so the flow table and various structures that index
+ * it have been made thread-safe. Refer to comments on individual data
+ * structures for details.
+ */
#include "cfm.h"
#include "classifier.h"
/* OpenFlow connections. */
struct connmgr *connmgr;
- /* Flow table operation tracking. */
- int state; /* Internal state. */
- struct list pending; /* List of "struct ofopgroup"s. */
- unsigned int n_pending; /* list_size(&pending). */
- struct hmap deletions; /* All OFOPERATION_DELETE "ofoperation"s. */
+ /* Flow table operation tracking.
+ *
+ * 'state' is meaningful only within ofproto.c, one of the enum
+ * ofproto_state constants defined there.
+ *
+ * 'pending' is the list of "struct ofopgroup"s currently pending.
+ *
+ * 'n_pending' is the number of elements in 'pending'.
+ *
+ * 'deletions' contains pending ofoperations of type OFOPERATION_DELETE,
+     * indexed on its rule's flow. */
+ int state;
+ struct list pending OVS_GUARDED_BY(ofproto_mutex);
+ unsigned int n_pending OVS_GUARDED_BY(ofproto_mutex);
+ struct hmap deletions OVS_GUARDED_BY(ofproto_mutex);
/* Delayed rule executions.
*
* ofproto_mutex during a flow_mod, because otherwise a "learn" action
* triggered by the executing the packet would try to recursively modify
* the flow table and reacquire the global lock. */
- struct guarded_list rule_executes;
+ struct guarded_list rule_executes; /* Contains "struct rule_execute"s. */
/* Flow table operation logging. */
int n_add, n_delete, n_modify; /* Number of unreported ops of each kind. */
OFTABLE_READONLY = 1 << 1 /* Don't allow OpenFlow to change this table. */
};
-/* A flow table within a "struct ofproto". */
+/* A flow table within a "struct ofproto".
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * A cls->rwlock read-lock holder prevents rules from being added or deleted.
+ *
+ * Adding or removing rules requires holding ofproto_mutex AND the cls->rwlock
+ * write-lock.
+ *
+ * cls->rwlock should be held only briefly. For extended access to a rule,
+ * increment its ref_count with ofproto_rule_ref(). A rule will not be freed
+ * until its ref_count reaches zero.
+ *
+ * Modifying a rule requires the rule's own mutex. Holding cls->rwlock (for
+ * read or write) does not allow the holder to modify the rule.
+ *
+ * Freeing a rule requires ofproto_mutex and the cls->rwlock write-lock. After
+ * removing the rule from the classifier, release a ref_count from the rule
+ * ('cls''s reference to the rule).
+ *
+ * Refer to the thread-safety notes on struct rule for more information. */
struct oftable {
enum oftable_flags flags;
struct classifier cls; /* Contains "struct rule"s. */
/* An OpenFlow flow within a "struct ofproto".
*
* With few exceptions, ofproto implementations may look at these fields but
- * should not modify them. */
+ * should not modify them.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
+ * the classifier rule->ofproto->tables[rule->table_id].cls. The text below
+ * calls this classifier 'cls'.
+ *
+ * Motivation
+ * ----------
+ *
+ * The thread safety rules described here for "struct rule" are motivated by
+ * two goals:
+ *
+ * - Prevent threads that read members of "struct rule" from reading bad
+ * data due to changes by some thread concurrently modifying those
+ * members.
+ *
+ * - Prevent two threads making changes to members of a given "struct rule"
+ * from interfering with each other.
+ *
+ *
+ * Rules
+ * -----
+ *
+ * A rule 'rule' may be accessed without a risk of being freed by code that
+ * holds a read-lock or write-lock on 'cls->rwlock' or that owns a reference to
+ * 'rule->ref_count' (or both). Code that needs to hold onto a rule for a
+ * while should take 'cls->rwlock', find the rule it needs, increment
+ * 'rule->ref_count' with ofproto_rule_ref(), and drop 'cls->rwlock'.
+ *
+ * 'rule->ref_count' protects 'rule' from being freed. It doesn't protect the
+ * rule from being deleted from 'cls' (that's 'cls->rwlock') and it doesn't
+ * protect members of 'rule' from modification (that's 'rule->mutex').
+ *
+ * 'rule->mutex' protects the members of 'rule' from modification. It doesn't
+ * protect the rule from being deleted from 'cls' (that's 'cls->rwlock') and it
+ * doesn't prevent the rule from being freed (that's 'rule->ref_count').
+ *
+ * Regarding thread safety, the members of a rule fall into the following
+ * categories:
+ *
+ * - Immutable. These members are marked 'const'.
+ *
+ * - Members that may be safely read or written only by code holding
+ * ofproto_mutex. These are marked OVS_GUARDED_BY(ofproto_mutex).
+ *
+ * - Members that may be safely read only by code holding ofproto_mutex or
+ *      'rule->mutex', and safely written only by code holding ofproto_mutex
+ * AND 'rule->mutex'. These are marked OVS_GUARDED.
+ */
struct rule {
/* Where this rule resides in an OpenFlow switch.
*
const struct cls_rule cr; /* In owning ofproto's classifier. */
const uint8_t table_id; /* Index in ofproto's 'tables' array. */
+ /* Protects members marked OVS_GUARDED.
+ * Readers only need to hold this mutex.
+ * Writers must hold both this mutex AND ofproto_mutex. */
+ struct ovs_mutex mutex OVS_ACQ_AFTER(ofproto_mutex);
+
+ /* Number of references.
+ * The classifier owns one reference.
+ * Any thread trying to keep a rule from being freed should hold its own
+ * reference. */
atomic_uint ref_count;
- struct ofoperation *pending; /* Operation now in progress, if nonnull. */
+ /* Operation now in progress, if nonnull. */
+ struct ofoperation *pending OVS_GUARDED_BY(ofproto_mutex);
- ovs_be64 flow_cookie; /* Controller-issued identifier. Guarded by
- mutex. */
+ /* A "flow cookie" is the OpenFlow name for a 64-bit value associated with
+     * a flow. */
+ ovs_be64 flow_cookie OVS_GUARDED;
struct hindex_node cookie_node OVS_GUARDED_BY(ofproto_mutex);
- long long int created; /* Creation time. */
- long long int modified; /* Time of last modification. */
- long long int used; /* Last use; time created if never used. */
+ /* Times. */
+ long long int created OVS_GUARDED; /* Creation time. */
+ long long int modified OVS_GUARDED; /* Time of last modification. */
+ long long int used OVS_GUARDED; /* Last use; time created if never used. */
bool send_flow_removed; /* Send a flow removed message? */
+ /* Timeouts. */
uint16_t hard_timeout OVS_GUARDED; /* In seconds from ->modified. */
uint16_t idle_timeout OVS_GUARDED; /* In seconds from ->used. */
- /* Eviction groups. */
- struct heap_node evg_node; /* In eviction_group's "rules" heap. */
- struct eviction_group *eviction_group; /* NULL if not in any group. */
-
- /* The mutex is used to protect those elements in struct rule which are
- * accessed by multiple threads. The main ofproto code is guaranteed not
- * to change any of the elements "Guarded by mutex" without holding the
- * lock.
- *
- * While maintaining a pointer to struct rule, threads are required to hold
- * a readlock on the classifier that holds the rule or increment the rule's
- * ref_count.
+    /* Eviction groups (see comment on struct eviction_group for explanation).
*
- * A rule will not be evicted unless its classifier's write lock is
- * held. */
- struct ovs_mutex mutex;
+ * 'eviction_group' is this rule's eviction group, or NULL if it is not in
+ * any eviction group. When 'eviction_group' is nonnull, 'evg_node' is in
+ * the ->eviction_group->rules hmap. */
+ struct eviction_group *eviction_group OVS_GUARDED_BY(ofproto_mutex);
+ struct heap_node evg_node OVS_GUARDED_BY(ofproto_mutex);
- /* Guarded by mutex. */
- struct rule_actions *actions;
+ /* OpenFlow actions. See struct rule_actions for more thread-safety
+ * notes. */
+ struct rule_actions *actions OVS_GUARDED;
- struct list meter_list_node; /* In owning meter's 'rules' list. */
+ /* In owning meter's 'rules' list. An empty list if there is no meter. */
+ struct list meter_list_node OVS_GUARDED_BY(ofproto_mutex);
- /* Flow monitors. */
- enum nx_flow_monitor_flags monitor_flags;
- uint64_t add_seqno; /* Sequence number when added. */
- uint64_t modify_seqno; /* Sequence number when changed. */
+ /* Flow monitors (e.g. for NXST_FLOW_MONITOR, related to struct ofmonitor).
+ *
+ * 'add_seqno' is the sequence number when this rule was created.
+ * 'modify_seqno' is the sequence number when this rule was last modified.
+ * See 'monitor_seqno' in connmgr.c for more information. */
+ enum nx_flow_monitor_flags monitor_flags OVS_GUARDED_BY(ofproto_mutex);
+ uint64_t add_seqno OVS_GUARDED_BY(ofproto_mutex);
+ uint64_t modify_seqno OVS_GUARDED_BY(ofproto_mutex);
/* Optimisation for flow expiry. In ofproto's 'expirable' list if this
* rule is expirable, otherwise empty. */
void ofproto_rule_ref(struct rule *);
void ofproto_rule_unref(struct rule *);
+struct rule_actions *rule_get_actions(const struct rule *rule)
+ OVS_EXCLUDED(rule->mutex);
+struct rule_actions *rule_get_actions__(const struct rule *rule)
+ OVS_REQUIRES(rule->mutex);
+
/* A set of actions within a "struct rule".
*
*
void rule_collection_init(struct rule_collection *);
void rule_collection_add(struct rule_collection *, struct rule *);
+void rule_collection_ref(struct rule_collection *) OVS_REQUIRES(ofproto_mutex);
+void rule_collection_unref(struct rule_collection *);
void rule_collection_destroy(struct rule_collection *);
/* Threshold at which to begin flow table eviction. Only affects the
return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}
-void ofproto_rule_expire(struct rule *rule, uint8_t reason);
+void ofproto_rule_expire(struct rule *rule, uint8_t reason)
+ OVS_REQUIRES(ofproto_mutex);
void ofproto_rule_delete(struct ofproto *, struct classifier *cls,
- struct rule *) OVS_REQ_WRLOCK(cls->rwlock);
+ struct rule *)
+ OVS_EXCLUDED(ofproto_mutex);
void ofproto_rule_reduce_timeouts(struct rule *rule, uint16_t idle_timeout,
uint16_t hard_timeout)
- OVS_EXCLUDED(ofproto_mutex, rule->mutex);
+ OVS_EXCLUDED(ofproto_mutex);
void ofoperation_complete(struct ofoperation *, enum ofperr);
-bool ofoperation_has_out_port(const struct ofoperation *, ofp_port_t out_port);
+bool ofoperation_has_out_port(const struct ofoperation *, ofp_port_t out_port)
+ OVS_REQUIRES(ofproto_mutex);
/* ofproto class structure, to be defined by each ofproto implementation.
*
*
* Rule destruction must not fail. */
struct rule *(*rule_alloc)(void);
- enum ofperr (*rule_construct)(struct rule *rule);
- void (*rule_insert)(struct rule *rule);
- void (*rule_delete)(struct rule *rule);
+ enum ofperr (*rule_construct)(struct rule *rule)
+ /* OVS_REQUIRES(ofproto_mutex) */;
+ void (*rule_insert)(struct rule *rule) /* OVS_REQUIRES(ofproto_mutex) */;
+ void (*rule_delete)(struct rule *rule) /* OVS_REQUIRES(ofproto_mutex) */;
void (*rule_destruct)(struct rule *rule);
void (*rule_dealloc)(struct rule *rule);
* in '*byte_count'. UINT64_MAX indicates that the packet count or byte
* count is unknown. */
void (*rule_get_stats)(struct rule *rule, uint64_t *packet_count,
- uint64_t *byte_count);
+ uint64_t *byte_count)
+ /* OVS_EXCLUDED(ofproto_mutex) */;
/* Applies the actions in 'rule' to 'packet'. (This implements sending
* buffered packets for OpenFlow OFPT_FLOW_MOD commands.)
*
* ->rule_modify_actions() should not modify any base members of struct
* rule. */
- void (*rule_modify_actions)(struct rule *rule, bool reset_counters);
+ void (*rule_modify_actions)(struct rule *rule, bool reset_counters)
+ /* OVS_REQUIRES(ofproto_mutex) */;
/* Changes the OpenFlow IP fragment handling policy to 'frag_handling',
* which takes one of the following values, with the corresponding
enum { OFPROTO_POSTPONE = 1 << 16 };
BUILD_ASSERT_DECL(OFPROTO_POSTPONE < OFPERR_OFS);
-int ofproto_flow_mod(struct ofproto *, struct ofputil_flow_mod *);
+int ofproto_flow_mod(struct ofproto *, struct ofputil_flow_mod *)
+ OVS_EXCLUDED(ofproto_mutex);
void ofproto_add_flow(struct ofproto *, const struct match *,
unsigned int priority,
- const struct ofpact *ofpacts, size_t ofpacts_len);
+ const struct ofpact *ofpacts, size_t ofpacts_len)
+ OVS_EXCLUDED(ofproto_mutex);
bool ofproto_delete_flow(struct ofproto *,
- const struct match *, unsigned int priority);
+ const struct match *, unsigned int priority)
+ OVS_EXCLUDED(ofproto_mutex);
void ofproto_flush_flows(struct ofproto *);
#endif /* ofproto/ofproto-provider.h */
const struct mf_subfield *fields,
size_t n_fields);
-static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->mutex);
+static void oftable_remove_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex);
static void oftable_remove_rule__(struct ofproto *ofproto,
struct classifier *cls, struct rule *rule)
- OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->mutex);
+ OVS_REQUIRES(ofproto_mutex);
static void oftable_insert_rule(struct rule *);
/* A set of rules within a single OpenFlow table (oftable) that have the same
struct heap rules; /* Contains "struct rule"s. */
};
-static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
- OVS_TRY_WRLOCK(true, (*rulep)->mutex);
-static void ofproto_evict(struct ofproto *);
+static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep);
+static void ofproto_evict(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
static uint32_t rule_eviction_priority(struct rule *);
static void eviction_group_add_rule(struct rule *);
static void eviction_group_remove_rule(struct rule *);
ofp_port_t out_port);
static void rule_criteria_destroy(struct rule_criteria *);
-/* A packet that needs to be passed to rule_execute(). */
+/* A packet that needs to be passed to rule_execute().
+ *
+ * (We can't do this immediately from ofopgroup_complete() because that holds
+ * ofproto_mutex, which rule_execute() needs released.) */
struct rule_execute {
struct list list_node; /* In struct ofproto's "rule_executes" list. */
struct rule *rule; /* Owns a reference to the rule. */
struct ofpbuf *packet; /* Owns the packet. */
};
-static void run_rule_executes(struct ofproto *);
+static void run_rule_executes(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
static void destroy_rule_executes(struct ofproto *);
/* ofport. */
-static void ofport_destroy__(struct ofport *);
+static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex);
static void ofport_destroy(struct ofport *);
static void update_port(struct ofproto *, const char *devname);
const struct rule_collection *);
static void delete_flow__(struct rule *rule, struct ofopgroup *,
enum ofp_flow_removed_reason)
- OVS_RELEASES(rule->mutex);
+ OVS_REQUIRES(ofproto_mutex);
static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
struct ofputil_flow_mod *,
- const struct ofp_header *);
+ const struct ofp_header *)
+ OVS_EXCLUDED(ofproto_mutex);
static void calc_duration(long long int start, long long int now,
uint32_t *sec, uint32_t *nsec);
static size_t n_ofproto_classes;
static size_t allocated_ofproto_classes;
-struct ovs_mutex ofproto_mutex;
+/* Global lock that protects all flow table operations. */
+struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
unsigned n_handler_threads;
struct shash_node *node;
size_t i;
- ovs_mutex_init_recursive(&ofproto_mutex);
-
ofproto_class_register(&ofproto_dpif_class);
/* Make a local copy, since we don't own 'iface_hints' elements. */
connmgr_get_snoops(ofproto->connmgr, snoops);
}
+/* Deletes 'rule' from 'cls' within 'ofproto'.
+ *
+ * The caller must hold ofproto_mutex, and 'rule' must not have any flow table
+ * operation pending.  The deletion runs as its own self-contained operation
+ * group. */
+static void
+ofproto_rule_delete__(struct ofproto *ofproto, struct classifier *cls,
+                      struct rule *rule)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    struct ofopgroup *group;
+
+    /* A rule with an operation already in progress must not be deleted. */
+    ovs_assert(!rule->pending);
+    /* 'cls' must be the classifier that actually contains 'rule'. */
+    ovs_assert(cls == &ofproto->tables[rule->table_id].cls);
+
+    group = ofopgroup_create_unattached(ofproto);
+    delete_flow__(rule, group, OFPRR_DELETE);
+    ofopgroup_submit(group);
+}
+
/* Deletes 'rule' from 'cls' within 'ofproto'.
*
* Within an ofproto implementation, this function allows an ofproto
* function is called. This function is not suitable for use elsewhere in an
* ofproto implementation.
*
- * This function is also used internally in ofproto.c.
- *
* This function implements steps 4.4 and 4.5 in the section titled "Rule Life
* Cycle" in ofproto-provider.h.
void
ofproto_rule_delete(struct ofproto *ofproto, struct classifier *cls,
struct rule *rule)
- OVS_REQ_WRLOCK(cls->rwlock)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofopgroup *group;
+ ovs_mutex_lock(&ofproto_mutex);
ovs_assert(!rule->pending);
ovs_assert(cls == &ofproto->tables[rule->table_id].cls);
group = ofopgroup_create_unattached(ofproto);
ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
- ovs_mutex_lock(&rule->mutex);
oftable_remove_rule__(ofproto, cls, rule);
ofproto->ofproto_class->rule_delete(rule);
ofopgroup_submit(group);
+
+ ovs_mutex_unlock(&ofproto_mutex);
}
static void
ofproto_flush__(struct ofproto *ofproto)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct oftable *table;
ofproto->ofproto_class->flush(ofproto);
}
+ ovs_mutex_lock(&ofproto_mutex);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
struct rule *rule, *next_rule;
struct cls_cursor cursor;
continue;
}
- ovs_rwlock_wrlock(&table->cls.rwlock);
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
+ ovs_rwlock_unlock(&table->cls.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
if (!rule->pending) {
- ofproto_rule_delete(ofproto, &table->cls, rule);
+ ofproto_rule_delete__(ofproto, &table->cls, rule);
}
}
- ovs_rwlock_unlock(&table->cls.rwlock);
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
static void
ofproto_destroy__(struct ofproto *ofproto)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct oftable *table;
ovs_assert(list_is_empty(&ofproto->pending));
- ovs_assert(!ofproto->n_pending);
destroy_rule_executes(ofproto);
guarded_list_destroy(&ofproto->rule_executes);
void
ofproto_destroy(struct ofproto *p)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofport *ofport, *next_ofport;
+/* Returns true if 'p' has at least one "struct ofopgroup" pending.
+ * Takes and releases ofproto_mutex, since 'p->pending' is guarded by it. */
 static bool
 any_pending_ops(const struct ofproto *p)
+    OVS_EXCLUDED(ofproto_mutex)
 {
-    return !list_is_empty(&p->pending);
+    bool b;
+
+    ovs_mutex_lock(&ofproto_mutex);
+    b = !list_is_empty(&p->pending);
+    ovs_mutex_unlock(&ofproto_mutex);
+
+    return b;
 }
int
continue;
}
+ ovs_mutex_lock(&ofproto_mutex);
HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
heap_rebuild(&evg->rules);
}
}
}
ovs_rwlock_unlock(&table->cls.rwlock);
+ ovs_mutex_unlock(&ofproto_mutex);
}
}
unsigned int n_rules;
simap_increase(usage, "ports", hmap_count(&ofproto->ports));
+
+ ovs_mutex_lock(&ofproto_mutex);
simap_increase(usage, "ops",
ofproto->n_pending + hmap_count(&ofproto->deletions));
+ ovs_mutex_unlock(&ofproto_mutex);
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
fm.flags = 0;
fm.ofpacts = CONST_CAST(struct ofpact *, ofpacts);
fm.ofpacts_len = ofpacts_len;
+
return handle_flow_mod__(ofproto, NULL, &fm, NULL);
}
ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
unsigned int priority,
const struct ofpact *ofpacts, size_t ofpacts_len)
+ OVS_EXCLUDED(ofproto_mutex)
{
const struct rule *rule;
bool must_add;
* This is a helper function for in-band control and fail-open. */
int
ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
+ OVS_EXCLUDED(ofproto_mutex)
{
return handle_flow_mod__(ofproto, NULL, fm, NULL);
}
bool
ofproto_delete_flow(struct ofproto *ofproto,
const struct match *target, unsigned int priority)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct classifier *cls = &ofproto->tables[0].cls;
struct rule *rule;
}
}
+/* Returns a reference to 'rule''s current actions, taking 'rule->mutex'
+ * briefly to read them.  The caller owns the returned reference and must
+ * eventually release it with rule_actions_unref(). */
+struct rule_actions *
+rule_get_actions(const struct rule *rule)
+    OVS_EXCLUDED(rule->mutex)
+{
+    struct rule_actions *actions;
+
+    ovs_mutex_lock(&rule->mutex);
+    actions = rule_get_actions__(rule);
+    ovs_mutex_unlock(&rule->mutex);
+
+    return actions;
+}
+
+/* Like rule_get_actions(), for callers that already hold 'rule->mutex'.
+ * The caller must still release the returned reference with
+ * rule_actions_unref(). */
+struct rule_actions *
+rule_get_actions__(const struct rule *rule)
+    OVS_REQUIRES(rule->mutex)
+{
+    rule_actions_ref(rule->actions);
+    return rule->actions;
+}
+
static void
ofproto_rule_destroy__(struct rule *rule)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
rule_actions_unref(rule->actions);
* that outputs to 'port' (output to OFPP_FLOOD and OFPP_ALL doesn't count). */
static bool
ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
+ OVS_REQUIRES(ofproto_mutex)
{
return (port == OFPP_ANY
|| ofpacts_output_to_port(rule->actions->ofpacts,
* OFPAT_ENQUEUE action that outputs to 'out_port'. */
bool
ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
+ OVS_REQUIRES(ofproto_mutex)
{
if (ofproto_rule_has_out_port(op->rule, out_port)) {
return true;
* by passing them to the ofproto provider. */
static void
run_rule_executes(struct ofproto *ofproto)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct rule_execute *e, *next;
struct list executes;
static void
ofproto_rule_change_cookie(struct ofproto *ofproto, struct rule *rule,
ovs_be64 new_cookie)
+ OVS_REQUIRES(ofproto_mutex)
{
if (new_cookie != rule->flow_cookie) {
- ovs_mutex_lock(&ofproto_mutex);
cookies_remove(ofproto, rule);
ovs_mutex_lock(&rule->mutex);
ovs_mutex_unlock(&rule->mutex);
cookies_insert(ofproto, rule);
- ovs_mutex_unlock(&ofproto_mutex);
}
}
rules->rules[rules->n++] = rule;
}
+/* Acquires one reference to each of the rules in 'rules', via
+ * ofproto_rule_ref().  Caller must hold ofproto_mutex. */
+void
+rule_collection_ref(struct rule_collection *rules)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    size_t i;
+
+    for (i = 0; i < rules->n; i++) {
+        ofproto_rule_ref(rules->rules[i]);
+    }
+}
+
+/* Releases one reference to each of the rules in 'rules', via
+ * ofproto_rule_unref(). */
+void
+rule_collection_unref(struct rule_collection *rules)
+{
+    size_t i;
+
+    for (i = 0; i < rules->n; i++) {
+        ofproto_rule_unref(rules->rules[i]);
+    }
+}
+
void
rule_collection_destroy(struct rule_collection *rules)
{
static enum ofperr
collect_rule(struct rule *rule, const struct rule_criteria *c,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
if (ofproto_rule_is_hidden(rule)) {
return 0;
collect_rules_loose(struct ofproto *ofproto,
const struct rule_criteria *criteria,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
enum ofperr error;
if (criteria->cookie_mask == htonll(UINT64_MAX)) {
struct rule *rule;
- ovs_mutex_lock(&ofproto_mutex);
HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node,
hash_cookie(criteria->cookie),
&ofproto->cookies) {
}
}
}
- ovs_mutex_unlock(&ofproto_mutex);
} else {
FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
struct cls_cursor cursor;
collect_rules_strict(struct ofproto *ofproto,
const struct rule_criteria *criteria,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
int error;
if (criteria->cookie_mask == htonll(UINT64_MAX)) {
struct rule *rule;
- ovs_mutex_lock(&ofproto_mutex);
HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node,
hash_cookie(criteria->cookie),
&ofproto->cookies) {
}
}
}
- ovs_mutex_unlock(&ofproto_mutex);
} else {
FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
struct rule *rule;
static enum ofperr
handle_flow_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_flow_stats_request fsr;
rule_criteria_init(&criteria, fsr.table_id, &fsr.match, 0, fsr.cookie,
fsr.cookie_mask, fsr.out_port);
+
+ ovs_mutex_lock(&ofproto_mutex);
error = collect_rules_loose(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
+ if (!error) {
+ rule_collection_ref(&rules);
+ }
+ ovs_mutex_unlock(&ofproto_mutex);
+
if (error) {
return error;
}
struct rule *rule = rules.rules[i];
long long int now = time_msec();
struct ofputil_flow_stats fs;
-
- minimatch_expand(&rule->cr.match, &fs.match);
- fs.priority = rule->cr.priority;
- fs.cookie = rule->flow_cookie;
- fs.table_id = rule->table_id;
- calc_duration(rule->created, now, &fs.duration_sec, &fs.duration_nsec);
- fs.idle_age = age_secs(now - rule->used);
- fs.hard_age = age_secs(now - rule->modified);
- ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
- &fs.byte_count);
- fs.ofpacts = rule->actions->ofpacts;
- fs.ofpacts_len = rule->actions->ofpacts_len;
+ long long int created, used, modified;
+ struct rule_actions *actions;
+ bool send_flow_removed;
ovs_mutex_lock(&rule->mutex);
+ fs.cookie = rule->flow_cookie;
fs.idle_timeout = rule->idle_timeout;
fs.hard_timeout = rule->hard_timeout;
+ created = rule->created;
+ used = rule->used;
+ modified = rule->modified;
+ actions = rule_get_actions__(rule);
+ send_flow_removed = rule->send_flow_removed;
ovs_mutex_unlock(&rule->mutex);
+ minimatch_expand(&rule->cr.match, &fs.match);
+ fs.table_id = rule->table_id;
+ calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec);
+ fs.priority = rule->cr.priority;
+ fs.idle_age = age_secs(now - used);
+ fs.hard_age = age_secs(now - modified);
+ ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
+ &fs.byte_count);
+ fs.ofpacts = actions->ofpacts;
+ fs.ofpacts_len = actions->ofpacts_len;
+
fs.flags = 0;
- if (rule->send_flow_removed) {
+ if (send_flow_removed) {
fs.flags |= OFPUTIL_FF_SEND_FLOW_REM;
/* FIXME: Implement OFPUTIL_FF_NO_PKT_COUNTS and
OFPUTIL_FF_NO_BYT_COUNTS. */
}
ofputil_append_flow_stats_reply(&fs, &replies);
+
+ rule_actions_unref(actions);
}
+
+ rule_collection_unref(&rules);
rule_collection_destroy(&rules);
ofconn_send_replies(ofconn, &replies);
flow_stats_ds(struct rule *rule, struct ds *results)
{
uint64_t packet_count, byte_count;
+ struct rule_actions *actions;
+ long long int created;
rule->ofproto->ofproto_class->rule_get_stats(rule,
&packet_count, &byte_count);
+ ovs_mutex_lock(&rule->mutex);
+ actions = rule_get_actions__(rule);
+ created = rule->created;
+ ovs_mutex_unlock(&rule->mutex);
+
if (rule->table_id != 0) {
ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id);
}
- ds_put_format(results, "duration=%llds, ",
- (time_msec() - rule->created) / 1000);
+ ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000);
ds_put_format(results, "priority=%u, ", rule->cr.priority);
ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
cls_rule_format(&rule->cr, results);
ds_put_char(results, ',');
- ofpacts_format(rule->actions->ofpacts, rule->actions->ofpacts_len,
- results);
+
+ ofpacts_format(actions->ofpacts, actions->ofpacts_len, results);
+
ds_put_cstr(results, "\n");
+
+ rule_actions_unref(actions);
}
/* Adds a pretty-printed description of all flows to 'results', including
static enum ofperr
handle_aggregate_stats_request(struct ofconn *ofconn,
const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_flow_stats_request request;
rule_criteria_init(&criteria, request.table_id, &request.match, 0,
request.cookie, request.cookie_mask,
request.out_port);
+
+ ovs_mutex_lock(&ofproto_mutex);
error = collect_rules_loose(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
+ if (!error) {
+ rule_collection_ref(&rules);
+ }
+ ovs_mutex_unlock(&ofproto_mutex);
+
if (error) {
return error;
}
stats.byte_count = UINT64_MAX;
}
+ rule_collection_unref(&rules);
rule_collection_destroy(&rules);
reply = ofputil_encode_aggregate_stats_reply(&stats, oh);
is_flow_deletion_pending(const struct ofproto *ofproto,
const struct cls_rule *cls_rule,
uint8_t table_id)
+ OVS_REQUIRES(ofproto_mutex)
{
if (!hmap_is_empty(&ofproto->deletions)) {
struct ofoperation *op;
+/* Returns true if adding 'extra_space' more rules to 'table' would put it
+ * over its configured 'max_flows', i.e. true if eviction is needed first. */
 static bool
 should_evict_a_rule(struct oftable *table, unsigned int extra_space)
+    OVS_REQUIRES(ofproto_mutex)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
-    size_t count;
-
-    ovs_rwlock_rdlock(&table->cls.rwlock);
-    count = classifier_count(&table->cls);
-    ovs_rwlock_unlock(&table->cls.rwlock);
-
-    return count + extra_space > table->max_flows;
+    return classifier_count(&table->cls) + extra_space > table->max_flows;
 }
static enum ofperr
evict_rules_from_table(struct ofproto *ofproto, struct oftable *table,
unsigned int extra_space)
+ OVS_REQUIRES(ofproto_mutex)
{
while (should_evict_a_rule(table, extra_space)) {
struct rule *rule;
if (!choose_rule_to_evict(table, &rule)) {
return OFPERR_OFPFMFC_TABLE_FULL;
} else if (rule->pending) {
- ovs_mutex_unlock(&rule->mutex);
return OFPROTO_POSTPONE;
} else {
struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
- ofoperation_create(group, rule,
- OFOPERATION_DELETE, OFPRR_EVICTION);
- oftable_remove_rule(rule);
- ofproto->ofproto_class->rule_delete(rule);
+ delete_flow__(rule, group, OFPRR_EVICTION);
+ ofopgroup_submit(group);
}
}
static enum ofperr
add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm, const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
struct ofopgroup *group;
modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm, const struct ofp_header *request,
const struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
enum ofoperation_type type;
struct ofopgroup *group;
static enum ofperr
modify_flows_add(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm, const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
if (fm->cookie_mask != htonll(0) || fm->new_cookie == htonll(UINT64_MAX)) {
return 0;
modify_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm,
const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
struct rule_collection rules;
modify_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm,
const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
struct rule_collection rules;
static void
delete_flow__(struct rule *rule, struct ofopgroup *group,
enum ofp_flow_removed_reason reason)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = rule->ofproto;
const struct ofp_header *request,
const struct rule_collection *rules,
enum ofp_flow_removed_reason reason)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofopgroup *group;
size_t i;
group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
for (i = 0; i < rules->n; i++) {
- struct rule *rule = rules->rules[i];
- ovs_mutex_lock(&rule->mutex);
- delete_flow__(rule, group, reason);
+ delete_flow__(rules->rules[i], group, reason);
}
ofopgroup_submit(group);
delete_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
const struct ofputil_flow_mod *fm,
const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
struct rule_collection rules;
delete_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
const struct ofputil_flow_mod *fm,
const struct ofp_header *request)
+ OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
struct rule_collection rules;
static void
ofproto_rule_send_removed(struct rule *rule, uint8_t reason)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofputil_flow_removed fr;
* OpenFlow flows. */
void
ofproto_rule_expire(struct rule *rule, uint8_t reason)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = rule->ofproto;
struct classifier *cls = &ofproto->tables[rule->table_id].cls;
- ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT);
- ofproto_rule_send_removed(rule, reason);
+ ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT
+ || reason == OFPRR_DELETE);
- ovs_rwlock_wrlock(&cls->rwlock);
- ofproto_rule_delete(ofproto, cls, rule);
- ovs_rwlock_unlock(&cls->rwlock);
+ ofproto_rule_send_removed(rule, reason);
+ ofproto_rule_delete__(ofproto, cls, rule);
}
/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
\f
static enum ofperr
handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_flow_mod fm;
static enum ofperr
handle_flow_mod__(struct ofproto *ofproto, struct ofconn *ofconn,
struct ofputil_flow_mod *fm, const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
enum ofperr error;
+ ovs_mutex_lock(&ofproto_mutex);
if (ofproto->n_pending < 50) {
switch (fm->command) {
case OFPFC_ADD:
ovs_assert(!list_is_empty(&ofproto->pending));
error = OFPROTO_POSTPONE;
}
+ ovs_mutex_unlock(&ofproto_mutex);
run_rule_executes(ofproto);
return error;
ofproto_compose_flow_refresh_update(const struct rule *rule,
enum nx_flow_monitor_flags flags,
struct list *msgs)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofoperation *op = rule->pending;
const struct rule_actions *actions;
void
ofmonitor_compose_refresh_updates(struct rule_collection *rules,
struct list *msgs)
+ OVS_REQUIRES(ofproto_mutex)
{
size_t i;
ofproto_collect_ofmonitor_refresh_rule(const struct ofmonitor *m,
struct rule *rule, uint64_t seqno,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
enum nx_flow_monitor_flags update;
ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
uint64_t seqno,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn);
const struct ofoperation *op;
static void
ofproto_collect_ofmonitor_initial_rules(struct ofmonitor *m,
struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
if (m->flags & NXFMF_INITIAL) {
ofproto_collect_ofmonitor_refresh_rules(m, 0, rules);
void
ofmonitor_collect_resume_rules(struct ofmonitor *m,
uint64_t seqno, struct rule_collection *rules)
+ OVS_REQUIRES(ofproto_mutex)
{
ofproto_collect_ofmonitor_refresh_rules(m, seqno, rules);
}
static enum ofperr
handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofmonitor **monitors;
ofpbuf_use_const(&b, oh, ntohs(oh->length));
monitors = NULL;
n_monitors = allocated_monitors = 0;
+
+ ovs_mutex_lock(&ofproto_mutex);
for (;;) {
struct ofputil_flow_monitor_request request;
struct ofmonitor *m;
ofpmp_init(&replies, oh);
ofmonitor_compose_refresh_updates(&rules, &replies);
+ ovs_mutex_unlock(&ofproto_mutex);
+
rule_collection_destroy(&rules);
ofconn_send_replies(ofconn, &replies);
-
free(monitors);
return 0;
error:
+ ovs_mutex_unlock(&ofproto_mutex);
+
for (i = 0; i < n_monitors; i++) {
ofmonitor_destroy(monitors[i]);
}
static enum ofperr
handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofmonitor *m;
+ enum ofperr error;
uint32_t id;
id = ofputil_decode_flow_monitor_cancel(oh);
+
+ ovs_mutex_lock(&ofproto_mutex);
m = ofmonitor_lookup(ofconn, id);
- if (!m) {
- return OFPERR_NXBRC_FM_BAD_ID;
+ if (m) {
+ ofmonitor_destroy(m);
+ error = 0;
+ } else {
+ error = OFPERR_NXBRC_FM_BAD_ID;
}
+ ovs_mutex_unlock(&ofproto_mutex);
- ofmonitor_destroy(m);
- return 0;
+ return error;
}
/* Meters implementation.
static void
meter_delete(struct ofproto *ofproto, uint32_t first, uint32_t last)
+ OVS_REQUIRES(ofproto_mutex)
{
uint32_t mid;
for (mid = first; mid <= last; ++mid) {
static enum ofperr
handle_delete_meter(struct ofconn *ofconn, const struct ofp_header *oh,
struct ofputil_meter_mod *mm)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
uint32_t meter_id = mm->meter.meter_id;
/* First delete the rules that use this meter. If any of those rules are
* currently being modified, postpone the whole operation until later. */
rule_collection_init(&rules);
+ ovs_mutex_lock(&ofproto_mutex);
for (meter_id = first; meter_id <= last; ++meter_id) {
struct meter *meter = ofproto->meters[meter_id];
if (meter && !list_is_empty(&meter->rules)) {
meter_delete(ofproto, first, last);
exit:
+ ovs_mutex_unlock(&ofproto_mutex);
rule_collection_destroy(&rules);
return error;
static enum ofperr
handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
+ OVS_EXCLUDED(ofproto_mutex)
{
const struct ofp_header *oh = msg->data;
enum ofptype type;
static bool
handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg)
+ OVS_EXCLUDED(ofproto_mutex)
{
int error = handle_openflow__(ofconn, ofp_msg);
if (error && error != OFPROTO_POSTPONE) {
* ofoperation_create() and then submit it with ofopgroup_submit(). */
static struct ofopgroup *
ofopgroup_create_unattached(struct ofproto *ofproto)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofopgroup *group = xzalloc(sizeof *group);
group->ofproto = ofproto;
static struct ofopgroup *
ofopgroup_create(struct ofproto *ofproto, struct ofconn *ofconn,
const struct ofp_header *request, uint32_t buffer_id)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
if (ofconn) {
* groups. */
static void
ofopgroup_submit(struct ofopgroup *group)
+ OVS_REQUIRES(ofproto_mutex)
{
if (!group->n_running) {
ofopgroup_complete(group);
static void
ofopgroup_complete(struct ofopgroup *group)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = group->ofproto;
}
}
} else {
- ovs_mutex_lock(&rule->mutex);
oftable_remove_rule(rule);
ofproto_rule_unref(rule);
}
ofoperation_create(struct ofopgroup *group, struct rule *rule,
enum ofoperation_type type,
enum ofp_flow_removed_reason reason)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = group->ofproto;
struct ofoperation *op;
static void
ofoperation_destroy(struct ofoperation *op)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofopgroup *group = op->group;
{
struct ofopgroup *group = op->group;
- ovs_assert(op->rule->pending == op);
ovs_assert(group->n_running > 0);
ovs_assert(!error || op->type != OFOPERATION_DELETE);
op->error = error;
if (!--group->n_running && !list_is_empty(&group->ofproto_node)) {
+ /* This function can be called from ->rule_construct(), in which case
+ * ofproto_mutex is held, or it can be called from ->run(), in which
+ * case ofproto_mutex is not held. But only in the latter case can we
+ * arrive here, so we can safely take ofproto_mutex now. */
+ ovs_mutex_lock(&ofproto_mutex);
+ ovs_assert(op->rule->pending == op);
ofopgroup_complete(group);
+ ovs_mutex_unlock(&ofproto_mutex);
}
}
\f
* or with no timeouts are not evictable.) */
static bool
choose_rule_to_evict(struct oftable *table, struct rule **rulep)
+ OVS_REQUIRES(ofproto_mutex)
{
struct eviction_group *evg;
struct rule *rule;
HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
- if (!ovs_mutex_trylock(&rule->mutex)) {
- *rulep = rule;
- return true;
- }
+ *rulep = rule;
+ return true;
}
}
{
struct oftable *table;
+ ovs_mutex_lock(&ofproto_mutex);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
evict_rules_from_table(ofproto, table, 0);
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
\f
/* Eviction groups. */
* adds or removes rules in 'evg'. */
static void
eviction_group_resized(struct oftable *table, struct eviction_group *evg)
+ OVS_REQUIRES(ofproto_mutex)
{
heap_change(&table->eviction_groups_by_size, &evg->size_node,
eviction_group_priority(heap_count(&evg->rules)));
* - Frees 'evg'. */
static void
eviction_group_destroy(struct oftable *table, struct eviction_group *evg)
+ OVS_REQUIRES(ofproto_mutex)
{
while (!heap_is_empty(&evg->rules)) {
struct rule *rule;
/* Removes 'rule' from its eviction group, if any. */
static void
eviction_group_remove_rule(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
if (rule->eviction_group) {
struct oftable *table = &rule->ofproto->tables[rule->table_id];
* returns the hash value. */
static uint32_t
eviction_group_hash_rule(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table = &rule->ofproto->tables[rule->table_id];
const struct mf_subfield *sf;
* if necessary. */
static struct eviction_group *
eviction_group_find(struct oftable *table, uint32_t id)
+ OVS_REQUIRES(ofproto_mutex)
{
struct eviction_group *evg;
* for eviction. */
static uint32_t
rule_eviction_priority(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
long long int hard_expiration;
long long int idle_expiration;
* The caller must ensure that 'rule' is not already in an eviction group. */
static void
eviction_group_add_rule(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = rule->ofproto;
struct oftable *table = &ofproto->tables[rule->table_id];
* This function configures the former policy on 'table'. */
static void
oftable_disable_eviction(struct oftable *table)
+ OVS_REQUIRES(ofproto_mutex)
{
if (table->eviction_fields) {
struct eviction_group *evg, *next;
static void
oftable_enable_eviction(struct oftable *table,
const struct mf_subfield *fields, size_t n_fields)
+ OVS_REQUIRES(ofproto_mutex)
{
struct cls_cursor cursor;
struct rule *rule;
static void
oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
struct rule *rule)
- OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->mutex)
+ OVS_REQUIRES(ofproto_mutex)
{
+ ovs_rwlock_wrlock(&cls->rwlock);
classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
+ ovs_rwlock_unlock(&cls->rwlock);
- ovs_mutex_lock(&ofproto_mutex);
cookies_remove(ofproto, rule);
- ovs_mutex_unlock(&ofproto_mutex);
eviction_group_remove_rule(rule);
- ovs_mutex_lock(&ofproto_mutex);
if (!list_is_empty(&rule->expirable)) {
list_remove(&rule->expirable);
}
- ovs_mutex_unlock(&ofproto_mutex);
if (!list_is_empty(&rule->meter_list_node)) {
list_remove(&rule->meter_list_node);
list_init(&rule->meter_list_node);
}
- ovs_mutex_unlock(&rule->mutex);
}
static void
oftable_remove_rule(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = rule->ofproto;
struct oftable *table = &ofproto->tables[rule->table_id];
- ovs_rwlock_wrlock(&table->cls.rwlock);
oftable_remove_rule__(ofproto, &table->cls, rule);
- ovs_rwlock_unlock(&table->cls.rwlock);
}
/* Inserts 'rule' into its oftable, which must not already contain any rule for
* the same cls_rule. */
static void
oftable_insert_rule(struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofproto *ofproto = rule->ofproto;
struct oftable *table = &ofproto->tables[rule->table_id];
ovs_mutex_unlock(&rule->mutex);
if (may_expire) {
- ovs_mutex_lock(&ofproto_mutex);
list_insert(&ofproto->expirable, &rule->expirable);
- ovs_mutex_unlock(&ofproto_mutex);
}
- ovs_mutex_lock(&ofproto_mutex);
cookies_insert(ofproto, rule);
- ovs_mutex_unlock(&ofproto_mutex);
if (rule->actions->meter_id) {
struct meter *meter = ofproto->meters[rule->actions->meter_id];
OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
const struct cls_table *table;
+ ovs_rwlock_rdlock(&oftable->cls.rwlock);
HMAP_FOR_EACH (table, hmap_node, &oftable->cls.tables) {
if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
const struct cls_rule *rule;
}
}
}
+ ovs_rwlock_unlock(&oftable->cls.rwlock);
}
}