/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include "ofproto-dpif-upcall.h"

#include "dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
COVERAGE_DEFINE(revalidate_missed_dp_flow);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};
/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes them if necessary. */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
    struct ovs_mutex *mutex;           /* Points into udpif->ukeys for this
                                          revalidator. Required for writing
                                          to 'ukeys'. */
    struct cmap *ukeys;                /* Points into udpif->ukeys for this
                                          revalidator. Used for GC phase. */
};
/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 *    - Revalidation threads which read the datapath flow table and maintain
 *      the flows in it. */
struct udpif {
    struct list list_node;             /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    uint32_t secret;                   /* Random seed for upcall hash. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    /* Revalidation. */
    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool need_revalidate;              /* As indicated by 'reval_seq'. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */
    /* There are 'n_revalidators' ukey cmaps. Each revalidator retains a
     * reference to one of these for garbage collection.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution. During the garbage collection phase, each revalidator
     * takes care of garbage collecting one of these cmaps. */
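    /* For example, a ukey whose precomputed hash is 'hash' lives in
     * udpif->ukeys[hash % udpif->n_revalidators].cmap, mirroring the
     * indexing done in ukey_lookup() and ukey_acquire() below. */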
    struct {
        struct ovs_mutex mutex;        /* Take for writing to the following. */
        struct cmap cmap;              /* Datapath flow keys. */
    } *ukeys;
    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_uint n_flows;               /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */
};
enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};
struct upcall {
    struct ofproto_dpif *ofproto;  /* Parent ofproto. */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev. If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;       /* Parsed representation of the packet. */
    const struct ofpbuf *packet;   /* Packet associated with this upcall. */
    ofp_port_t in_port;            /* OpenFlow in port, or OFPP_NONE. */

    enum dpif_upcall_type type;    /* Datapath type of the upcall. */
    const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */

    bool xout_initialized;         /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;         /* Result of xlate_actions(). */
    struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;      /* IPFIX pointer or NULL. */
    struct dpif_sflow *sflow;      /* SFlow pointer or NULL. */

    bool vsp_adjusted;             /* 'packet' and 'flow' were adjusted for
                                      VLAN splinters if true. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Datapath flow key length. */
    const struct nlattr *out_tun_key;  /* Datapath output tunnel key. */
};
/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath. They may be created or maintained by any revalidator during
 * the dump phase, but are owned by a single revalidator, and are destroyed
 * by that revalidator during the garbage-collection phase.
 *
 * While some elements of a udpif_key are protected by a mutex, the ukey itself
 * is not. Therefore it is not safe to destroy a udpif_key except when all
 * revalidators are in garbage collection phase, or they aren't running. */
struct udpif_key {
    struct cmap_node cmap_node;    /* In parent revalidator 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Length of 'key'. */
    uint32_t hash;                 /* Pre-computed hash for 'key'. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats. */
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    bool flow_exists OVS_GUARDED;             /* Ensures flows are only deleted
                                                 once. */
    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning.*/

    struct odputil_keybuf key_buf; /* Memory for 'key'. */
    struct nlattr key_buf_nla;
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs);
static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
                          struct ofpbuf *odp_actions);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
static void udpif_stop_threads(struct udpif *);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
                                size_t n_revalidators);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
                                             const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
                                            const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
                                          const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux);
static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
                                 const char *argv[], void *aux);

static struct udpif_key *ukey_create(const struct nlattr *key, size_t key_len,
                                     long long int used, uint32_t hash);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
                                     const struct nlattr *key, size_t key_len,
                                     uint32_t hash);
static bool ukey_acquire(struct udpif *udpif, const struct nlattr *key,
                         size_t key_len, long long int used,
                         struct udpif_key **result);
static void ukey_delete__(struct udpif_key *);
static void ukey_delete(struct revalidator *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
                                        const struct nlattr *userdata);

static int upcall_receive(struct upcall *, const struct dpif_backer *,
                          const struct ofpbuf *packet, enum dpif_upcall_type,
                          const struct nlattr *userdata, const struct flow *);
static void upcall_uninit(struct upcall *);

static upcall_callback upcall_cb;

static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
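/* Toggled at runtime by the upcall/enable-megaflows and
 * upcall/disable-megaflows unixctl commands below; both flush every
 * datapath, so existing flows are re-installed under the new setting. */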
struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct udpif *udpif = xzalloc(sizeof *udpif);

    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
                                 upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        unixctl_command_register("revalidator/purge", "", 0, 0,
                                 upcall_unixctl_purge, NULL);
        ovsthread_once_done(&once);
    }

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->secret = random_uint32();
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);

    return udpif;
}
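/* A sketch of the expected lifecycle, assuming the usual callers in
 * ofproto-dpif.c:
 *
 *     struct udpif *udpif = udpif_create(backer, dpif);
 *     udpif_set_threads(udpif, n_handlers, n_revalidators);
 *     ...                                   (udpif_run() in the main loop)
 *     udpif_destroy(udpif);                 (stops the threads first)
 */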
void
udpif_run(struct udpif *udpif)
{
    if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
        size_t i;

        for (i = 0; i < udpif->n_conns; i++) {
            unixctl_command_reply(udpif->conns[i], NULL);
        }
        free(udpif->conns);
        udpif->conns = NULL;
        udpif->n_conns = 0;
    }
}
void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif);

    list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}
/* Stops the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state, except when called while destroying the udpif. */
static void
udpif_stop_threads(struct udpif *udpif)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        latch_set(&udpif->exit_latch);

        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            xpthread_join(handler->thread, NULL);
        }

        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }

        dpif_disable_upcall(udpif->dpif);

        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            /* Delete ukeys, and delete all flows from the datapath to prevent
             * double-counting stats. */
            revalidator_purge(revalidator);

            cmap_destroy(&udpif->ukeys[i].cmap);
            ovs_mutex_destroy(&udpif->ukeys[i].mutex);
        }

        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;

        free(udpif->ukeys);
        udpif->ukeys = NULL;
    }
}
/* Starts the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
                    size_t n_revalidators)
{
    if (udpif && n_handlers && n_revalidators) {
        size_t i;

        udpif->n_handlers = n_handlers;
        udpif->n_revalidators = n_revalidators;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        dpif_enable_upcall(udpif->dpif);

        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        udpif->reval_exit = false;
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        udpif->ukeys = xmalloc(sizeof *udpif->ukeys * n_revalidators);
        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            cmap_init(&udpif->ukeys[i].cmap);
            ovs_mutex_init(&udpif->ukeys[i].mutex);
            revalidator->mutex = &udpif->ukeys[i].mutex;
            revalidator->ukeys = &udpif->ukeys[i].cmap;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
    }
}
/* Tells 'udpif' how many threads it should use to handle upcalls.
 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
 * datapath handle must have packet reception enabled before starting
 * threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers,
                  size_t n_revalidators)
{
    ovs_assert(n_handlers && n_revalidators);

    ovsrcu_quiesce_start();
    if (udpif->n_handlers != n_handlers
        || udpif->n_revalidators != n_revalidators) {
        udpif_stop_threads(udpif);
    }

    if (!udpif->handlers && !udpif->revalidators) {
        int error;

        error = dpif_handlers_set(udpif->dpif, n_handlers);
        if (error) {
            VLOG_ERR("failed to configure handlers in dpif %s: %s",
                     dpif_name(udpif->dpif), ovs_strerror(error));
            return;
        }

        udpif_start_threads(udpif, n_handlers, n_revalidators);
    }
    ovsrcu_quiesce_end();
}
/* Waits for all ongoing upcall translations to complete. This ensures that
 * there are no transient references to any removed ofprotos (or other
 * objects). In particular, this should be called after an ofproto is removed
 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
void
udpif_synchronize(struct udpif *udpif)
{
    /* This is stronger than necessary. It would be sufficient to ensure
     * (somehow) that each handler and revalidator thread had passed through
     * its main loop once. */
    size_t n_handlers = udpif->n_handlers;
    size_t n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();
    udpif_stop_threads(udpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);
    ovsrcu_quiesce_end();
}
/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}

/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath. Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up-to-date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}
void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "handlers", udpif->n_handlers);

    simap_increase(usage, "revalidators", udpif->n_revalidators);
    for (i = 0; i < udpif->n_revalidators; i++) {
        simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
    }
}
/* Remove flows from a single datapath. */
void
udpif_flush(struct udpif *udpif)
{
    size_t n_handlers, n_revalidators;

    n_handlers = udpif->n_handlers;
    n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();

    udpif_stop_threads(udpif);
    dpif_flow_flush(udpif->dpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);

    ovsrcu_quiesce_end();
}
/* Removes all flows from all datapaths. */
static void
udpif_flush_all_datapaths(void)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        udpif_flush(udpif);
    }
}
static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
    long long int time, now;
    unsigned long flow_count;
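    /* Refresh the cached datapath flow count at most every 100 ms; the
     * trylock lets a thread skip the refresh entirely if another thread is
     * already performing it, falling back to the cached value. */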
    now = time_msec();
    atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
    if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
        struct dpif_dp_stats stats;

        atomic_store_relaxed(&udpif->n_flows_timestamp, now);
        dpif_get_dp_stats(udpif->dpif, &stats);
        flow_count = stats.n_flows;
        atomic_store_relaxed(&udpif->n_flows, flow_count);
        ovs_mutex_unlock(&udpif->n_flows_mutex);
    } else {
        atomic_read_relaxed(&udpif->n_flows, &flow_count);
    }
    return flow_count;
}
/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
 * upcalls from dpif, processes the batch and installs corresponding flows
 * in the datapath. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;
    struct udpif *udpif = handler->udpif;

    while (!latch_is_set(&handler->udpif->exit_latch)) {
        if (!recv_upcalls(handler)) {
            dpif_recv_wait(udpif->dpif, handler->handler_id);
            latch_wait(&udpif->exit_latch);
            poll_block();
        }
        coverage_clear();
    }

    return NULL;
}
static size_t
recv_upcalls(struct handler *handler)
{
    struct udpif *udpif = handler->udpif;
    uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
    struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
    struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
    struct upcall upcalls[UPCALL_MAX_BATCH];
    struct flow flows[UPCALL_MAX_BATCH];
    size_t n_upcalls, i;

    n_upcalls = 0;
    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
        struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
        struct upcall *upcall = &upcalls[n_upcalls];
        struct flow *flow = &flows[n_upcalls];
        struct pkt_metadata md;
        int error;

        ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
                        sizeof recv_stubs[n_upcalls]);
        if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
            ofpbuf_uninit(recv_buf);
            break;
        }

        if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
            == ODP_FIT_ERROR) {
            goto free_dupcall;
        }

        error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
                               dupcall->type, dupcall->userdata, flow);
        if (error) {
            if (error == ENODEV) {
                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto. This can happen if a port is removed
                 * while traffic is being received. Print a rate-limited
                 * message in case it happens frequently. */
                dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
                              dupcall->key_len, NULL, 0, NULL, 0, NULL);
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, flow->in_port.odp_port);
            }
            goto free_dupcall;
        }

        upcall->key = dupcall->key;
        upcall->key_len = dupcall->key_len;
        upcall->out_tun_key = dupcall->out_tun_key;

        if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
            upcall->vsp_adjusted = true;
        }

        md = pkt_metadata_from_flow(flow);
        flow_extract(&dupcall->packet, &md, flow);

        error = process_upcall(udpif, upcall, NULL);
        if (error) {
            goto cleanup;
        }

        n_upcalls++;
        continue;

cleanup:
        upcall_uninit(upcall);
free_dupcall:
        ofpbuf_uninit(&dupcall->packet);
        ofpbuf_uninit(recv_buf);
    }

    if (n_upcalls) {
        handle_upcalls(handler->udpif, upcalls, n_upcalls);
        for (i = 0; i < n_upcalls; i++) {
            ofpbuf_uninit(&dupcalls[i].packet);
            ofpbuf_uninit(&recv_bufs[i]);
            upcall_uninit(&upcalls[i]);
        }
    }

    return n_upcalls;
}
static void *
udpif_revalidator(void *arg)
{
    /* Used by all revalidators. */
    struct revalidator *revalidator = arg;
    struct udpif *udpif = revalidator->udpif;
    bool leader = revalidator == &udpif->revalidators[0];

    /* Used only by the leader. */
    long long int start_time = 0;
    uint64_t last_reval_seq = 0;
    size_t n_flows = 0;

    revalidator->id = ovsthread_id_self();
    for (;;) {
        if (leader) {
            uint64_t reval_seq;

            reval_seq = seq_read(udpif->reval_seq);
            udpif->need_revalidate = last_reval_seq != reval_seq;
            last_reval_seq = reval_seq;

            n_flows = udpif_get_n_flows(udpif);
            udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
            udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;

            /* Only the leader checks the exit latch to prevent a race where
             * some threads think it's true and exit and others think it's
             * false and block indefinitely on the reval_barrier. */
            udpif->reval_exit = latch_is_set(&udpif->exit_latch);

            start_time = time_msec();
            if (!udpif->reval_exit) {
                udpif->dump = dpif_flow_dump_create(udpif->dpif);
            }
        }

        /* Wait for the leader to start the flow dump. */
        ovs_barrier_block(&udpif->reval_barrier);
        if (udpif->reval_exit) {
            break;
        }
        revalidate(revalidator);

        /* Wait for all flows to have been dumped before we garbage collect. */
        ovs_barrier_block(&udpif->reval_barrier);
        revalidator_sweep(revalidator);

        /* Wait for all revalidators to finish garbage collection. */
        ovs_barrier_block(&udpif->reval_barrier);

        if (leader) {
            unsigned int flow_limit;
            long long int duration;

            atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

            dpif_flow_dump_destroy(udpif->dump);
            seq_change(udpif->dump_seq);

            duration = MAX(time_msec() - start_time, 1);
            udpif->dump_duration = duration;
            if (duration > 2000) {
                flow_limit /= duration / 1000;
            } else if (duration > 1300) {
                flow_limit = flow_limit * 3 / 4;
            } else if (duration < 1000 && n_flows > 2000
                       && flow_limit < n_flows * 1000 / duration) {
                flow_limit += 1000;
            }
            flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
            atomic_store_relaxed(&udpif->flow_limit, flow_limit);
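            /* In short: dumps over 2 s cut the limit roughly in proportion
             * to the overrun, dumps over 1.3 s shave off a quarter, and fast
             * dumps with plenty of flows grow the limit, always clamped to
             * the range [1000, ofproto_flow_limit]. */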
            if (duration > 2000) {
                VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
                          duration);
            }
        }

        poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
        seq_wait(udpif->reval_seq, last_reval_seq);
        latch_wait(&udpif->exit_latch);
        poll_block();
    }

    return NULL;
}
static enum upcall_type
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
{
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(userdata), userdata_len);
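    /* Each branch below checks both the cookie type and that 'userdata_len'
     * matches the size of the corresponding cookie variant; the MAX(8, ...)
     * allows for the datapath padding small userdata attributes out to
     * 8 bytes. */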
    if (userdata_len == MAX(8, sizeof cookie.sflow)
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}
/* Calculates slow path actions for 'xout'. 'buf' must statically be
 * initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
                  const struct flow *flow, odp_port_t odp_in_port,
                  struct ofpbuf *buf)
{
    union user_action_cookie cookie;
    odp_port_t port;
    uint32_t pid;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = xout->slow;

    port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
           ? ODPP_NONE
           : odp_in_port;
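    /* The pid selects which upcall channel receives the resulting userspace
     * action; hashing on the 5-tuple keeps packets of a given flow on one
     * handler thread. */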
    pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
    odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, ODPP_NONE,
                             buf);
}
/* If there is no error, the upcall must be destroyed with upcall_uninit()
 * before quiescing, as the referred objects are guaranteed to exist only
 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
 * since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
               const struct ofpbuf *packet, enum dpif_upcall_type type,
               const struct nlattr *userdata, const struct flow *flow)
{
    int error;

    error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
                         &upcall->sflow, NULL, &upcall->in_port);
    if (error) {
        return error;
    }

    upcall->flow = flow;
    upcall->packet = packet;
    upcall->type = type;
    upcall->userdata = userdata;
    ofpbuf_init(&upcall->put_actions, 0);

    upcall->xout_initialized = false;
    upcall->vsp_adjusted = false;

    upcall->key = NULL;
    upcall->key_len = 0;

    upcall->out_tun_key = NULL;

    return 0;
}
static void
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
             struct ofpbuf *odp_actions)
{
    struct dpif_flow_stats stats;
    struct xlate_in xin;

    stats.n_packets = 1;
    stats.n_bytes = ofpbuf_size(upcall->packet);
    stats.used = time_msec();
    stats.tcp_flags = ntohs(upcall->flow->tcp_flags);

    xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
                  stats.tcp_flags, upcall->packet);
    xin.odp_actions = odp_actions;

    if (upcall->type == DPIF_UC_MISS) {
        xin.resubmit_stats = &stats;
    } else {
        /* For non-miss upcalls, there's a flow in the datapath which this
         * packet was accounted to. Presumably the revalidators will deal
         * with pushing its stats eventually. */
    }

    xlate_actions(&xin, &upcall->xout);
    upcall->xout_initialized = true;

    /* Special case for fail-open mode.
     *
     * If we are in fail-open mode, but we are connected to a controller too,
     * then we should send the packet up to the controller in the hope that it
     * will try to set up a flow and thereby allow us to exit fail-open.
     *
     * See the top-level comment in fail-open.c for more information.
     *
     * Copy packets before they are modified by execution. */
    if (upcall->xout.fail_open) {
        const struct ofpbuf *packet = upcall->packet;
        struct ofproto_packet_in *pin;

        pin = xmalloc(sizeof *pin);
        pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet));
        pin->up.packet_len = ofpbuf_size(packet);
        pin->up.reason = OFPR_NO_MATCH;
        pin->up.table_id = 0;
        pin->up.cookie = OVS_BE64_MAX;
        flow_get_metadata(upcall->flow, &pin->up.fmd);
        pin->send_len = 0; /* Not used for flow table misses. */
        pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
        ofproto_dpif_send_packet_in(upcall->ofproto, pin);
    }

    if (!upcall->xout.slow) {
        ofpbuf_use_const(&upcall->put_actions,
                         ofpbuf_data(upcall->xout.odp_actions),
                         ofpbuf_size(upcall->xout.odp_actions));
    } else {
        ofpbuf_init(&upcall->put_actions, 0);
        compose_slow_path(udpif, &upcall->xout, upcall->flow,
                          upcall->flow->in_port.odp_port,
                          &upcall->put_actions);
    }
}
static void
upcall_uninit(struct upcall *upcall)
{
    if (upcall) {
        if (upcall->xout_initialized) {
            xlate_out_uninit(&upcall->xout);
        }
        ofpbuf_uninit(&upcall->put_actions);
    }
}
static int
upcall_cb(const struct ofpbuf *packet, const struct flow *flow,
          enum dpif_upcall_type type, const struct nlattr *userdata,
          struct ofpbuf *actions, struct flow_wildcards *wc,
          struct ofpbuf *put_actions, void *aux)
{
    struct udpif *udpif = aux;
    unsigned int flow_limit;
    struct upcall upcall;
    bool megaflow;
    int error;

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

    error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
                           flow);
    if (error) {
        return error;
    }

    error = process_upcall(udpif, &upcall, actions);
    if (error) {
        goto out;
    }

    if (upcall.xout.slow && put_actions) {
        ofpbuf_put(put_actions, ofpbuf_data(&upcall.put_actions),
                   ofpbuf_size(&upcall.put_actions));
    }

    if (OVS_LIKELY(wc)) {
        if (megaflow) {
            /* XXX: This could be avoided with sufficient API changes. */
            *wc = upcall.xout.wc;
        } else {
            flow_wildcards_init_for_packet(wc, flow);
        }
    }

    if (udpif_get_n_flows(udpif) >= flow_limit) {
        error = ENOSPC;
    }

out:
    upcall_uninit(&upcall);
    return error;
}
static int
process_upcall(struct udpif *udpif, struct upcall *upcall,
               struct ofpbuf *odp_actions)
{
    const struct nlattr *userdata = upcall->userdata;
    const struct ofpbuf *packet = upcall->packet;
    const struct flow *flow = upcall->flow;

    switch (classify_upcall(upcall->type, userdata)) {
    case MISS_UPCALL:
        upcall_xlate(udpif, upcall, odp_actions);
        return 0;

    case SFLOW_UPCALL:
        if (upcall->sflow) {
            union user_action_cookie cookie;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
            dpif_sflow_received(upcall->sflow, packet, flow,
                                flow->in_port.odp_port, &cookie);
        }
        break;

    case IPFIX_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;
            struct flow_tnl output_tunnel_key;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);

            if (upcall->out_tun_key) {
                memset(&output_tunnel_key, 0, sizeof output_tunnel_key);
                odp_tun_key_from_attr(upcall->out_tun_key,
                                      &output_tunnel_key);
            }
            dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
                                     flow->in_port.odp_port,
                                     cookie.ipfix.output_odp_port,
                                     upcall->out_tun_key ?
                                         &output_tunnel_key : NULL);
        }
        break;

    case FLOW_SAMPLE_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);

            /* The flow reflects exactly the contents of the packet.
             * Sample the packet using it. */
            dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
                                   cookie.flow_sample.collector_set_id,
                                   cookie.flow_sample.probability,
                                   cookie.flow_sample.obs_domain_id,
                                   cookie.flow_sample.obs_point_id);
        }
        break;

    case BAD_UPCALL:
        break;
    }

    return EAGAIN;
}
static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct odputil_keybuf mask_bufs[UPCALL_MAX_BATCH];
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct dpif_op ops[UPCALL_MAX_BATCH * 2];
    unsigned int flow_limit;
    size_t n_ops, i;
    bool may_put, megaflow;

    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    atomic_read_relaxed(&enable_megaflows, &megaflow);

    may_put = udpif_get_n_flows(udpif) < flow_limit;

    /* Handle the packets individually in order of arrival.
     *
     * - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *   processes received packets for these protocols.
     *
     * - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *   controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct ofpbuf *packet = upcall->packet;
        struct dpif_op *op;

        if (upcall->vsp_adjusted) {
            /* This packet was received on a VLAN splinter port. We added a
             * VLAN to the packet to make the packet resemble the flow, but the
             * actions were composed assuming that the packet contained no
             * VLAN. So, we must remove the VLAN header from the packet before
             * trying to execute the actions. */
            if (ofpbuf_size(upcall->xout.odp_actions)) {
                eth_pop_vlan(CONST_CAST(struct ofpbuf *, upcall->packet));
            }

            /* Remove the flow vlan tags inserted by vlan splinter logic
             * to ensure megaflow masks generated match the data path flow. */
            CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
        }

        /* Do not install a flow into the datapath if:
         *
         *    - The datapath already has too many flows.
         *
         *    - We received this packet via some flow installed in the kernel
         *      already. */
        if (may_put && upcall->type == DPIF_UC_MISS) {
            struct ofpbuf mask;

            ofpbuf_use_stack(&mask, &mask_bufs[i], sizeof mask_bufs[i]);

            if (megaflow) {
                size_t max_mpls;
                bool recirc;

                recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
                max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
                odp_flow_key_from_mask(&mask, &upcall->xout.wc.masks,
                                       upcall->flow, UINT32_MAX, max_mpls,
                                       recirc);
            }

            op = &ops[n_ops++];
            op->type = DPIF_OP_FLOW_PUT;
            op->u.flow_put.flags = DPIF_FP_CREATE;
            op->u.flow_put.key = upcall->key;
            op->u.flow_put.key_len = upcall->key_len;
            op->u.flow_put.mask = ofpbuf_data(&mask);
            op->u.flow_put.mask_len = ofpbuf_size(&mask);
            op->u.flow_put.stats = NULL;
            op->u.flow_put.actions = ofpbuf_data(&upcall->put_actions);
            op->u.flow_put.actions_len = ofpbuf_size(&upcall->put_actions);
        }

        if (ofpbuf_size(upcall->xout.odp_actions)) {
            op = &ops[n_ops++];
            op->type = DPIF_OP_EXECUTE;
            op->u.execute.packet = CONST_CAST(struct ofpbuf *, packet);
            odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
                                    &op->u.execute.md);
            op->u.execute.actions = ofpbuf_data(upcall->xout.odp_actions);
            op->u.execute.actions_len = ofpbuf_size(upcall->xout.odp_actions);
            op->u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
            op->u.execute.probe = false;
        }
    }

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i];
    }
    dpif_operate(udpif->dpif, opsp, n_ops);
}
static struct udpif_key *
ukey_lookup(struct udpif *udpif, const struct nlattr *key, size_t key_len,
            uint32_t hash)
{
    struct udpif_key *ukey;
    struct cmap *cmap = &udpif->ukeys[hash % udpif->n_revalidators].cmap;

    CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, hash, cmap) {
        if (ukey->key_len == key_len && !memcmp(ukey->key, key, key_len)) {
            return ukey;
        }
    }
    return NULL;
}
/* Creates a ukey for 'key' and 'key_len', returning it with ukey->mutex in
 * a locked state. */
static struct udpif_key *
ukey_create(const struct nlattr *key, size_t key_len, long long int used,
            uint32_t hash)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct udpif_key *ukey = xmalloc(sizeof *ukey);

    ovs_mutex_init(&ukey->mutex);
    ukey->key = &ukey->key_buf_nla;
    memcpy(&ukey->key_buf, key, key_len);
    ukey->key_len = key_len;
    ukey->hash = hash;

    ovs_mutex_lock(&ukey->mutex);
    ukey->dump_seq = 0;
    ukey->flow_exists = true;
    ukey->created = used ? used : time_msec();
    memset(&ukey->stats, 0, sizeof ukey->stats);
    ukey->xcache = NULL;

    return ukey;
}
/* Searches for a ukey in 'udpif->ukeys' that matches 'key' and 'key_len' and
 * attempts to lock the ukey. If the ukey does not exist, create it.
 *
 * Returns true on success, setting *result to the matching ukey and returning
 * it in a locked state. Otherwise, returns false and clears *result. */
static bool
ukey_acquire(struct udpif *udpif, const struct nlattr *key, size_t key_len,
             long long int used, struct udpif_key **result)
    OVS_TRY_LOCK(true, (*result)->mutex)
{
    struct udpif_key *ukey;
    uint32_t hash, idx;
    bool locked = false;

    hash = hash_bytes(key, key_len, udpif->secret);
    idx = hash % udpif->n_revalidators;

    ovs_mutex_lock(&udpif->ukeys[idx].mutex);
    ukey = ukey_lookup(udpif, key, key_len, hash);
    if (!ukey) {
        ukey = ukey_create(key, key_len, used, hash);
        cmap_insert(&udpif->ukeys[idx].cmap, &ukey->cmap_node, hash);
        locked = true;
    } else if (!ovs_mutex_trylock(&ukey->mutex)) {
        locked = true;
    }
    ovs_mutex_unlock(&udpif->ukeys[idx].mutex);

    if (locked) {
        *result = ukey;
    } else {
        *result = NULL;
    }
    return locked;
}
static void
ukey_delete__(struct udpif_key *ukey)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    xlate_cache_delete(ukey->xcache);
    ovs_mutex_destroy(&ukey->mutex);
    free(ukey);
}
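/* ukey_delete() unlinks the ukey and defers the actual free with
 * ovsrcu_postpone(), so concurrent cmap readers that may still hold a
 * pointer to it never see freed memory. */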
static void
ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey)
{
    cmap_remove(revalidator->ukeys, &ukey->cmap_node, ukey->hash);
    ovsrcu_postpone(ukey_delete__, ukey);
}
static bool
should_revalidate(const struct udpif *udpif, uint64_t packets,
                  long long int used)
{
    long long int metric, now, duration;

    if (udpif->dump_duration < 200) {
        /* We are likely to handle full revalidation for the flows. */
        return true;
    }

    /* Calculate the mean time between seeing these packets. If this
     * exceeds the threshold, then delete the flow rather than performing
     * costly revalidation for flows that aren't being hit frequently.
     *
     * This is targeted at situations where the dump_duration is high (~1s),
     * and revalidation is triggered by a call to udpif_revalidate(). In
     * these situations, revalidation of all flows causes fluctuations in the
     * flow_limit due to the interaction with the dump_duration and max_idle.
     * This tends to result in deletion of low-throughput flows anyway, so
     * skip the revalidation and just delete those flows. */
    packets = MAX(packets, 1);
    now = MAX(used, time_msec());
    duration = now - used;
    metric = duration / packets;
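    /* 'metric' is the mean interval in ms between packets: e.g. 1000 packets
     * over a 10-second lifetime gives 10 ms/packet, comfortably under the
     * 200 ms (~5 packets/s) cutoff below. */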
    if (metric < 200) {
        /* The flow is receiving more than ~5pps, so keep it. */
        return true;
    }
    return false;
}
static bool
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
                const struct dpif_flow *f)
    OVS_REQUIRES(ukey->mutex)
{
    uint64_t slow_path_buf[128 / 8];
    struct xlate_out xout, *xoutp;
    struct netflow *netflow;
    struct ofproto_dpif *ofproto;
    struct dpif_flow_stats push;
    struct ofpbuf xout_actions;
    struct flow flow, dp_mask;
    uint32_t *dp32, *xout32;
    ofp_port_t ofp_in_port;
    struct xlate_in xin;
    long long int last_used;
    int error;
    size_t i;
    bool ok;

    ok = false;
    xoutp = NULL;
    netflow = NULL;

    last_used = ukey->stats.used;
    push.used = f->stats.used;
    push.tcp_flags = f->stats.tcp_flags;
    push.n_packets = (f->stats.n_packets > ukey->stats.n_packets
                      ? f->stats.n_packets - ukey->stats.n_packets
                      : 0);
    push.n_bytes = (f->stats.n_bytes > ukey->stats.n_bytes
                    ? f->stats.n_bytes - ukey->stats.n_bytes
                    : 0);

    if (udpif->need_revalidate && last_used
        && !should_revalidate(udpif, push.n_packets, last_used)) {
        ok = false;
        goto exit;
    }

    /* We will push the stats, so update the ukey stats cache. */
    ukey->stats = f->stats;
    if (!push.n_packets && !udpif->need_revalidate) {
        ok = true;
        goto exit;
    }

    if (ukey->xcache && !udpif->need_revalidate) {
        xlate_push_stats(ukey->xcache, &push);
        ok = true;
        goto exit;
    }
    if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
                         &ofp_in_port);
    if (error) {
        goto exit;
    }

    if (udpif->need_revalidate) {
        xlate_cache_clear(ukey->xcache);
    }
    if (!ukey->xcache) {
        ukey->xcache = xlate_cache_new();
    }

    xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
                  NULL);
    if (push.n_packets) {
        xin.resubmit_stats = &push;
        xin.may_learn = true;
    }
    xin.xcache = ukey->xcache;
    xin.skip_wildcards = !udpif->need_revalidate;
    xlate_actions(&xin, &xout);
    xoutp = &xout;

    if (!udpif->need_revalidate) {
        ok = true;
        goto exit;
    }

    if (!xout.slow) {
        ofpbuf_use_const(&xout_actions, ofpbuf_data(xout.odp_actions),
                         ofpbuf_size(xout.odp_actions));
    } else {
        ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
        compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
                          &xout_actions);
    }

    if (f->actions_len != ofpbuf_size(&xout_actions)
        || memcmp(ofpbuf_data(&xout_actions), f->actions, f->actions_len)) {
        goto exit;
    }
    if (odp_flow_key_to_mask(f->mask, f->mask_len, &dp_mask, &flow)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
     * directly check that the masks are the same. Instead we check that the
     * mask in the kernel is more specific i.e. less wildcarded, than what
     * we've calculated here. This guarantees we don't catch any packets we
     * shouldn't with the megaflow. */
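    /* For example, if the datapath mask for some word is 0xffff and the one
     * computed here is 0x00ff, then 0xffff | 0x00ff == 0xffff and the check
     * passes: the datapath matches on a superset of the required bits. */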
    dp32 = (uint32_t *) &dp_mask;
    xout32 = (uint32_t *) &xout.wc.masks;
    for (i = 0; i < FLOW_U32S; i++) {
        if ((dp32[i] | xout32[i]) != dp32[i]) {
            goto exit;
        }
    }

    ok = true;

exit:
    if (netflow && !ok) {
        netflow_flow_clear(netflow, &flow);
    }
    xlate_out_uninit(xoutp);
    return ok;
}
struct dump_op {
    struct udpif_key *ukey;
    struct dpif_flow_stats stats; /* Stats for 'op'. */
    struct dpif_op op;            /* Flow del operation. */
};

static void
dump_op_init(struct dump_op *op, const struct nlattr *key, size_t key_len,
             struct udpif_key *ukey)
{
    op->ukey = ukey;
    op->op.type = DPIF_OP_FLOW_DEL;
    op->op.u.flow_del.key = key;
    op->op.u.flow_del.key_len = key_len;
    op->op.u.flow_del.stats = &op->stats;
}
static void
push_dump_ops__(struct udpif *udpif, struct dump_op *ops, size_t n_ops)
{
    struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
    size_t i;

    ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i].op;
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    for (i = 0; i < n_ops; i++) {
        struct dump_op *op = &ops[i];
        struct dpif_flow_stats *push, *stats, push_buf;

        stats = op->op.u.flow_del.stats;
        push = &push_buf;

        ovs_mutex_lock(&op->ukey->mutex);
        push->used = MAX(stats->used, op->ukey->stats.used);
        push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
        push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
        push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
        ovs_mutex_unlock(&op->ukey->mutex);

        if (push->n_packets || netflow_exists()) {
            struct ofproto_dpif *ofproto;
            struct netflow *netflow;
            ofp_port_t ofp_in_port;
            struct flow flow;
            int error;

            ovs_mutex_lock(&op->ukey->mutex);
            if (op->ukey->xcache) {
                xlate_push_stats(op->ukey->xcache, push);
                ovs_mutex_unlock(&op->ukey->mutex);
                continue;
            }
            ovs_mutex_unlock(&op->ukey->mutex);

            if (odp_flow_key_to_flow(op->op.u.flow_del.key,
                                     op->op.u.flow_del.key_len, &flow)
                == ODP_FIT_ERROR) {
                continue;
            }

            error = xlate_lookup(udpif->backer, &flow, &ofproto,
                                 NULL, NULL, &netflow, &ofp_in_port);
            if (!error) {
                struct xlate_in xin;

                xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
                              push->tcp_flags, NULL);
                xin.resubmit_stats = push->n_packets ? push : NULL;
                xin.may_learn = push->n_packets > 0;
                xin.skip_wildcards = true;
                xlate_actions_for_side_effects(&xin);

                if (netflow) {
                    netflow_flow_clear(netflow, &flow);
                }
            }
        }
    }
}
static void
push_dump_ops(struct revalidator *revalidator,
              struct dump_op *ops, size_t n_ops)
{
    size_t i;

    push_dump_ops__(revalidator->udpif, ops, n_ops);
    ovs_mutex_lock(revalidator->mutex);
    for (i = 0; i < n_ops; i++) {
        ukey_delete(revalidator, ops[i].ukey);
    }
    ovs_mutex_unlock(revalidator->mutex);
}
static void
revalidate(struct revalidator *revalidator)
{
    struct udpif *udpif = revalidator->udpif;
    struct dpif_flow_dump_thread *dump_thread;
    uint64_t dump_seq;
    unsigned int flow_limit;

    dump_seq = seq_read(udpif->dump_seq);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    dump_thread = dpif_flow_dump_thread_create(udpif->dump);
    for (;;) {
        struct dump_op ops[REVALIDATE_MAX_BATCH];
        int n_ops = 0;

        struct dpif_flow flows[REVALIDATE_MAX_BATCH];
        const struct dpif_flow *f;
        int n_dumped;

        long long int max_idle;
        long long int now;
        size_t n_dp_flows;
        bool kill_them_all;

        n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
        if (!n_dumped) {
            break;
        }

        now = time_msec();

        /* In normal operation we want to keep flows around until they have
         * been idle for 'ofproto_max_idle' milliseconds. However:
         *
         *     - If the number of datapath flows climbs above 'flow_limit',
         *       drop that down to 100 ms to try to bring the flows down to
         *       the limit.
         *
         *     - If the number of datapath flows climbs above twice
         *       'flow_limit', delete all the datapath flows as an emergency
         *       measure. (We reassess this condition for the next batch of
         *       datapath flows, so we will recover before all the flows are
         *       deleted.) */
        n_dp_flows = udpif_get_n_flows(udpif);
        kill_them_all = n_dp_flows > flow_limit * 2;
        max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
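        /* E.g. with the default 10000-flow limit: past 10000 flows the idle
         * cutoff collapses from ofproto_max_idle to 100 ms, and past 20000
         * flows every dumped flow in this batch is deleted outright. */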
        for (f = flows; f < &flows[n_dumped]; f++) {
            long long int used = f->stats.used;
            struct udpif_key *ukey;
            bool already_dumped, keep;

            if (!ukey_acquire(udpif, f->key, f->key_len, used, &ukey)) {
                /* We couldn't acquire the ukey. This means that
                 * another revalidator is processing this flow
                 * concurrently, so don't bother processing it. */
                COVERAGE_INC(dumped_duplicate_flow);
                continue;
            }

            already_dumped = ukey->dump_seq == dump_seq;
            if (already_dumped) {
                /* The flow has already been handled during this flow dump
                 * operation. Skip it. */
                if (ukey->xcache) {
                    COVERAGE_INC(dumped_duplicate_flow);
                } else {
                    COVERAGE_INC(dumped_new_flow);
                }
                ovs_mutex_unlock(&ukey->mutex);
                continue;
            }

            if (!used) {
                used = ukey->created;
            }
            if (kill_them_all || (used && used < now - max_idle)) {
                keep = false;
            } else {
                keep = revalidate_ukey(udpif, ukey, f);
            }
            ukey->dump_seq = dump_seq;
            ukey->flow_exists = keep;

            if (!keep) {
                dump_op_init(&ops[n_ops++], f->key, f->key_len, ukey);
            }
            ovs_mutex_unlock(&ukey->mutex);
        }

        if (n_ops) {
            push_dump_ops__(udpif, ops, n_ops);
        }
    }
    dpif_flow_dump_thread_destroy(dump_thread);
}
/* Called with exclusive access to 'revalidator' and 'ukey'. */
static bool
handle_missed_revalidation(struct revalidator *revalidator,
                           struct udpif_key *ukey)
{
    struct udpif *udpif = revalidator->udpif;
    struct dpif_flow flow;
    struct ofpbuf buf;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool keep = false;

    COVERAGE_INC(revalidate_missed_dp_flow);

    ofpbuf_use_stub(&buf, &stub, sizeof stub);
    if (!dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &buf, &flow)) {
        ovs_mutex_lock(&ukey->mutex);
        keep = revalidate_ukey(udpif, ukey, &flow);
        ovs_mutex_unlock(&ukey->mutex);
    }
    ofpbuf_uninit(&buf);

    return keep;
}
static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
{
    struct dump_op ops[REVALIDATE_MAX_BATCH];
    struct udpif_key *ukey;
    size_t n_ops = 0;
    uint64_t dump_seq;

    dump_seq = seq_read(revalidator->udpif->dump_seq);

    CMAP_FOR_EACH (ukey, cmap_node, revalidator->ukeys) {
        bool flow_exists, seq_mismatch;

        ovs_mutex_lock(&ukey->mutex);
        flow_exists = ukey->flow_exists;
        seq_mismatch = (ukey->dump_seq != dump_seq
                        && revalidator->udpif->need_revalidate);
        ovs_mutex_unlock(&ukey->mutex);

        if (flow_exists
            && (purge
                || (seq_mismatch
                    && !handle_missed_revalidation(revalidator, ukey)))) {
            struct dump_op *op = &ops[n_ops++];

            dump_op_init(op, ukey->key, ukey->key_len, ukey);
            if (n_ops == REVALIDATE_MAX_BATCH) {
                push_dump_ops(revalidator, ops, n_ops);
                n_ops = 0;
            }
        } else if (!flow_exists) {
            ovs_mutex_lock(revalidator->mutex);
            ukey_delete(revalidator, ukey);
            ovs_mutex_unlock(revalidator->mutex);
        }
    }

    if (n_ops) {
        push_dump_ops(revalidator, ops, n_ops);
    }
}
static void
revalidator_sweep(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, false);
}

static void
revalidator_purge(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, true);
}
static void
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        unsigned int flow_limit;
        size_t n_revalidators = udpif->n_revalidators;
        size_t i;

        atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

        ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
        ds_put_format(&ds, "\tflows         : (current %lu)"
                      " (avg %u) (max %u) (limit %u)\n",
                      udpif_get_n_flows(udpif),
                      udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
        ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);

        ds_put_char(&ds, '\n');
        for (i = 0; i < n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            ds_put_format(&ds, "\t%u: (keys %"PRIuSIZE")\n",
                          revalidator->id, cmap_count(&udpif->ukeys[i].cmap));
        }
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
/* Disable using the megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
                                 int argc OVS_UNUSED,
                                 const char *argv[] OVS_UNUSED,
                                 void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, false);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows disabled");
}
/* Re-enable using megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED,
                                const char *argv[] OVS_UNUSED,
                                void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, true);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows enabled");
}
/* Set the flow limit.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
                              int argc OVS_UNUSED,
                              const char *argv[],
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;
    unsigned int flow_limit = atoi(argv[1]);

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store_relaxed(&udpif->flow_limit, flow_limit);
    }
    ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
upcall_unixctl_dump_wait(struct unixctl_conn *conn,
                         int argc OVS_UNUSED,
                         const char *argv[] OVS_UNUSED,
                         void *aux OVS_UNUSED)
{
    if (list_is_singleton(&all_udpifs)) {
        struct udpif *udpif = NULL;
        size_t len;

        udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
        len = (udpif->n_conns + 1) * sizeof *udpif->conns;
        udpif->conn_seq = seq_read(udpif->dump_seq);
        udpif->conns = xrealloc(udpif->conns, len);
        udpif->conns[udpif->n_conns++] = conn;
    } else {
        unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
    }
}
static void
upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        size_t n;

        for (n = 0; n < udpif->n_revalidators; n++) {
            revalidator_purge(&udpif->revalidators[n]);
        }
    }
    unixctl_command_reply(conn, "");
}