2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
26 #include "netdev-provider.h"
27 #include "netdev-vport.h"
29 #include "openvswitch/dynamic-string.h"
30 #include "openvswitch/list.h"
31 #include "openvswitch/ofp-print.h"
32 #include "openvswitch/ofpbuf.h"
33 #include "openvswitch/vlog.h"
34 #include "ovs-atomic.h"
36 #include "pcap-file.h"
37 #include "poll-loop.h"
41 #include "unaligned.h"
44 #include "reconnect.h"
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
/* One bidirectional packet channel carried over an OVS 'stream'.
 * Frames are exchanged with a 2-byte big-endian length prefix (see
 * dummy_packet_stream_run()).  'rxbuf' accumulates the partially received
 * frame.  NOTE(review): a 'txq' list member is used elsewhere
 * (ovs_list_init(&s->txq)) but is elided from this listing. */
50 struct dummy_packet_stream {
51 struct stream *stream;
52 struct dp_packet rxbuf;
/* Kind of traffic connection configured on a dummy netdev via the
 * "pstream" (PASSIVE) or "stream" (ACTIVE) config keys. */
56 enum dummy_packet_conn_type {
57 NONE, /* No connection is configured. */
58 PASSIVE, /* Listener. */
59 ACTIVE /* Connect to listener. */
/* Observable connection state reported by dummy_netdev_get_conn_state(). */
62 enum dummy_netdev_conn_state {
63 CONN_STATE_CONNECTED, /* Listener connected. */
64 CONN_STATE_NOT_CONNECTED, /* Listener not connected. */
65 CONN_STATE_UNKNOWN, /* No relevant information. */
/* Passive (listening) side of a dummy packet connection: one pstream
 * listener plus an array of accepted per-client streams.
 * NOTE(review): an 'n_streams' count member is used elsewhere
 * (pconn->n_streams) but is elided from this listing. */
68 struct dummy_packet_pconn {
69 struct pstream *pstream;
70 struct dummy_packet_stream *streams;
/* Active (connecting) side of a dummy packet connection: a single stream
 * plus a reconnect FSM that drives (re)connection attempts. */
74 struct dummy_packet_rconn {
75 struct dummy_packet_stream *rstream;
76 struct reconnect *reconnect;
79 struct dummy_packet_conn {
80 enum dummy_packet_conn_type type;
82 struct dummy_packet_pconn pconn;
83 struct dummy_packet_rconn rconn;
/* List node wrapping one dp_packet for membership in an ovs_list queue
 * (used for both stream txq and rxq recv_queue). */
87 struct pkt_list_node {
88 struct dp_packet *pkt;
89 struct ovs_list list_node;
92 /* Protects 'dummy_list'. */
93 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
95 /* Contains all 'struct dummy_dev's. */
96 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
97 = OVS_LIST_INITIALIZER(&dummy_list);
103 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
105 /* Protects all members below. */
106 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
108 struct eth_addr hwaddr OVS_GUARDED;
110 struct netdev_stats stats OVS_GUARDED;
111 enum netdev_flags flags OVS_GUARDED;
112 int ifindex OVS_GUARDED;
114 struct dummy_packet_conn conn OVS_GUARDED;
116 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
118 struct in_addr address, netmask;
119 struct in6_addr ipv6, ipv6_mask;
120 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
123 /* Max 'recv_queue_len' in struct netdev_dummy. */
124 #define NETDEV_DUMMY_MAX_QUEUE 100
/* Per-queue receive state for a dummy netdev.  Packets are delivered to
 * 'recv_queue' by netdev_dummy_queue_packet() and drained by
 * netdev_dummy_rxq_recv(); 'seq' wakes waiters when packets arrive. */
126 struct netdev_rxq_dummy {
127 struct netdev_rxq up;
128 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
129 struct ovs_list recv_queue;
130 int recv_queue_len; /* ovs_list_size(&recv_queue). */
131 struct seq *seq; /* Reports newly queued packets. */
134 static unixctl_cb_func netdev_dummy_set_admin_state;
135 static int netdev_dummy_construct(struct netdev *);
136 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct dp_packet *);
138 static void dummy_packet_stream_close(struct dummy_packet_stream *);
140 static void pkt_list_delete(struct ovs_list *);
/* Returns true if 'class' is the dummy netdev class.  The 'construct'
 * callback is used as the class's identifying fingerprint, which also
 * matches clones made by netdev_dummy_override(). */
143 is_dummy_class(const struct netdev_class *class)
145 return class->construct == netdev_dummy_construct;
/* Downcasts 'netdev' to its enclosing netdev_dummy.  Asserts that the
 * device really belongs to the dummy class before the CONTAINER_OF. */
148 static struct netdev_dummy *
149 netdev_dummy_cast(const struct netdev *netdev)
151 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
152 return CONTAINER_OF(netdev, struct netdev_dummy, up);
/* Downcasts 'rx' to its enclosing netdev_rxq_dummy, asserting that the
 * owning netdev is a dummy device. */
155 static struct netdev_rxq_dummy *
156 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
158 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
159 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
/* Initializes 's' to carry packets over 'stream'.  The receive buffer is
 * only sized (2048 bytes) when a real stream is supplied.
 * NOTE(review): the assignment of 'stream' into 's' appears to be elided
 * from this listing — confirm against the full source. */
163 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
165 int rxbuf_size = stream ? 2048 : 0;
167 dp_packet_init(&s->rxbuf, rxbuf_size);
168 ovs_list_init(&s->txq);
/* Allocates and initializes a dummy_packet_stream wrapping 'stream'.
 * Caller owns the result (freed via dummy_packet_stream_close() + free()).
 * NOTE(review): the 'return s;' line appears to be elided from this
 * listing. */
171 static struct dummy_packet_stream *
172 dummy_packet_stream_create(struct stream *stream)
174 struct dummy_packet_stream *s;
176 s = xzalloc(sizeof *s);
177 dummy_packet_stream_init(s, stream);
/* Registers poll-loop wakeups for 's': always wakes on stream activity and
 * incoming data; additionally wakes for send readiness when there is
 * queued output in 'txq'. */
183 dummy_packet_stream_wait(struct dummy_packet_stream *s)
185 stream_run_wait(s->stream);
186 if (!ovs_list_is_empty(&s->txq)) {
187 stream_send_wait(s->stream);
189 stream_recv_wait(s->stream);
/* Queues the 'size'-byte frame in 'buffer' for transmission on 's',
 * prepending a 2-byte big-endian length header (hence the 2-byte headroom
 * in the clone).  Frames are silently dropped once 'txq' reaches
 * NETDEV_DUMMY_MAX_QUEUE entries. */
193 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
195 if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
197 struct pkt_list_node *node;
/* Copy the frame and push the length prefix in front of it. */
199 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
200 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
202 node = xmalloc(sizeof *node);
204 ovs_list_push_back(&s->txq, &node->list_node);
/* Drives one iteration of I/O on 's' for device 'dev': flushes queued
 * output, then reads the next length-prefixed frame and, when complete,
 * delivers it to 'dev' via netdev_dummy_queue_packet().
 * Wire format: 2-byte big-endian frame length, then the Ethernet frame.
 * NOTE(review): several lines (error returns, variable declarations,
 * loop/brace structure) are elided from this listing. */
209 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
214 stream_run(s->stream);
/* Transmit side: try to send the frame at the head of 'txq'. */
216 if (!ovs_list_is_empty(&s->txq)) {
217 struct pkt_list_node *txbuf_node;
218 struct dp_packet *txbuf;
221 ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
222 txbuf = txbuf_node->pkt;
223 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
/* Partial sends are fine: pull off what was written and retry later. */
226 dp_packet_pull(txbuf, retval);
227 if (!dp_packet_size(txbuf)) {
228 ovs_list_remove(&txbuf_node->list_node);
230 dp_packet_delete(txbuf);
232 } else if (retval != -EAGAIN) {
/* Receive side: first read the 2-byte length header... */
238 if (dp_packet_size(&s->rxbuf) < 2) {
239 n = 2 - dp_packet_size(&s->rxbuf);
/* ...then read the body; a frame shorter than an Ethernet header is a
 * protocol error. */
243 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
244 if (frame_len < ETH_HEADER_LEN) {
248 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
255 dp_packet_prealloc_tailroom(&s->rxbuf, n);
256 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
259 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
/* A complete frame: strip the length prefix and hand a copy to the
 * device's receive queues, then reset the buffer for the next frame. */
260 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
261 dp_packet_pull(&s->rxbuf, 2);
262 netdev_dummy_queue_packet(dev,
263 dp_packet_clone(&s->rxbuf));
264 dp_packet_clear(&s->rxbuf);
266 } else if (retval != -EAGAIN) {
/* Map a short/failed read to an errno; a partial frame at EOF is EPROTO. */
267 error = (retval < 0 ? -retval
268 : dp_packet_size(&s->rxbuf) ? EPROTO
/* Releases all resources owned by 's': the underlying stream, the receive
 * buffer, and any still-queued transmit packets.  Does not free 's'
 * itself. */
277 dummy_packet_stream_close(struct dummy_packet_stream *s)
279 stream_close(s->stream);
280 dp_packet_uninit(&s->rxbuf);
281 pkt_list_delete(&s->txq);
/* Zero-initializes 'conn'.  Since NONE is the first enumerator, this
 * leaves the connection type as NONE (unconfigured). */
285 dummy_packet_conn_init(struct dummy_packet_conn *conn)
287 memset(conn, 0, sizeof *conn);
292 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
295 switch (conn->type) {
297 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
301 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
311 dummy_packet_conn_close(struct dummy_packet_conn *conn)
314 struct dummy_packet_pconn *pconn = &conn->u.pconn;
315 struct dummy_packet_rconn *rconn = &conn->u.rconn;
317 switch (conn->type) {
319 pstream_close(pconn->pstream);
320 for (i = 0; i < pconn->n_streams; i++) {
321 dummy_packet_stream_close(&pconn->streams[i]);
323 free(pconn->streams);
324 pconn->pstream = NULL;
325 pconn->streams = NULL;
329 dummy_packet_stream_close(rconn->rstream);
330 free(rconn->rstream);
331 rconn->rstream = NULL;
332 reconnect_destroy(rconn->reconnect);
333 rconn->reconnect = NULL;
342 memset(conn, 0, sizeof *conn);
346 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
347 const struct smap *args)
349 const char *pstream = smap_get(args, "pstream");
350 const char *stream = smap_get(args, "stream");
352 if (pstream && stream) {
353 VLOG_WARN("Open failed: both %s and %s are configured",
358 switch (conn->type) {
361 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
364 dummy_packet_conn_close(conn);
368 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
371 dummy_packet_conn_close(conn);
381 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
383 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
385 conn->type = PASSIVE;
391 struct stream *active_stream;
392 struct reconnect *reconnect;
394 reconnect = reconnect_create(time_msec());
395 reconnect_set_name(reconnect, stream);
396 reconnect_set_passive(reconnect, false, time_msec());
397 reconnect_enable(reconnect, time_msec());
398 reconnect_set_backoff(reconnect, 100, INT_MAX);
399 reconnect_set_probe_interval(reconnect, 0);
400 conn->u.rconn.reconnect = reconnect;
403 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
404 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
408 reconnect_connected(reconnect, time_msec());
412 reconnect_connecting(reconnect, time_msec());
416 reconnect_connect_failed(reconnect, time_msec(), error);
417 stream_close(active_stream);
418 conn->u.rconn.rstream->stream = NULL;
425 dummy_pconn_run(struct netdev_dummy *dev)
426 OVS_REQUIRES(dev->mutex)
428 struct stream *new_stream;
429 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
433 error = pstream_accept(pconn->pstream, &new_stream);
435 struct dummy_packet_stream *s;
437 pconn->streams = xrealloc(pconn->streams,
438 ((pconn->n_streams + 1)
440 s = &pconn->streams[pconn->n_streams++];
441 dummy_packet_stream_init(s, new_stream);
442 } else if (error != EAGAIN) {
443 VLOG_WARN("%s: accept failed (%s)",
444 pstream_get_name(pconn->pstream), ovs_strerror(error));
445 pstream_close(pconn->pstream);
446 pconn->pstream = NULL;
447 dev->conn.type = NONE;
450 for (i = 0; i < pconn->n_streams; i++) {
451 struct dummy_packet_stream *s = &pconn->streams[i];
453 error = dummy_packet_stream_run(dev, s);
455 VLOG_DBG("%s: closing connection (%s)",
456 stream_get_name(s->stream),
457 ovs_retval_to_string(error));
458 dummy_packet_stream_close(s);
459 pconn->streams[i] = pconn->streams[--pconn->n_streams];
465 dummy_rconn_run(struct netdev_dummy *dev)
466 OVS_REQUIRES(dev->mutex)
468 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
470 switch (reconnect_run(rconn->reconnect, time_msec())) {
471 case RECONNECT_CONNECT:
475 if (rconn->rstream->stream) {
476 error = stream_connect(rconn->rstream->stream);
478 error = stream_open(reconnect_get_name(rconn->reconnect),
479 &rconn->rstream->stream, DSCP_DEFAULT);
484 reconnect_connected(rconn->reconnect, time_msec());
488 reconnect_connecting(rconn->reconnect, time_msec());
492 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
493 stream_close(rconn->rstream->stream);
494 rconn->rstream->stream = NULL;
500 case RECONNECT_DISCONNECT:
501 case RECONNECT_PROBE:
506 if (reconnect_is_connected(rconn->reconnect)) {
509 err = dummy_packet_stream_run(dev, rconn->rstream);
512 reconnect_disconnected(rconn->reconnect, time_msec(), err);
513 stream_close(rconn->rstream->stream);
514 rconn->rstream->stream = NULL;
520 dummy_packet_conn_run(struct netdev_dummy *dev)
521 OVS_REQUIRES(dev->mutex)
523 switch (dev->conn.type) {
525 dummy_pconn_run(dev);
529 dummy_rconn_run(dev);
539 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
542 switch (conn->type) {
544 pstream_wait(conn->u.pconn.pstream);
545 for (i = 0; i < conn->u.pconn.n_streams; i++) {
546 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
547 dummy_packet_stream_wait(s);
551 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
552 dummy_packet_stream_wait(conn->u.rconn.rstream);
563 dummy_packet_conn_send(struct dummy_packet_conn *conn,
564 const void *buffer, size_t size)
568 switch (conn->type) {
570 for (i = 0; i < conn->u.pconn.n_streams; i++) {
571 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
573 dummy_packet_stream_send(s, buffer, size);
574 pstream_wait(conn->u.pconn.pstream);
579 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
580 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
581 dummy_packet_stream_wait(conn->u.rconn.rstream);
/* Reports the connection state of 'conn' for the "netdev-dummy/conn-state"
 * unixctl command.  Only ACTIVE connections have a meaningful state
 * (driven by the reconnect FSM); everything else is UNKNOWN. */
591 static enum dummy_netdev_conn_state
592 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
594 enum dummy_netdev_conn_state state;
596 if (conn->type == ACTIVE) {
597 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
598 state = CONN_STATE_CONNECTED;
600 state = CONN_STATE_NOT_CONNECTED;
603 state = CONN_STATE_UNKNOWN;
/* Performs periodic work for every dummy netdev: runs each device's packet
 * connection.  Lock order is dummy_list_mutex, then each dev->mutex,
 * matching the OVS_ACQ_AFTER annotation on struct netdev_dummy. */
610 netdev_dummy_run(void)
612 struct netdev_dummy *dev;
614 ovs_mutex_lock(&dummy_list_mutex);
615 LIST_FOR_EACH (dev, list_node, &dummy_list) {
616 ovs_mutex_lock(&dev->mutex);
617 dummy_packet_conn_run(dev);
618 ovs_mutex_unlock(&dev->mutex);
620 ovs_mutex_unlock(&dummy_list_mutex);
/* Registers poll-loop wakeups for every dummy netdev's packet connection.
 * Mirrors netdev_dummy_run(), with the same lock ordering. */
624 netdev_dummy_wait(void)
626 struct netdev_dummy *dev;
628 ovs_mutex_lock(&dummy_list_mutex);
629 LIST_FOR_EACH (dev, list_node, &dummy_list) {
630 ovs_mutex_lock(&dev->mutex);
631 dummy_packet_conn_wait(&dev->conn);
632 ovs_mutex_unlock(&dev->mutex);
634 ovs_mutex_unlock(&dummy_list_mutex);
637 static struct netdev *
638 netdev_dummy_alloc(void)
640 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
/* netdev_class 'construct' callback.  Gives each new device a unique
 * locally-administered-looking MAC of the form aa:55:xx:xx:xx:xx, derived
 * from a process-wide atomic counter seeded at 0xaa550000, and links the
 * device into the global dummy_list.
 * NOTE(review): defaults for mtu/flags and the final 'return 0;' appear to
 * be elided from this listing. */
645 netdev_dummy_construct(struct netdev *netdev_)
647 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
648 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
651 n = atomic_count_inc(&next_n);
653 ovs_mutex_init(&netdev->mutex);
654 ovs_mutex_lock(&netdev->mutex);
/* Spread the counter across the low four MAC bytes. */
655 netdev->hwaddr.ea[0] = 0xaa;
656 netdev->hwaddr.ea[1] = 0x55;
657 netdev->hwaddr.ea[2] = n >> 24;
658 netdev->hwaddr.ea[3] = n >> 16;
659 netdev->hwaddr.ea[4] = n >> 8;
660 netdev->hwaddr.ea[5] = n;
/* Negative errno sentinel: "ifindex not supported" until configured. */
663 netdev->ifindex = -EOPNOTSUPP;
665 dummy_packet_conn_init(&netdev->conn);
667 ovs_list_init(&netdev->rxes);
668 ovs_mutex_unlock(&netdev->mutex);
670 ovs_mutex_lock(&dummy_list_mutex);
671 ovs_list_push_back(&dummy_list, &netdev->list_node);
672 ovs_mutex_unlock(&dummy_list_mutex);
678 netdev_dummy_destruct(struct netdev *netdev_)
680 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
682 ovs_mutex_lock(&dummy_list_mutex);
683 ovs_list_remove(&netdev->list_node);
684 ovs_mutex_unlock(&dummy_list_mutex);
686 ovs_mutex_lock(&netdev->mutex);
687 dummy_packet_conn_close(&netdev->conn);
688 netdev->conn.type = NONE;
690 ovs_mutex_unlock(&netdev->mutex);
691 ovs_mutex_destroy(&netdev->mutex);
695 netdev_dummy_dealloc(struct netdev *netdev_)
697 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
703 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
705 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
707 ovs_mutex_lock(&netdev->mutex);
709 if (netdev->ifindex >= 0) {
710 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
713 dummy_packet_conn_get_config(&netdev->conn, args);
715 ovs_mutex_unlock(&netdev->mutex);
720 netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
721 struct in6_addr **pmask, int *n_addr)
723 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
724 int cnt = 0, i = 0, err = 0;
725 struct in6_addr *addr, *mask;
727 ovs_mutex_lock(&netdev->mutex);
728 if (netdev->address.s_addr != INADDR_ANY) {
732 if (ipv6_addr_is_set(&netdev->ipv6)) {
739 addr = xmalloc(sizeof *addr * cnt);
740 mask = xmalloc(sizeof *mask * cnt);
741 if (netdev->address.s_addr != INADDR_ANY) {
742 in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
743 in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
747 if (ipv6_addr_is_set(&netdev->ipv6)) {
748 memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
749 memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
761 ovs_mutex_unlock(&netdev->mutex);
/* Assigns IPv4 'address'/'netmask' to the device (used by the
 * "netdev-dummy/ip4addr" unixctl command) and bumps the change sequence so
 * watchers notice. */
767 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
768 struct in_addr netmask)
770 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
772 ovs_mutex_lock(&netdev->mutex);
773 netdev->address = address;
774 netdev->netmask = netmask;
775 netdev_change_seq_changed(netdev_);
776 ovs_mutex_unlock(&netdev->mutex);
782 netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
783 struct in6_addr *mask)
785 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
787 ovs_mutex_lock(&netdev->mutex);
789 netdev->ipv6_mask = *mask;
790 netdev_change_seq_changed(netdev_);
791 ovs_mutex_unlock(&netdev->mutex);
797 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
799 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
802 ovs_mutex_lock(&netdev->mutex);
803 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
805 dummy_packet_conn_set_config(&netdev->conn, args);
807 if (netdev->rxq_pcap) {
808 fclose(netdev->rxq_pcap);
810 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
811 fclose(netdev->tx_pcap);
813 netdev->rxq_pcap = netdev->tx_pcap = NULL;
814 pcap = smap_get(args, "pcap");
816 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
818 const char *rxq_pcap = smap_get(args, "rxq_pcap");
819 const char *tx_pcap = smap_get(args, "tx_pcap");
822 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
825 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
829 ovs_mutex_unlock(&netdev->mutex);
834 static struct netdev_rxq *
835 netdev_dummy_rxq_alloc(void)
837 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
842 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
844 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
845 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
847 ovs_mutex_lock(&netdev->mutex);
848 ovs_list_push_back(&netdev->rxes, &rx->node);
849 ovs_list_init(&rx->recv_queue);
850 rx->recv_queue_len = 0;
851 rx->seq = seq_create();
852 ovs_mutex_unlock(&netdev->mutex);
858 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
860 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
861 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
863 ovs_mutex_lock(&netdev->mutex);
864 ovs_list_remove(&rx->node);
865 pkt_list_delete(&rx->recv_queue);
866 ovs_mutex_unlock(&netdev->mutex);
867 seq_destroy(rx->seq);
871 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
873 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
879 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
882 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
883 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
884 struct dp_packet *packet;
886 ovs_mutex_lock(&netdev->mutex);
887 if (!ovs_list_is_empty(&rx->recv_queue)) {
888 struct pkt_list_node *pkt_node;
890 ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
891 packet = pkt_node->pkt;
893 rx->recv_queue_len--;
897 ovs_mutex_unlock(&netdev->mutex);
902 ovs_mutex_lock(&netdev->mutex);
903 netdev->stats.rx_packets++;
904 netdev->stats.rx_bytes += dp_packet_size(packet);
905 ovs_mutex_unlock(&netdev->mutex);
907 dp_packet_pad(packet);
915 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
917 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
918 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
919 uint64_t seq = seq_read(rx->seq);
921 ovs_mutex_lock(&netdev->mutex);
922 if (!ovs_list_is_empty(&rx->recv_queue)) {
923 poll_immediate_wake();
925 seq_wait(rx->seq, seq);
927 ovs_mutex_unlock(&netdev->mutex);
931 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
933 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
934 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
936 ovs_mutex_lock(&netdev->mutex);
937 pkt_list_delete(&rx->recv_queue);
938 rx->recv_queue_len = 0;
939 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class 'send' callback: "transmits" 'cnt' packets.  For each
 * packet it validates the frame length against the Ethernet header minimum
 * and the device MTU (+VLAN allowance), updates tx stats, forwards the
 * frame over the configured packet connection, auto-replies to ARP
 * requests for the device's own IPv4 address, and records the frame to the
 * tx pcap file when configured.  Packets are freed only when 'may_steal'.
 * NOTE(review): several lines (error returns, 'error'/'max_size'
 * declarations, closing braces) are elided from this listing. */
947 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
948 struct dp_packet **pkts, int cnt, bool may_steal)
950 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
954 for (i = 0; i < cnt; i++) {
955 const void *buffer = dp_packet_data(pkts[i]);
956 size_t size = dp_packet_size(pkts[i]);
/* Runt frames are rejected. */
958 if (size < ETH_HEADER_LEN) {
962 const struct eth_header *eth = buffer;
965 ovs_mutex_lock(&dev->mutex);
966 max_size = dev->mtu + ETH_HEADER_LEN;
967 ovs_mutex_unlock(&dev->mutex);
/* A single 802.1Q tag is allowed on top of the MTU. */
969 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
970 max_size += VLAN_HEADER_LEN;
972 if (size > max_size) {
978 ovs_mutex_lock(&dev->mutex);
979 dev->stats.tx_packets++;
980 dev->stats.tx_bytes += size;
982 dummy_packet_conn_send(&dev->conn, buffer, size);
984 /* Reply to ARP requests for 'dev''s assigned IP address. */
985 if (dev->address.s_addr) {
986 struct dp_packet packet;
989 dp_packet_use_const(&packet, buffer, size);
990 flow_extract(&packet, &flow);
991 if (flow.dl_type == htons(ETH_TYPE_ARP)
992 && flow.nw_proto == ARP_OP_REQUEST
993 && flow.nw_dst == dev->address.s_addr) {
/* Loop the reply straight back into the device's rx queues. */
994 struct dp_packet *reply = dp_packet_new(0);
995 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
996 false, flow.nw_dst, flow.nw_src);
997 netdev_dummy_queue_packet(dev, reply);
/* Mirror the transmitted frame to the tx pcap capture, if open. */
1002 struct dp_packet packet;
1004 dp_packet_use_const(&packet, buffer, size);
1005 ovs_pcap_write(dev->tx_pcap, &packet);
1006 fflush(dev->tx_pcap);
1009 ovs_mutex_unlock(&dev->mutex);
1013 for (i = 0; i < cnt; i++) {
1014 dp_packet_delete(pkts[i]);
1022 netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1024 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1026 ovs_mutex_lock(&dev->mutex);
1027 if (!eth_addr_equals(dev->hwaddr, mac)) {
1029 netdev_change_seq_changed(netdev);
1031 ovs_mutex_unlock(&dev->mutex);
1037 netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1039 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1041 ovs_mutex_lock(&dev->mutex);
1043 ovs_mutex_unlock(&dev->mutex);
1049 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1051 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1053 ovs_mutex_lock(&dev->mutex);
1055 ovs_mutex_unlock(&dev->mutex);
1061 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
1063 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1065 ovs_mutex_lock(&dev->mutex);
1067 ovs_mutex_unlock(&dev->mutex);
1073 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1075 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1077 ovs_mutex_lock(&dev->mutex);
1078 /* Passing only collected counters */
1079 stats->tx_packets = dev->stats.tx_packets;
1080 stats->tx_bytes = dev->stats.tx_bytes;
1081 stats->rx_packets = dev->stats.rx_packets;
1082 stats->rx_bytes = dev->stats.rx_bytes;
1083 ovs_mutex_unlock(&dev->mutex);
1089 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1090 unsigned int queue_id, struct smap *details OVS_UNUSED)
1092 if (queue_id == 0) {
/* Fills 'stats' with the "value unavailable" sentinels (all-ones counters,
 * LLONG_MIN creation time) that the netdev queue-stats interface uses for
 * unsupported statistics. */
1100 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1102 *stats = (struct netdev_queue_stats) {
1103 .tx_bytes = UINT64_MAX,
1104 .tx_packets = UINT64_MAX,
1105 .tx_errors = UINT64_MAX,
1106 .created = LLONG_MIN,
1111 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1112 unsigned int queue_id,
1113 struct netdev_queue_stats *stats)
1115 if (queue_id == 0) {
1116 netdev_dummy_init_queue_stats(stats);
1123 struct netdev_dummy_queue_state {
1124 unsigned int next_queue;
1128 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1131 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1132 state->next_queue = 0;
1138 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1140 unsigned int *queue_id,
1141 struct smap *details OVS_UNUSED)
1143 struct netdev_dummy_queue_state *state = state_;
1144 if (state->next_queue == 0) {
1146 state->next_queue++;
1154 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1162 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1163 void (*cb)(unsigned int queue_id,
1164 struct netdev_queue_stats *,
1168 struct netdev_queue_stats stats;
1169 netdev_dummy_init_queue_stats(&stats);
/* netdev_class 'get_ifindex' callback: returns the configured ifindex, or
 * the -EOPNOTSUPP sentinel set at construction when none was configured. */
1175 netdev_dummy_get_ifindex(const struct netdev *netdev)
1177 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1180 ovs_mutex_lock(&dev->mutex);
1181 ifindex = dev->ifindex;
1182 ovs_mutex_unlock(&dev->mutex);
/* Internal flag update, caller holds netdev->mutex.  Only NETDEV_UP and
 * NETDEV_PROMISC are supported; any other requested flag is rejected
 * (NOTE(review): the error-return line for that branch is elided from this
 * listing).  Reports the prior flags through '*old_flagsp' and bumps the
 * change sequence only when the flags actually changed. */
1188 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1189 enum netdev_flags off, enum netdev_flags on,
1190 enum netdev_flags *old_flagsp)
1191 OVS_REQUIRES(netdev->mutex)
1193 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1197 *old_flagsp = netdev->flags;
1198 netdev->flags |= on;
1199 netdev->flags &= ~off;
1200 if (*old_flagsp != netdev->flags) {
1201 netdev_change_seq_changed(&netdev->up);
1208 netdev_dummy_update_flags(struct netdev *netdev_,
1209 enum netdev_flags off, enum netdev_flags on,
1210 enum netdev_flags *old_flagsp)
1212 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1215 ovs_mutex_lock(&netdev->mutex);
1216 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1217 ovs_mutex_unlock(&netdev->mutex);
1222 /* Helper functions. */
1224 static const struct netdev_class dummy_class = {
1232 netdev_dummy_construct,
1233 netdev_dummy_destruct,
1234 netdev_dummy_dealloc,
1235 netdev_dummy_get_config,
1236 netdev_dummy_set_config,
1237 NULL, /* get_tunnel_config */
1238 NULL, /* build header */
1239 NULL, /* push header */
1240 NULL, /* pop header */
1241 NULL, /* get_numa_id */
1242 NULL, /* set_multiq */
1244 netdev_dummy_send, /* send */
1245 NULL, /* send_wait */
1247 netdev_dummy_set_etheraddr,
1248 netdev_dummy_get_etheraddr,
1249 netdev_dummy_get_mtu,
1250 netdev_dummy_set_mtu,
1251 netdev_dummy_get_ifindex,
1252 NULL, /* get_carrier */
1253 NULL, /* get_carrier_resets */
1254 NULL, /* get_miimon */
1255 netdev_dummy_get_stats,
1257 NULL, /* get_features */
1258 NULL, /* set_advertisements */
1260 NULL, /* set_policing */
1261 NULL, /* get_qos_types */
1262 NULL, /* get_qos_capabilities */
1265 netdev_dummy_get_queue,
1266 NULL, /* set_queue */
1267 NULL, /* delete_queue */
1268 netdev_dummy_get_queue_stats,
1269 netdev_dummy_queue_dump_start,
1270 netdev_dummy_queue_dump_next,
1271 netdev_dummy_queue_dump_done,
1272 netdev_dummy_dump_queue_stats,
1275 netdev_dummy_get_addr_list,
1276 NULL, /* add_router */
1277 NULL, /* get_next_hop */
1278 NULL, /* get_status */
1279 NULL, /* arp_lookup */
1281 netdev_dummy_update_flags,
1283 netdev_dummy_rxq_alloc,
1284 netdev_dummy_rxq_construct,
1285 netdev_dummy_rxq_destruct,
1286 netdev_dummy_rxq_dealloc,
1287 netdev_dummy_rxq_recv,
1288 netdev_dummy_rxq_wait,
1289 netdev_dummy_rxq_drain,
/* Empties packet list 'l', freeing each queued dp_packet.  (The
 * LIST_FOR_EACH_POP macro detaches each node as it iterates; the node
 * itself is presumably freed inside the loop — the free() line appears to
 * be elided from this listing.) */
1293 pkt_list_delete(struct ovs_list *l)
1295 struct pkt_list_node *pkt;
1297 LIST_FOR_EACH_POP(pkt, list_node, l) {
1298 dp_packet_delete(pkt->pkt);
1303 static struct dp_packet *
1304 eth_from_packet_or_flow(const char *s)
1306 enum odp_key_fitness fitness;
1307 struct dp_packet *packet;
1308 struct ofpbuf odp_key;
1312 if (!eth_from_hex(s, &packet)) {
1316 /* Convert string to datapath key.
1318 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1319 * the code for that currently calls exit() on parse error. We have to
1320 * settle for parsing a datapath key for now.
1322 ofpbuf_init(&odp_key, 0);
1323 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1325 ofpbuf_uninit(&odp_key);
1329 /* Convert odp_key to flow. */
1330 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1331 if (fitness == ODP_FIT_ERROR) {
1332 ofpbuf_uninit(&odp_key);
1336 packet = dp_packet_new(0);
1337 flow_compose(packet, &flow);
1339 ofpbuf_uninit(&odp_key);
/* Appends 'packet' to rxq 'rx''s receive queue, taking ownership of it,
 * and pokes 'rx->seq' so anyone sleeping in netdev_dummy_rxq_wait() wakes
 * up. */
1344 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1346 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1348 pkt_node->pkt = packet;
1349 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1350 rx->recv_queue_len++;
1351 seq_change(rx->seq);
/* Delivers 'packet' (ownership transferred) to 'dummy''s receive queues,
 * also recording it to the rxq pcap capture when configured.  The loop
 * fans out clones to queues with room; the final recipient gets the
 * original, and if no queue accepted it the packet is freed.
 * NOTE(review): the bookkeeping lines that maintain 'prev' and the
 * branch structure around the final delivery are elided from this
 * listing — verify against the full source. */
1355 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
1356 OVS_REQUIRES(dummy->mutex)
1358 struct netdev_rxq_dummy *rx, *prev;
1360 if (dummy->rxq_pcap) {
1361 ovs_pcap_write(dummy->rxq_pcap, packet);
1362 fflush(dummy->rxq_pcap);
1365 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1366 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1368 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1374 netdev_dummy_queue_packet__(prev, packet);
1376 dp_packet_delete(packet);
1381 netdev_dummy_receive(struct unixctl_conn *conn,
1382 int argc, const char *argv[], void *aux OVS_UNUSED)
1384 struct netdev_dummy *dummy_dev;
1385 struct netdev *netdev;
1388 netdev = netdev_from_name(argv[1]);
1389 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1390 unixctl_command_reply_error(conn, "no such dummy netdev");
1393 dummy_dev = netdev_dummy_cast(netdev);
1395 for (i = 2; i < argc; i++) {
1396 struct dp_packet *packet;
1398 packet = eth_from_packet_or_flow(argv[i]);
1400 unixctl_command_reply_error(conn, "bad packet syntax");
1404 ovs_mutex_lock(&dummy_dev->mutex);
1405 netdev_dummy_queue_packet(dummy_dev, packet);
1406 ovs_mutex_unlock(&dummy_dev->mutex);
1409 unixctl_command_reply(conn, NULL);
1412 netdev_close(netdev);
/* Sets or clears NETDEV_UP on 'dev' according to 'admin_state'; caller
 * holds dev->mutex.  The previous flags are fetched into 'old_flags' but
 * not otherwise used. */
1416 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1417 OVS_REQUIRES(dev->mutex)
1419 enum netdev_flags old_flags;
1422 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1424 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1429 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1430 const char *argv[], void *aux OVS_UNUSED)
1434 if (!strcasecmp(argv[argc - 1], "up")) {
1436 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1439 unixctl_command_reply_error(conn, "Invalid Admin State");
1444 struct netdev *netdev = netdev_from_name(argv[1]);
1445 if (netdev && is_dummy_class(netdev->netdev_class)) {
1446 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1448 ovs_mutex_lock(&dummy_dev->mutex);
1449 netdev_dummy_set_admin_state__(dummy_dev, up);
1450 ovs_mutex_unlock(&dummy_dev->mutex);
1452 netdev_close(netdev);
1454 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1455 netdev_close(netdev);
1459 struct netdev_dummy *netdev;
1461 ovs_mutex_lock(&dummy_list_mutex);
1462 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1463 ovs_mutex_lock(&netdev->mutex);
1464 netdev_dummy_set_admin_state__(netdev, up);
1465 ovs_mutex_unlock(&netdev->mutex);
1467 ovs_mutex_unlock(&dummy_list_mutex);
1469 unixctl_command_reply(conn, "OK");
1473 display_conn_state__(struct ds *s, const char *name,
1474 enum dummy_netdev_conn_state state)
1476 ds_put_format(s, "%s: ", name);
1479 case CONN_STATE_CONNECTED:
1480 ds_put_cstr(s, "connected\n");
1483 case CONN_STATE_NOT_CONNECTED:
1484 ds_put_cstr(s, "disconnected\n");
1487 case CONN_STATE_UNKNOWN:
1489 ds_put_cstr(s, "unknown\n");
1495 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1496 const char *argv[], void *aux OVS_UNUSED)
1498 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1504 const char *dev_name = argv[1];
1505 struct netdev *netdev = netdev_from_name(dev_name);
1507 if (netdev && is_dummy_class(netdev->netdev_class)) {
1508 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1510 ovs_mutex_lock(&dummy_dev->mutex);
1511 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1512 ovs_mutex_unlock(&dummy_dev->mutex);
1514 netdev_close(netdev);
1516 display_conn_state__(&s, dev_name, state);
1518 struct netdev_dummy *netdev;
1520 ovs_mutex_lock(&dummy_list_mutex);
1521 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1522 ovs_mutex_lock(&netdev->mutex);
1523 state = dummy_netdev_get_conn_state(&netdev->conn);
1524 ovs_mutex_unlock(&netdev->mutex);
1525 if (state != CONN_STATE_UNKNOWN) {
1526 display_conn_state__(&s, netdev->up.name, state);
1529 ovs_mutex_unlock(&dummy_list_mutex);
1532 unixctl_command_reply(conn, ds_cstr(&s));
1537 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1538 const char *argv[], void *aux OVS_UNUSED)
1540 struct netdev *netdev = netdev_from_name(argv[1]);
1542 if (netdev && is_dummy_class(netdev->netdev_class)) {
1543 struct in_addr ip, mask;
1546 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1548 netdev_dummy_set_in4(netdev, ip, mask);
1549 unixctl_command_reply(conn, "OK");
1551 unixctl_command_reply_error(conn, error);
1555 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1558 netdev_close(netdev);
1562 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1563 const char *argv[], void *aux OVS_UNUSED)
1565 struct netdev *netdev = netdev_from_name(argv[1]);
1567 if (netdev && is_dummy_class(netdev->netdev_class)) {
1568 struct in6_addr ip6;
1572 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1574 struct in6_addr mask;
1576 mask = ipv6_create_mask(plen);
1577 netdev_dummy_set_in6(netdev, &ip6, &mask);
1578 unixctl_command_reply(conn, "OK");
1580 unixctl_command_reply_error(conn, error);
1583 netdev_close(netdev);
1585 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1588 netdev_close(netdev);
/* Replaces the already-registered netdev provider named 'type' with a copy
 * of the dummy class bearing that type name (used for DUMMY_OVERRIDE_*
 * levels).  Only proceeds if unregistering the existing provider
 * succeeds.  On registration failure, logs and frees the duplicated type
 * string (NOTE(review): the free of 'class' itself appears to be elided
 * from this listing). */
1593 netdev_dummy_override(const char *type)
1595 if (!netdev_unregister_provider(type)) {
1596 struct netdev_class *class;
1599 class = xmemdup(&dummy_class, sizeof dummy_class);
1600 class->type = xstrdup(type);
1601 error = netdev_register_provider(class);
1603 VLOG_ERR("%s: failed to register netdev provider (%s)",
1604 type, ovs_strerror(error));
1605 free(CONST_CAST(char *, class->type));
1612 netdev_dummy_register(enum dummy_level level)
1614 unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
1615 2, INT_MAX, netdev_dummy_receive, NULL);
1616 unixctl_command_register("netdev-dummy/set-admin-state",
1617 "[netdev] up|down", 1, 2,
1618 netdev_dummy_set_admin_state, NULL);
1619 unixctl_command_register("netdev-dummy/conn-state",
1621 netdev_dummy_conn_state, NULL);
1622 unixctl_command_register("netdev-dummy/ip4addr",
1623 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1624 netdev_dummy_ip4addr, NULL);
1625 unixctl_command_register("netdev-dummy/ip6addr",
1626 "[netdev] ip6addr", 2, 2,
1627 netdev_dummy_ip6addr, NULL);
1629 if (level == DUMMY_OVERRIDE_ALL) {
1634 netdev_enumerate_types(&types);
1635 SSET_FOR_EACH (type, &types) {
1636 if (strcmp(type, "patch")) {
1637 netdev_dummy_override(type);
1640 sset_destroy(&types);
1641 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1642 netdev_dummy_override("system");
1644 netdev_register_provider(&dummy_class);
1646 netdev_vport_tunnel_register();