/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
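/* Worked example (illustrative only): with the default 1500-byte Ethernet
 * MTU, MTU_TO_MAX_LEN(1500) = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
 * = 1518 bytes of frame data.  MBUF_SIZE(1500) then adds the 512-byte pad
 * above plus sizeof(struct rte_mbuf) and RTE_PKTMBUF_HEADROOM -- both
 * commonly 128 bytes, though that depends on the DPDK build -- for roughly
 * 2286 bytes per mbuf. */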
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
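/* A sanity-check sketch of the arithmetic above (added for illustration, not
 * one of the original invariants): MAX_NB_MBUF is 4096 * 64 = 262144 and
 * MIN_NB_MBUF is 4096 * 4 = 16384, so MAX_NB_MBUF / MIN_NB_MBUF == 16, a
 * power of two, and the halving sequence visits 262144, 131072, 65536,
 * 32768, 16384 before giving up. */
BUILD_ASSERT_DECL(MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == MIN_NB_MBUF);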
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n + 32 <= 4096). */

char *cuse_dev_name = NULL;    /* Name of the vhost-cuse character device. */
char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */
/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
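/* Illustrative conversion (the timer frequency is CPU-specific): DRAIN_TSC
 * is measured in timer cycles, so on a 2 GHz TSC, 200000 cycles / 2e9 Hz =
 * 100 us; a partially filled tx burst is drained after roughly that long. */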
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
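/* A minimal sketch of the expected pattern for a non pmd thread (this
 * mirrors what dpdk_do_tx_copy() below actually does):
 *
 *     if (!thread_is_pmd()) {
 *         ovs_mutex_lock(&nonpmd_mempool_mutex);
 *     }
 *     ... allocate or free mbufs, e.g. rte_pktmbuf_alloc() ...
 *     if (!thread_is_pmd()) {
 *         ovs_mutex_unlock(&nonpmd_mempool_mutex);
 *     }
 */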
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;
    uint64_t tsc;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id;  /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST,
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects 'stats'. */
    rte_spinlock_t stats_lock;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    int real_n_txq;
    bool txq_needs_locking;

    /* Spinlock for vhost transmission.  Other DPDK devices use spinlocks in
     * dpdk_tx_queue. */
    rte_spinlock_t vhost_tx_lock;

    /* virtio-net structure for vhost device. */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
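/* Illustrative remapping (example numbers, not from the original): if the
 * user requests up.n_txq == 4 but the NIC exposes only real_n_txq == 2,
 * netdev_dpdk_send__() folds qids onto NIC queues with 'qid % real_n_txq',
 * so qids 0 and 2 share NIC queue 0 while qids 1 and 3 share NIC queue 1,
 * each protected by its 'tx_lock'. */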
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS.  In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);

    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
static void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    dev->up.n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    dev->real_n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->real_n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d. rxq:%d txq:%d", diag,
                 dev->up.n_rxq, dev->real_n_txq);
        return -diag;
    }

    for (i = 0; i < dev->real_n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, NULL);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      NULL, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', set 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs.  Always flush. */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel failed
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}
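/* For illustration (hypothetical names): with prefix "dpdk", a port named
 * "dpdk7" yields *port_no == 7; with prefix "dpdkr", "dpdkr3" yields 3.
 * A non-matching name such as "eth0" fails the strncmp() check above. */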
static int
vhost_construct_helper(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    rte_spinlock_init(&netdev->vhost_tx_lock);
    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
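    /* For instance (illustrative values; the directory is deployment
     * specific): with vhost_sock_dir "/usr/local/var/run/openvswitch" and a
     * port named "vhost-user-1", 'vhost_id' becomes
     * "/usr/local/var/run/openvswitch/vhost-user-1", which is the socket
     * path registered below. */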
    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try to restore its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = VIRTIO_TXQ;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other queues.
     * Do not flush a tx queue that is shared among CPUs, since it is always
     * flushed. */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;
    return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    /* vHost has a single TX queue, so we need to lock it for transmission. */
    rte_spinlock_lock(&vhost_dev->vhost_tx_lock);

    do {
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;
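            /* Illustrative arithmetic (the timer frequency is CPU-specific):
             * with rte_get_timer_hz() == 2.5e9, the retry budget is
             * 100 * 2.5e9 / 1e6 = 250000 timer cycles. */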
            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);
    rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    vhost_dev->stats.tx_packets += (total_pkts - cnt);
    vhost_dev->stats.tx_dropped += cnt;
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
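/* Note on the queueing policy above: a burst is flushed immediately when the
 * queue fills up (MAX_TX_QUEUE_LEN) or when 'flush_tx' is set; otherwise it
 * is drained once it has aged past DRAIN_TSC timer cycles, so small bursts
 * do not linger indefinitely. */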
/* Tx function.  Transmit packets, copying each into a freshly allocated
 * DPDK mbuf first. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt,
                                 true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* The init failed, so back out the changes. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x",
                    dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x",
                    dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}
static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}
static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and returned to the datapath
     * without the RSS hash being recalculated. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)        \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the vhost
     * specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of
     * the vhost-cuse character device.
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = strdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
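/* Hypothetical invocations (the EAL arguments are examples only):
 *
 *     ovs-vswitchd --dpdk -vhost_sock_dir /tmp -c 0x1 -n 4 ...
 *     ovs-vswitchd --dpdk -cuse_dev_name my-vhost-net -c 0x1 -n 4 ...
 *
 * The first overrides the vhost-user socket directory, the second the
 * vhost-cuse character device name; with neither flag, the defaults passed
 * in by dpdk_init() below are used. */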
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments */
        base = 2;
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
const struct netdev_class dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}