/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN \
                           + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_FRAME_LEN(mtu) \
                        + sizeof(struct dp_packet) \
                        + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN 1024
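
/* Illustrative sanity check (an addition, not in the original file): assuming
 * the usual 14-byte Ethernet header and 4-byte CRC behind ETHER_HDR_LEN and
 * ETHER_CRC_LEN, MTU_TO_FRAME_LEN(1500) is 1518 and
 * MTU_TO_MAX_FRAME_LEN(1500) is 1526; the two conversions are exact inverses
 * by construction. */
BUILD_ASSERT_DECL(FRAME_LEN_TO_MTU(MTU_TO_FRAME_LEN(ETHER_MTU)) == ETHER_MTU);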

/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF (4096 * 64)
#define MIN_NB_MBUF (4096 * 4)
#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
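
/* Worked example (illustrative): MAX_NB_MBUF is 262144 and MIN_NB_MBUF is
 * 16384, so the allocation loop in dpdk_mp_get() may try 262144, 131072,
 * 65536, 32768 and finally 16384 mbufs; the asserts above guarantee that
 * every value in that sequence divides MAX_NB_MBUF and that the smallest
 * value tried is still a multiple of MP_CACHE_SZ. */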

#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */

/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
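
/* Illustrative note (an addition; the 2.5 GHz figure is an assumption): the
 * vhost send path converts this budget to TSC cycles as
 * VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6, so on a 2.5 GHz timer
 * 100 us becomes 250000 cycles of retrying before the enqueue is dropped. */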

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split = 0,      /* Header Split disabled */
        .hw_ip_checksum = 0,    /* IP checksum offload disabled */
        .hw_vlan_filter = 0,    /* VLAN filtering disabled */
        .jumbo_frame = 0,       /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
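
/* Illustrative note (an addition; the 2 GHz figure is an assumption):
 * DRAIN_TSC is measured in TSC cycles, so on a 2 GHz timer the forced drain
 * in dpdk_queue_pkts() fires after roughly 100 us without a flush, keeping
 * queued packets from sitting in the intermediate queue indefinitely. */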

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};

/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};

/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;
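
/* Hypothetical sketch (an addition, not in the original file): the shape of a
 * second QoS implementation plugged in through 'struct dpdk_qos_ops'. All
 * 'noop_*' names are invented for illustration; a real implementation would
 * set 'netdev->qos_conf' in its construct callback, as egress_policer_ops
 * does below, and would drop or re-order mbufs in its qos_run callback. */
#if 0
static int noop_qos_construct(struct netdev *netdev,
                              const struct smap *details);
static void noop_qos_destruct(struct netdev *netdev, struct qos_conf *conf);
static int noop_qos_run(struct netdev *netdev, struct rte_mbuf **pkts,
                        int pkt_cnt);

static const struct dpdk_qos_ops noop_ops = {
    "noop",             /* qos_name */
    noop_qos_construct, /* qos_construct: must set netdev->qos_conf */
    noop_qos_destruct,  /* qos_destruct */
    NULL,               /* qos_get: nothing to report */
    NULL,               /* qos_set: not configurable */
    noop_qos_run,       /* qos_run: returns the new mbuf count */
};
#endif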

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;
    uint64_t tsc;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to the queues enabled by the guest. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id; /* ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has. We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;
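
    /* Illustrative example (an addition): if the user requests 8 txqs but
     * the NIC only provides 4, up.n_txq == 8 while real_n_txq == 4 and
     * txq_needs_locking is true; netdev_dpdk_send__() below then folds the
     * caller's qid with 'qid % real_n_txq' and takes the per-queue lock. */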

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net *netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned
 * criteria. */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}
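
/* Worked example (an addition; assumes DPDK's default RTE_PKTMBUF_HEADROOM
 * of 128 bytes): for mtu = 1500, MTU_TO_MAX_FRAME_LEN(1500) + 128 = 1654,
 * which rounds up to a 2048-byte buffer; netdev_dpdk_init() then feeds
 * FRAME_LEN_TO_MTU(2048) = 2030 to dpdk_mp_get() as the mempool's MTU. */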

/* XXX: use dpdk malloc for entire OVS. In fact huge pages should be used
 * for all the other segments: data, bss and text. */
dpdk_rte_mzalloc(size_t sz)

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
free_dpdk_buf(struct dp_packet *p)

    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);

ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)

    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);

static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)

    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;

    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);

    mp_size = MAX_NB_MBUF;

        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);

dpdk_mp_put(struct dpdk_mp *dmp)

    ovs_assert(dmp->refcount >= 0);

    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        ovs_list_remove(&dmp->list_node);
        /* destroy mp-pool. */

check_link_status(struct netdev_dpdk *dev)

    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;

        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);

dpdk_watchdog(void *dummy OVS_UNUSED)

    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);

dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));

        /* Retry with fewer tx queues */

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));

        /* Retry with fewer rx queues */

    dev->up.n_rxq = n_rxq;
    dev->real_n_txq = n_txq;

dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)

    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));

    diag = rte_eth_dev_start(dev->port_id);
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)

    return CONTAINER_OF(netdev, struct netdev_dpdk, up);

static struct netdev *
netdev_dpdk_alloc(void)

    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);

netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!dev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core. If the corresponding core
             * is not on the same numa node as 'dev', flags the
             * 'flush_tx'. */
            dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
            /* Queues are shared among CPUs. Always flush */
            dev->tx_q[i].flush_tx = true;

        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);

netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->port_id = port_no;

    dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    buf_size = dpdk_buf_size(dev->mtu);
    dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    netdev->n_txq = NR_QUEUE;
    netdev->n_rxq = NR_QUEUE;
    netdev->requested_n_rxq = NR_QUEUE;
    dev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(dev, NR_QUEUE);
        err = dpdk_eth_dev_init(dev);
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);

    ovs_list_push_back(&dpdk_list, &dev->list_node);

    ovs_mutex_unlock(&dev->mutex);

/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)

    if (strncmp(dev_name, prefix, strlen(prefix))) {

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
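
/* Illustrative usage (an addition): dpdk_dev_parse_name("dpdk7", "dpdk",
 * &port_no) succeeds and sets port_no to 7, while "dpdkx" or "dpdk-1" are
 * rejected because the suffix must be a bare decimal number. */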

vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
    err = vhost_construct_helper(netdev);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_vhost_user_construct(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(dev->vhost_id);
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",

    fatal_signal_add_file_to_unlink(dev->vhost_id);
    VLOG_INFO("Socket %s created for vhost-user port %s\n",
              dev->vhost_id, name);
    err = vhost_construct_helper(netdev);

    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_construct(struct netdev *netdev)

    unsigned int port_no;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_destruct(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);

    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_vhost_destruct(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
        VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
                 " '%s' must be restarted.",

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
        fatal_signal_remove_file_to_unlink(dev->vhost_id);

    ovs_mutex_lock(&dpdk_mutex);

    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_dealloc(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d",
                    netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_numa_id(const struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, it does not try to restore the old
 * configuration and just returns the error. */
netdev_dpdk_set_multiq(struct netdev *netdev, unsigned int n_txq,
                       unsigned int n_rxq)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    int old_rxq, old_txq;

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);

    old_txq = netdev->n_txq;
    old_rxq = netdev->n_rxq;
    netdev->n_txq = n_txq;
    netdev->n_rxq = n_rxq;

    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
        /* If there has been an error, it means that the requested queues
         * have not been created. Restore the old numbers. */
        netdev->n_txq = old_txq;
        netdev->n_rxq = old_rxq;

    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev, unsigned int n_txq,
                                  unsigned int n_rxq)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = n_txq;
    dev->real_n_txq = 1;

    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_vhost_set_multiq(struct netdev *netdev, unsigned int n_txq,
                             unsigned int n_rxq)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = n_txq;
    netdev->n_rxq = n_rxq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)

    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)

    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);

netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)

    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)

netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)

    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)

    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    while (nb_tx != txq->count) {
        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);

    txq->tsc = rte_get_timer_cycles();

dpdk_queue_flush(struct netdev_dpdk *dev, int qid)

    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {

    dpdk_queue_flush__(dev, qid);

is_vhost_running(struct virtio_net *virtio_dev)

    return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));

netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)

    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {

        stats->rx_bytes += dp_packet_size(packet);

/*
 * The receive path for the vhost port is the TX path out from guest.
 */
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet **packets, int *c)

    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    int qid = rxq->queue_id;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {

    if (rxq->queue_id >= dev->real_n_rxq) {

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    (struct rte_mbuf **) packets,

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&dev->stats_lock);

netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
                     int *c)

    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    /* There is only one tx queue for this core. Do not flush other
     * txqs.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) packets,

netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)

    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);

netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted, int dropped)

    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }

__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;

    qid = dev->tx_q[qid % dev->real_n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);

        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,

        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            /* Prepare for possible next iteration. */
            cur_pkts = &cur_pkts[tx_pkts];
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz()
                               / 1E6;
            unsigned int expired = 0;

            start = rte_get_timer_cycles();

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {

            /* break out of main loop. */

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);

    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
    rte_spinlock_unlock(&dev->stats_lock);

    for (i = 0; i < total_pkts; i++) {
        dp_packet_delete(pkts[i]);
    }

dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)

    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
            diff_tsc = rte_get_timer_cycles() - txq->tsc;
            if (diff_tsc >= DRAIN_TSC) {
                dpdk_queue_flush__(dev, qid);

/* Tx function. Transmit packets indefinitely */
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS

#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];

    /* If we are on a non pmd thread we have to use the mempool mutex,
     * because every non pmd thread shares the same mempool cache. */
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }

netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)

    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);

netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
        int next_tx_idx = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;

                    temp_cnt = netdev_dpdk_qos_run__(dev,
                                                     (struct rte_mbuf **) pkts,
                    dropped += qos_pkts - temp_cnt;
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);

                next_tx_idx = i + 1;

        if (next_tx_idx != cnt) {

            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **) pkts, cnt);
            dropped += qos_pkts - cnt;
            dpdk_queue_pkts(dev, qid, (struct rte_mbuf **) &pkts[next_tx_idx],

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }

netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);

netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);

    rte_eth_dev_stop(dev->port_id);

    old_mp = dev->dpdk_mp;

    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);

        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);

netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but we count them here as dropped
     * instead. */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

netdev_dpdk_get_ifindex(const struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {

    ovs_mutex_unlock(&dev->mutex);

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;

netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)

netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {

    *old_flagsp = dev->flags;

    if (dev->flags == *old_flagsp) {

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);

netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0)

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);

netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)

    enum netdev_flags old_flags;

        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);

netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)

    if (!strcasecmp(argv[argc - 1], "up")) {
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        unixctl_command_reply_error(conn, "Invalid Admin State");

        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);

        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);

    unixctl_command_reply(conn, "OK");

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
set_irq_status(struct virtio_net *virtio_dev)

    for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
    }

/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)

    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
    }

    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
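
/* Worked example (an addition): with real_n_txq == 4 and only queues 0 and 2
 * enabled by the guest, the loops above leave the mapping 0->0, 1->0, 2->2,
 * 3->2, so packets aimed at a disabled txq are spread over enabled ones. */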

netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev,
                             struct virtio_net *virtio_dev)
    OVS_REQUIRES(dev->mutex)

    qp_num = virtio_dev->virt_qp_nb;
    if (qp_num > dev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", virtio_dev->ifname,
                 virtio_dev->device_fh, qp_num, dev->up.n_rxq);

    dev->real_n_rxq = qp_num;
    dev->real_n_txq = qp_num;
    dev->txq_needs_locking = true;
    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

/*
 * A new virtio-net device is added to a vhost port.
 */
new_device(struct virtio_net *virtio_dev)

    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);
            if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
                ovs_mutex_unlock(&dev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
            }
            ovsrcu_set(&dev->virtio_dev, virtio_dev);

            virtio_dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(virtio_dev);
            ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
              "found", virtio_dev->ifname, virtio_dev->device_fh);

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added",
              virtio_dev->ifname, virtio_dev->device_fh);

/* Clears mapping for all available queues of vhost interface. */
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)

    for (i = 0; i < dev->real_n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }

/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
destroy_device(volatile struct virtio_net *virtio_dev)

    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(dev) == virtio_dev) {

            ovs_mutex_lock(&dev->mutex);
            virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&dev->virtio_dev, NULL);
            netdev_dpdk_txq_map_clear(dev);

            ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists == true) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
                  virtio_dev->ifname, virtio_dev->device_fh);
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
                  virtio_dev->device_fh);

vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
                    int enable)

    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);

            dev->tx_q[qid].map = qid;
            dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;

            netdev_dpdk_remap_txqs(dev);

            ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
              PRIu64" changed to \'%s\'", queue_id, qid,
              virtio_dev->ifname, virtio_dev->device_fh,
              (enable == 1) ? "enabled" : "disabled");

    VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
              virtio_dev->device_fh);

netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)

    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

start_vhost_loop(void *dummy OVS_UNUSED)

    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

dpdk_vhost_class_init(void)

    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                              | 1ULL << VIRTIO_NET_F_HOST_TSO6
                              | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);

dpdk_vhost_cuse_class_init(void)

dpdk_vhost_user_class_init(void)

dpdk_common_init(void)

    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)

    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
    if (ivshmem->cring_tx == NULL) {

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
    if (ivshmem->cring_rx == NULL) {

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;

dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)

    struct dpdk_ring *ivshmem;
    unsigned int port_no;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */

    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);

netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear. This is because the same mbuf may be
     * modified by the consumer of the ring and returned into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);

netdev_dpdk_ring_construct(struct netdev *netdev)

    unsigned int port_no = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

    ovs_mutex_unlock(&dpdk_mutex);

/*
 * Initialize QoS configuration operations.
 */
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)

/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name. Return a dpdk_qos_ops pointer to a match,
 * otherwise return NULL.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)

    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {

/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf. Set the netdev's qos_conf to NULL.
 */
qos_delete_conf(struct netdev *netdev)

    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}

static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If 'type' is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
            ovs_assert((error == 0) == (dev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
        ovs_assert((error == 0) == (dev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&dev->mutex);
    return error;
}

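/*
 * Illustrative example (editorial sketch, not part of the upstream file):
 * the hooks above are reached through the generic netdev QoS API, so a
 * caller holding an open dpdk netdev could configure the egress policer
 * roughly as follows ('netdev' and 'error' are assumed locals):
 *
 *     struct smap details = SMAP_INITIALIZER(&details);
 *     smap_add(&details, "cir", "46000");   (committed rate, bytes/sec)
 *     smap_add(&details, "cbs", "2048");    (committed burst size, bytes)
 *     error = netdev_set_qos(netdev, "egress-policer", &details);
 *     smap_destroy(&details);
 *
 * From the CLI the equivalent is usually expressed through ovsdb, e.g.:
 *
 *     ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *         --id=@newqos create qos type=egress-policer \
 *         other-config:cir=46000 other-config:cbs=2048
 */
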
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}

static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}

static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);
    return 0;
}

static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}

static inline bool
egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
                            struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}

static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet, compacting the conforming packets to the
         * front of the array. */
        if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
                                        current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}

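/*
 * Worked example (editorial, illustrative numbers): with
 * other_config:cir=46000 and other_config:cbs=2048, the srTCM meter refills
 * at 46,000 bytes/sec and tolerates bursts of at most 2,048 bytes.  A
 * steady stream of 512-byte frames meters at 498 bytes each after the
 * ether_hdr deduction above, so sustained throughput is limited to roughly
 * 46000 / 498 ~= 92 packets/sec; packets beyond the bucket are freed
 * rather than transmitted.
 */
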
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set
};

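/*
 * Note (editorial): 'egress_policer_ops' must also appear in the qos_confs
 * array that qos_lookup_name() and netdev_dpdk_get_qos_types() iterate
 * over.  A new QoS implementation is wired in the same way: define a
 * 'struct dpdk_qos_ops' with a unique qos_name and add it to qos_confs.
 */
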
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND,  \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)           \
{                                                                         \
    NAME,                                                                 \
    true,                       /* is_pmd */                              \
    INIT,                       /* init */                                \
    NULL,                       /* netdev_dpdk_run */                     \
    NULL,                       /* netdev_dpdk_wait */                    \
                                                                          \
    netdev_dpdk_alloc,                                                    \
    CONSTRUCT,                                                            \
    DESTRUCT,                                                             \
    netdev_dpdk_dealloc,                                                  \
    netdev_dpdk_get_config,                                               \
    netdev_dpdk_set_config,                                               \
    NULL,                       /* get_tunnel_config */                   \
    NULL,                       /* build header */                        \
    NULL,                       /* push header */                         \
    NULL,                       /* pop header */                          \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                         \
    MULTIQ,                     /* set_multiq */                          \
                                                                          \
    SEND,                       /* send */                                \
    NULL,                       /* send_wait */                           \
                                                                          \
    netdev_dpdk_set_etheraddr,                                            \
    netdev_dpdk_get_etheraddr,                                            \
    netdev_dpdk_get_mtu,                                                  \
    netdev_dpdk_set_mtu,                                                  \
    netdev_dpdk_get_ifindex,                                              \
    GET_CARRIER,                                                          \
    netdev_dpdk_get_carrier_resets,                                       \
    netdev_dpdk_set_miimon,                                               \
    GET_STATS,                                                            \
    GET_FEATURES,                                                         \
    NULL,                       /* set_advertisements */                  \
                                                                          \
    NULL,                       /* set_policing */                        \
    netdev_dpdk_get_qos_types,                                            \
    NULL,                       /* get_qos_capabilities */                \
    netdev_dpdk_get_qos,                                                  \
    netdev_dpdk_set_qos,                                                  \
    NULL,                       /* get_queue */                           \
    NULL,                       /* set_queue */                           \
    NULL,                       /* delete_queue */                        \
    NULL,                       /* get_queue_stats */                     \
    NULL,                       /* queue_dump_start */                    \
    NULL,                       /* queue_dump_next */                     \
    NULL,                       /* queue_dump_done */                     \
    NULL,                       /* dump_queue_stats */                    \
                                                                          \
    NULL,                       /* set_in4 */                             \
    NULL,                       /* get_addr_list */                       \
    NULL,                       /* add_router */                          \
    NULL,                       /* get_next_hop */                        \
    GET_STATUS,                                                           \
    NULL,                       /* arp_lookup */                          \
                                                                          \
    netdev_dpdk_update_flags,                                             \
                                                                          \
    netdev_dpdk_rxq_alloc,                                                \
    netdev_dpdk_rxq_construct,                                            \
    netdev_dpdk_rxq_destruct,                                             \
    netdev_dpdk_rxq_dealloc,                                              \
    RXQ_RECV,                                                             \
    NULL,                       /* rx_wait */                             \
    NULL,                       /* rxq_drain */                           \
}

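/*
 * Note (editorial): the class definitions near the end of this file
 * ("dpdk", "dpdkr", "dpdkvhostcuse", "dpdkvhostuser") are all stamped out
 * from this macro; only the init, constructor, destructor, multiq, send,
 * carrier, stats, features, status, and rxq-receive hooks differ per class.
 */
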
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    bool changed = false;

    val = smap_get(ovs_other_config, flag);

    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided; otherwise resort to the
     * default value. */
    if (val && (strlen(val) <= size)) {
        changed = true;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}

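/*
 * Illustrative example (editorial): with a database entry such as
 *
 *     ovs-vsctl --no-wait set Open_vSwitch . \
 *         other_config:vhost-sock-dir=subdir
 *
 * process_vhost_flags("vhost-sock-dir", ...) reports a change and returns
 * "subdir" in '*new_val'; without the entry it logs the fallback and hands
 * back 'default_val' unchanged, transferring ownership to the caller.
 */
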
static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}

static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc + 1] = xstrdup(value);
}

static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutually-exclusive). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
            ret += 2;
        }
    }

    return ret;
}

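/*
 * Illustrative mapping (editorial): given
 *
 *     ovs-vsctl --no-wait set Open_vSwitch . \
 *         other_config:dpdk-lcore-mask=0x2 \
 *         other_config:dpdk-hugepage-dir=/dev/hugepages
 *
 * this function appends "-c 0x2 --huge-dir /dev/hugepages" to the EAL
 * argument vector being assembled for rte_eal_init().
 */
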
#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                           found_value);
        ret += 2;
    }

    return ret;
}

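/*
 * Illustrative behaviour (editorial): other_config:dpdk-alloc-mem and
 * other_config:dpdk-socket-mem are mutually exclusive spellings of the EAL
 * memory option ("-m" vs "--socket-mem").  If neither is set, the default
 * "--socket-mem 1024,0" is used; if both are set, the last one scanned
 * wins and the VLOG_ERR above asks the admin to fix the database.
 */
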
static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv)
{
    int i = construct_dpdk_options(ovs_other_config, argv, 1);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i);
    return i;
}

static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;
    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}

static void
dpdk_init__(const struct smap *ovs_other_config)
{
    int err = 0;
    cpu_set_t cpuset;
    char **argv = NULL;
    int result;
    int argc;
#ifndef VHOST_CUSE
    char *sock_dir_subcomponent;
#endif

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");

#ifdef VHOST_CUSE
    if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
                            PATH_MAX, ovs_other_config, &cuse_dev_name)) {
#else
    if (process_vhost_flags("vhost-sock-dir", xstrdup(""),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;

        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid"
                     " characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = xstrdup(ovs_rundir());
        free(sock_dir_subcomponent);
#endif
    }

    /* Get the main thread affinity. */
    CPU_ZERO(&cpuset);
    err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                 &cpuset);
    if (err) {
        VLOG_ERR("Thread getaffinity error %d.", err);
    }

    argv = grow_argv(&argv, 0, 1);
    argv[0] = xstrdup(ovs_get_program_name());
    argc = get_dpdk_args(ovs_other_config, &argv);

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value. */
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                 &cpuset);
    if (err) {
        VLOG_ERR("Thread setaffinity error %d", err);
    }

    /* Keep the arguments around so they can be freed at exit. */
    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

#ifdef VHOST_CUSE
    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified, cuse_dev_name is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return;
    }
#endif

    dpdk_vhost_class_init();

    /* Finally, register the dpdk classes. */
    netdev_dpdk_register();
}

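/*
 * Illustrative bring-up (editorial): DPDK stays disabled until the database
 * requests it, so a typical deployment runs something like
 *
 *     ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true \
 *         other_config:dpdk-lcore-mask=0x2 \
 *         other_config:dpdk-socket-mem=1024,0
 *
 * before starting ovs-vswitchd; dpdk_init() below then funnels that smap
 * into this function exactly once.
 */
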
void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}

static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
    netdev_register_provider(&dpdk_vhost_cuse_class);
#else
    netdev_register_provider(&dpdk_vhost_user_class);
#endif
}

int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

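/*
 * Usage note (editorial): pmd threads created by the dpif-netdev datapath
 * call pmd_thread_setaffinity_cpu() with their assigned core so that
 * rte_lcore_id() works from non-EAL threads; the main thread keeps
 * NON_PMD_CORE_ID (set in dpdk_init__() above), which is exactly what
 * dpdk_thread_is_pmd() below tests.
 */
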
static bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}