= HMAP_INITIALIZER(&netdev_classes);
struct netdev_registered_class {
- struct hmap_node hmap_node; /* In 'netdev_classes', by class->type. */
- const struct netdev_class *class;
- atomic_int ref_cnt; /* Number of 'struct netdev's of this class. */
+ /* In 'netdev_classes', by class->type. */
+ struct hmap_node hmap_node OVS_GUARDED_BY(netdev_class_mutex);
+ const struct netdev_class *class OVS_GUARDED_BY(netdev_class_mutex);
+ /* Number of 'struct netdev's of this class. */
+ int ref_cnt OVS_GUARDED_BY(netdev_class_mutex);
};
/* This is set pretty low because we probably won't learn anything from the
bool
netdev_is_pmd(const struct netdev *netdev)
{
- return !strcmp(netdev->netdev_class->type, "dpdk");
+ return (!strcmp(netdev->netdev_class->type, "dpdk") ||
+ !strcmp(netdev->netdev_class->type, "dpdkr"));
}
static void
-netdev_initialize(void)
+netdev_class_mutex_initialize(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
ovs_mutex_init_recursive(&netdev_class_mutex);
+ ovsthread_once_done(&once);
+ }
+}
+
+static void
+netdev_initialize(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+ if (ovsthread_once_start(&once)) {
+ netdev_class_mutex_initialize();
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
{
struct netdev_registered_class *rc;
+ netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
if (rc->class->run) {
{
int error;
+ netdev_class_mutex_initialize();
ovs_mutex_lock(&netdev_class_mutex);
if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
hmap_insert(&netdev_classes, &rc->hmap_node,
hash_string(new_class->type, 0));
rc->class = new_class;
- atomic_init(&rc->ref_cnt, 0);
+ rc->ref_cnt = 0;
} else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
"registered: %s", type);
error = EAFNOSUPPORT;
} else {
- int ref_cnt;
-
- atomic_read(&rc->ref_cnt, &ref_cnt);
- if (!ref_cnt) {
+ if (!rc->ref_cnt) {
hmap_remove(&netdev_classes, &rc->hmap_node);
free(rc);
error = 0;
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
- if (dpif_port && !strcmp(dpif_port, name)) {
+ if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
ovs_mutex_unlock(&netdev_class_mutex);
return true;
}
error = rc->class->construct(netdev);
if (!error) {
- int old_ref_cnt;
-
- atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
+ rc->ref_cnt++;
netdev_change_seq_changed(netdev);
} else {
free(netdev->name);
error = 0;
}
- ovs_mutex_unlock(&netdev_mutex);
- ovs_mutex_unlock(&netdev_class_mutex);
-
if (!error) {
netdev->ref_cnt++;
*netdevp = netdev;
} else {
*netdevp = NULL;
}
+ ovs_mutex_unlock(&netdev_mutex);
+ ovs_mutex_unlock(&netdev_class_mutex);
+
return error;
}
/* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
* or NULL if none are needed. */
int
-netdev_set_config(struct netdev *netdev, const struct smap *args)
+netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->set_config) {
error = netdev->netdev_class->set_config(netdev,
args ? args : &no_args);
if (error) {
- VLOG_WARN("%s: could not set configuration (%s)",
- netdev_get_name(netdev), ovs_strerror(error));
+ VLOG_WARN_BUF(errp, "%s: could not set configuration (%s)",
+ netdev_get_name(netdev), ovs_strerror(error));
}
return error;
} else if (args && !smap_is_empty(args)) {
- VLOG_WARN("%s: arguments provided to device that is not configurable",
- netdev_get_name(netdev));
+ VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
+ netdev_get_name(netdev));
}
return 0;
}
if (!--dev->ref_cnt) {
const struct netdev_class *class = dev->netdev_class;
struct netdev_registered_class *rc;
- int old_ref_cnt;
dev->netdev_class->destruct(dev);
- shash_delete(&netdev_shash, dev->node);
+ if (dev->node) {
+ shash_delete(&netdev_shash, dev->node);
+ }
free(dev->name);
dev->netdev_class->dealloc(dev);
ovs_mutex_unlock(&netdev_mutex);
ovs_mutex_lock(&netdev_class_mutex);
rc = netdev_lookup_class(class->type);
- atomic_sub(&rc->ref_cnt, 1, &old_ref_cnt);
- ovs_assert(old_ref_cnt > 0);
+ ovs_assert(rc->ref_cnt > 0);
+ rc->ref_cnt--;
ovs_mutex_unlock(&netdev_class_mutex);
} else {
ovs_mutex_unlock(&netdev_mutex);
}
}
+/* Removes 'netdev' from the global shash and unrefs 'netdev'.
+ *
+ * This allows handler and revalidator threads to still retain references
+ * to this netdev while the main thread changes interface configuration.
+ *
+ * This function should only be called by the main thread when closing
+ * netdevs during user configuration changes. Otherwise, netdev_close should be
+ * used to close netdevs. */
+void
+netdev_remove(struct netdev *netdev)
+{
+ if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
+ if (netdev->node) {
+ shash_delete(&netdev_shash, netdev->node);
+ netdev->node = NULL;
+ netdev_change_seq_changed(netdev);
+ }
+ netdev_unref(netdev);
+ }
+}
+
/* Parses 'netdev_name_', which is of the form [type@]name into its component
* pieces. 'name' and 'type' must be freed by the caller. */
void
rx->queue_id = id;
error = netdev->netdev_class->rxq_construct(rx);
if (!error) {
- ovs_mutex_lock(&netdev_mutex);
- netdev->ref_cnt++;
- ovs_mutex_unlock(&netdev_mutex);
-
+ netdev_ref(netdev);
*rxp = rx;
return 0;
}
* This function may be set to null if it would always return EOPNOTSUPP
* anyhow. */
int
-netdev_rxq_recv(struct netdev_rxq *rx, struct ofpbuf **buffers, int *cnt)
+netdev_rxq_recv(struct netdev_rxq *rx, struct dpif_packet **buffers, int *cnt)
{
int retval;
: 0);
}
-/* Sends 'buffer' on 'netdev'. Returns 0 if successful, otherwise a positive
- * errno value. Returns EAGAIN without blocking if the packet cannot be queued
- * immediately. Returns EMSGSIZE if a partial packet was transmitted or if
- * the packet is too big or too small to transmit on the device.
+/* Sends 'buffers' on 'netdev'. Returns 0 if successful (for every packet),
+ * otherwise a positive errno value. Returns EAGAIN without blocking if
+ * at least one of the packets cannot be queued immediately. Returns EMSGSIZE
+ * if a partial packet was transmitted or if a packet is too big or too small
+ * to transmit on the device.
+ *
+ * If the function returns a non-zero value, some of the packets might have
+ * been sent anyway.
*
* To retain ownership of 'buffer' caller can set may_steal to false.
*
* Some network devices may not implement support for this function. In such
* cases this function will always return EOPNOTSUPP. */
int
-netdev_send(struct netdev *netdev, struct ofpbuf *buffer, bool may_steal)
+netdev_send(struct netdev *netdev, struct dpif_packet **buffers, int cnt,
+ bool may_steal)
{
int error;
error = (netdev->netdev_class->send
- ? netdev->netdev_class->send(netdev, buffer, may_steal)
+ ? netdev->netdev_class->send(netdev, buffers, cnt, may_steal)
: EOPNOTSUPP);
if (!error) {
COVERAGE_INC(netdev_sent);
ovs_mutex_unlock(&netdev_mutex);
}
+/* Extracts pointers to all 'netdev-vports' into an array 'vports'
+ * and returns it. Stores the size of the array into '*size'.
+ *
+ * The caller is responsible for freeing 'vports' and must close
+ * each 'netdev-vport' in the list. */
+struct netdev **
+netdev_get_vports(size_t *size)
+ OVS_EXCLUDED(netdev_mutex)
+{
+ struct netdev **vports;
+ struct shash_node *node;
+ size_t n = 0;
+
+ if (!size) {
+ return NULL;
+ }
+
+    /* Explicitly allocates a big enough chunk of memory. */
+ vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
+ ovs_mutex_lock(&netdev_mutex);
+ SHASH_FOR_EACH (node, &netdev_shash) {
+ struct netdev *dev = node->data;
+
+ if (netdev_vport_is_vport_class(dev->netdev_class)) {
+ dev->ref_cnt++;
+ vports[n] = dev;
+ n++;
+ }
+ }
+ ovs_mutex_unlock(&netdev_mutex);
+ *size = n;
+
+ return vports;
+}
+
const char *
netdev_get_type_from_name(const char *name)
{