* Released under the GPLv2 only.
*/
+#include <linux/delay.h>
+
#include "greybus.h"
+#include "greybus_trace.h"
+#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
#define GB_INTERFACE_DEVICE_ID_BAD 0xff
+#define GB_INTERFACE_AUTOSUSPEND_MS 3000
+
+/* Time required for interface to enter standby before disabling REFCLK */
+#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20
+
/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL 0
#define TOSHIBA_DMID 0x0126
#define TOSHIBA_ES2_BRIDGE_DPID 0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
-#define TOSHIBA_ES3_GPBRIDGE_DPID 0x1002
+#define TOSHIBA_ES3_GBPHY_DPID 0x1002
+static int gb_interface_hibernate_link(struct gb_interface *intf);
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
static int gb_interface_dme_attr_get(struct gb_interface *intf,
u16 attr, u32 *val)
intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}
+/*
+ * Handle the quirky ES3-bootrom "legacy" mode switch: the interface is
+ * disabled and immediately re-enabled (re-enumerated).  If re-enable fails
+ * the interface is deactivated.  Returns 0 or a negative errno from
+ * gb_interface_enable().
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
+{
+ int ret;
+
+ dev_info(&intf->dev, "legacy mode switch detected\n");
+
+ /* Mark as disconnected to prevent I/O during disable. */
+ intf->disconnected = true;
+ gb_interface_disable(intf);
+ intf->disconnected = false;
+
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
+ gb_interface_deactivate(intf);
+ }
+
+ return ret;
+}
+
+/*
+ * Handle a mailbox event reported by the SVC for this interface.
+ *
+ * A UniPro error or an unexpected mailbox value disables and deactivates
+ * the interface.  Otherwise the event is either the legacy (bootrom-quirk)
+ * mode-switch signal, handled synchronously, or the completion of a mode
+ * switch previously requested via gb_interface_request_mode_switch().
+ */
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+ u32 mailbox)
+{
+ mutex_lock(&intf->mutex);
+
+ if (result) {
+ dev_warn(&intf->dev,
+ "mailbox event with UniPro error: 0x%04x\n",
+ result);
+ goto err_disable;
+ }
+
+ if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
+ dev_warn(&intf->dev,
+ "mailbox event with unexpected value: 0x%08x\n",
+ mailbox);
+ goto err_disable;
+ }
+
+ if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
+ /* Return value deliberately ignored; errors are logged and
+ * handled inside the helper. */
+ gb_interface_legacy_mode_switch(intf);
+ goto out_unlock;
+ }
+
+ if (!intf->mode_switch) {
+ dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
+ mailbox);
+ goto err_disable;
+ }
+
+ dev_info(&intf->dev, "mode switch detected\n");
+
+ /* Wake the worker blocked in gb_interface_mode_switch_work(). */
+ complete(&intf->mode_switch_completion);
+
+out_unlock:
+ mutex_unlock(&intf->mutex);
+
+ return;
+
+err_disable:
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+}
+
+/*
+ * Worker carrying out a requested mode switch: disable the interface while
+ * keeping the control device alive, wait (with timeout) for the mailbox
+ * completion, then re-enable the interface if it is still active.
+ *
+ * Drops the device reference taken by gb_interface_request_mode_switch()
+ * on every exit path.
+ */
+static void gb_interface_mode_switch_work(struct work_struct *work)
+{
+ struct gb_interface *intf;
+ struct gb_control *control;
+ unsigned long timeout;
+ int ret;
+
+ intf = container_of(work, struct gb_interface, mode_switch_work);
+
+ mutex_lock(&intf->mutex);
+ /* Make sure interface is still enabled. */
+ if (!intf->enabled) {
+ dev_dbg(&intf->dev, "mode switch aborted\n");
+ intf->mode_switch = false;
+ mutex_unlock(&intf->mutex);
+ goto out_interface_put;
+ }
+
+ /*
+ * Prepare the control device for mode switch and make sure to get an
+ * extra reference before it goes away during interface disable.
+ */
+ control = gb_control_get(intf->control);
+ gb_control_mode_switch_prepare(control);
+ gb_interface_disable(intf);
+ mutex_unlock(&intf->mutex);
+
+ /* Mutex is released while waiting so the mailbox handler can run. */
+ timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
+ ret = wait_for_completion_interruptible_timeout(
+ &intf->mode_switch_completion, timeout);
+
+ /* Finalise control-connection mode switch. */
+ gb_control_mode_switch_complete(control);
+ gb_control_put(control);
+
+ if (ret < 0) {
+ dev_err(&intf->dev, "mode switch interrupted\n");
+ goto err_deactivate;
+ } else if (ret == 0) {
+ dev_err(&intf->dev, "mode switch timed out\n");
+ goto err_deactivate;
+ }
+
+ /* Re-enable (re-enumerate) interface if still active. */
+ mutex_lock(&intf->mutex);
+ intf->mode_switch = false;
+ if (intf->active) {
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to re-enable interface: %d\n",
+ ret);
+ gb_interface_deactivate(intf);
+ }
+ }
+ mutex_unlock(&intf->mutex);
+
+out_interface_put:
+ gb_interface_put(intf);
+
+ return;
+
+err_deactivate:
+ mutex_lock(&intf->mutex);
+ intf->mode_switch = false;
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+
+ gb_interface_put(intf);
+}
+
+/*
+ * Request an asynchronous mode switch for the interface.
+ *
+ * Queues gb_interface_mode_switch_work() on system_long_wq.  Returns
+ * -EBUSY if a mode switch is already pending or the work could not be
+ * queued, 0 otherwise.
+ */
+int gb_interface_request_mode_switch(struct gb_interface *intf)
+{
+ int ret = 0;
+
+ mutex_lock(&intf->mutex);
+ if (intf->mode_switch) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ intf->mode_switch = true;
+ reinit_completion(&intf->mode_switch_completion);
+
+ /*
+ * Get a reference to the interface device, which will be put once the
+ * mode switch is complete.
+ */
+ get_device(&intf->dev);
+
+ if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
+ put_device(&intf->dev);
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&intf->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
+
/*
* T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
* init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
struct gb_host_device *hd = intf->hd;
+ unsigned long bootrom_quirks;
int ret;
u32 value;
u16 attr;
init_status = value >> 24;
/*
- * Check if the interface is executing the quirky ES3 bootrom that
- * requires E2EFC, CSD and CSV to be disabled and that does not
- * support the interface-version request.
+ * Check if the interface is executing the quirky ES3 bootrom that,
+ * for example, requires E2EFC, CSD and CSV to be disabled.
*/
+ bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
+ GB_INTERFACE_QUIRK_FORCED_DISABLE |
+ GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH;
switch (init_status) {
case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
- intf->quirks |= GB_INTERFACE_QUIRK_NO_CPORT_FEATURES;
- intf->quirks |= GB_INTERFACE_QUIRK_NO_INTERFACE_VERSION;
+ intf->quirks |= bootrom_quirks;
break;
+ default:
+ intf->quirks &= ~bootrom_quirks;
}
/* Clear the init status. */
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
-gb_interface_attr(vendor_string, "%s");
-gb_interface_attr(product_string, "%s");
gb_interface_attr(serial_number, "0x%016llx");
-static ssize_t version_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct gb_interface *intf = to_gb_interface(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u.%u\n", intf->version_major,
- intf->version_minor);
-}
-static DEVICE_ATTR_RO(version);
-
static ssize_t voltage_now_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
}
static DEVICE_ATTR_RO(power_now);
+/*
+ * Return a human-readable name for the interface type reported by the SVC.
+ *
+ * Guard against values outside the lookup table so a corrupt or future
+ * type value cannot cause an out-of-bounds read (or a NULL return for a
+ * gap in the designated initializers); fall back to "unknown" instead.
+ */
+static const char *gb_interface_type_string(struct gb_interface *intf)
+{
+ static const char * const types[] = {
+ [GB_SVC_INTF_TYPE_UNKNOWN] = "unknown",
+ [GB_SVC_INTF_TYPE_DUMMY] = "dummy",
+ [GB_SVC_INTF_TYPE_UNIPRO] = "unipro",
+ [GB_SVC_INTF_TYPE_GREYBUS] = "greybus",
+ };
+
+ if (intf->type >= ARRAY_SIZE(types) || !types[intf->type])
+ return types[GB_SVC_INTF_TYPE_UNKNOWN];
+
+ return types[intf->type];
+}
+
+/* sysfs "interface_type" attribute: prints the SVC-reported type name. */
+static ssize_t interface_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ return sprintf(buf, "%s\n", gb_interface_type_string(intf));
+}
+static DEVICE_ATTR_RO(interface_type);
+
static struct attribute *interface_attrs[] = {
&dev_attr_ddbl1_manufacturer_id.attr,
&dev_attr_ddbl1_product_id.attr,
&dev_attr_interface_id.attr,
&dev_attr_vendor_id.attr,
&dev_attr_product_id.attr,
- &dev_attr_vendor_string.attr,
- &dev_attr_product_string.attr,
&dev_attr_serial_number.attr,
- &dev_attr_version.attr,
&dev_attr_voltage_now.attr,
&dev_attr_current_now.attr,
&dev_attr_power_now.attr,
+ &dev_attr_interface_type.attr,
NULL,
};
ATTRIBUTE_GROUPS(interface);
+/*
+ * Device release callback: frees the interface structure once the last
+ * reference to its struct device is dropped.
+ */
+static void gb_interface_release(struct device *dev)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ trace_gb_interface_release(intf);
-// FIXME, odds are you don't want to call this function, rework the caller to
-// not need it please.
-struct gb_interface *gb_interface_find(struct gb_host_device *hd,
- u8 interface_id)
+ kfree(intf);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Runtime-suspend callback: quiesce the interface, hibernate its UniPro
+ * link and gate the reference clock.
+ *
+ * NOTE: guarded by CONFIG_PM, not CONFIG_PM_RUNTIME -- the latter config
+ * symbol no longer exists (it was folded into CONFIG_PM in v3.19), so
+ * using it would compile these callbacks out while SET_RUNTIME_PM_OPS
+ * still references them, breaking the build whenever CONFIG_PM is set.
+ *
+ * NOTE(review): failures after the link has been hibernated return without
+ * re-adding timesync or resuming the control connection -- presumably the
+ * interface is considered unrecoverable at that point; confirm.
+ */
+static int gb_interface_suspend(struct device *dev)
{
- struct gb_interface *intf;
+ struct gb_interface *intf = to_gb_interface(dev);
+ int ret, timesync_ret;
+
+ ret = gb_control_interface_suspend_prepare(intf->control);
+ if (ret)
+ return ret;
+
+ gb_timesync_interface_remove(intf);
+
+ ret = gb_control_suspend(intf->control);
+ if (ret)
+ goto err_hibernate_abort;
+
+ ret = gb_interface_hibernate_link(intf);
+ if (ret)
+ return ret;
+
+ /* Delay to allow interface to enter standby before disabling refclk */
+ msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
+
+ ret = gb_interface_refclk_set(intf, false);
+ if (ret)
+ return ret;
- list_for_each_entry(intf, &hd->interfaces, links)
- if (intf->interface_id == interface_id)
- return intf;
+ return 0;
+
+err_hibernate_abort:
+ gb_control_interface_hibernate_abort(intf->control);
+
+ timesync_ret = gb_timesync_interface_add(intf);
+ if (timesync_ret) {
+ dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
+ return timesync_ret;
+ }
- return NULL;
+ return ret;
}
-static void gb_interface_release(struct device *dev)
+/*
+ * Runtime-resume callback: re-enable the reference clock, resume the
+ * interface via the SVC and the control connection, then re-add the
+ * interface to timesync and force a FrameTime resynchronisation.
+ */
+static int gb_interface_resume(struct device *dev)
{
 struct gb_interface *intf = to_gb_interface(dev);
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
- kfree(intf->product_string);
- kfree(intf->vendor_string);
+ ret = gb_interface_refclk_set(intf, true);
+ if (ret)
+ return ret;
- if (intf->control)
- gb_control_put(intf->control);
+ ret = gb_svc_intf_resume(svc, intf->interface_id);
+ if (ret)
+ return ret;
- kfree(intf);
+ ret = gb_control_resume(intf->control);
+ if (ret)
+ return ret;
+
+ ret = gb_timesync_interface_add(intf);
+ if (ret) {
+ dev_err(dev, "failed to add to timesync: %d\n", ret);
+ return ret;
+ }
+
+ ret = gb_timesync_schedule_synchronous(intf);
+ if (ret) {
+ dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
+/* Runtime-idle callback: defer suspend via the autosuspend timer. */
+static int gb_interface_runtime_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ return 0;
+}
+#endif
+
+/* Runtime PM ops; SET_RUNTIME_PM_OPS expands to nothing without CONFIG_PM. */
+static const struct dev_pm_ops gb_interface_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
+ gb_interface_runtime_idle)
+};
+
struct device_type greybus_interface_type = {
 .name = "greybus_interface",
 .release = gb_interface_release,
+ .pm = &gb_interface_pm_ops,
};
/*
 *
 * Returns a pointer to the new interfce or a null pointer if a
 * failure occurs due to memory exhaustion.
- *
- * Locking: Caller ensures serialisation with gb_interface_remove and
- * gb_interface_find.
 */
-struct gb_interface *gb_interface_create(struct gb_host_device *hd,
+struct gb_interface *gb_interface_create(struct gb_module *module,
 u8 interface_id)
{
+ struct gb_host_device *hd = module->hd;
 struct gb_interface *intf;
 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 return NULL;
 intf->hd = hd; /* XXX refcount? */
+ intf->module = module;
 intf->interface_id = interface_id;
 INIT_LIST_HEAD(&intf->bundles);
 INIT_LIST_HEAD(&intf->manifest_descs);
+ mutex_init(&intf->mutex);
+ INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
+ init_completion(&intf->mode_switch_completion);
 /* Invalid device id to start with */
 intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+ /* Interfaces are now children of their module, not the host device. */
- intf->dev.parent = &hd->dev;
+ intf->dev.parent = &module->dev;
 intf->dev.bus = &greybus_bus_type;
 intf->dev.type = &greybus_interface_type;
 intf->dev.groups = interface_groups;
- intf->dev.dma_mask = hd->dev.dma_mask;
+ intf->dev.dma_mask = module->dev.dma_mask;
 device_initialize(&intf->dev);
- dev_set_name(&intf->dev, "%d-%d", hd->bus_id, interface_id);
+ dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
+ interface_id);
- intf->control = gb_control_create(intf);
- if (!intf->control) {
- put_device(&intf->dev);
- return NULL;
- }
+ /* Control device creation moved to gb_interface_enable(). */
+ pm_runtime_set_autosuspend_delay(&intf->dev,
+ GB_INTERFACE_AUTOSUSPEND_MS);
- list_add(&intf->links, &hd->interfaces);
+ trace_gb_interface_create(intf);
 return intf;
}
+/* Switch the interface V_SYS power rail on or off via the SVC. */
+static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
+{
+ int err;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ err = gb_svc_intf_vsys_set(intf->hd->svc, intf->interface_id, enable);
+ if (!err)
+ return 0;
+
+ dev_err(&intf->dev, "failed to set v_sys: %d\n", err);
+
+ return err;
+}
+
+/* Enable or disable the interface reference clock via the SVC. */
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Enable or disable the interface UniPro port via the SVC. */
+static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Issue the SVC interface-activate operation and record the reported
+ * interface type.  Only GB_SVC_INTF_TYPE_GREYBUS is accepted; dummy and
+ * plain-UniPro interfaces are rejected with -ENODEV and -EAGAIN
+ * respectively, and unknown values are normalised to TYPE_UNKNOWN.
+ */
+static int gb_interface_activate_operation(struct gb_interface *intf)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ u8 type;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s\n", __func__);
+
+ ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
+ if (ret) {
+ dev_err(&intf->dev, "failed to activate: %d\n", ret);
+ return ret;
+ }
+
+ intf->type = type;
+
+ switch (type) {
+ case GB_SVC_INTF_TYPE_DUMMY:
+ dev_info(&intf->dev, "dummy interface detected\n");
+ /* FIXME: handle as an error for now */
+ return -ENODEV;
+ case GB_SVC_INTF_TYPE_UNIPRO:
+ dev_err(&intf->dev, "interface type UniPro not supported\n");
+ /* FIXME: check if this is a Toshiba bridge before retrying? */
+ return -EAGAIN;
+ case GB_SVC_INTF_TYPE_GREYBUS:
+ break;
+ default:
+ dev_err(&intf->dev, "unknown interface type: %u\n", type);
+ intf->type = GB_SVC_INTF_TYPE_UNKNOWN;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Put the interface UniPro link into hibernate via the SVC. */
+static int gb_interface_hibernate_link(struct gb_interface *intf)
+{
+ struct gb_svc *svc = intf->hd->svc;
+
+ return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
+}
+
+/*
+ * Activate an interface.
+ *
+ * Powers up the interface in order (V_SYS, refclk, UniPro), runs the SVC
+ * activate operation, reads the DME attributes and creates the route.
+ * On error, everything already brought up is torn down in reverse order.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
int gb_interface_activate(struct gb_interface *intf)
{
 int ret;
- ret = gb_interface_read_dme(intf);
+ if (intf->ejected)
+ return -ENODEV;
+
+ ret = gb_interface_vsys_set(intf, true);
 if (ret)
 return ret;
+ ret = gb_interface_refclk_set(intf, true);
+ if (ret)
+ goto err_vsys_disable;
+
+ ret = gb_interface_unipro_set(intf, true);
+ if (ret)
+ goto err_refclk_disable;
+
+ ret = gb_interface_activate_operation(intf);
+ if (ret)
+ goto err_unipro_disable;
+
+ ret = gb_interface_read_dme(intf);
+ if (ret)
+ goto err_hibernate_link;
+
 ret = gb_interface_route_create(intf);
 if (ret)
- return ret;
+ goto err_hibernate_link;
+
+ intf->active = true;
+
+ trace_gb_interface_activate(intf);
 return 0;
+
+err_hibernate_link:
+ gb_interface_hibernate_link(intf);
+err_unipro_disable:
+ gb_interface_unipro_set(intf, false);
+err_refclk_disable:
+ gb_interface_refclk_set(intf, false);
+err_vsys_disable:
+ gb_interface_vsys_set(intf, false);
+
+ return ret;
}
+/*
+ * Deactivate an interface.
+ *
+ * Reverses gb_interface_activate(): destroys the route, hibernates the
+ * link and powers down UniPro, refclk and V_SYS.  A no-op if the
+ * interface is not active.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
void gb_interface_deactivate(struct gb_interface *intf)
{
+ if (!intf->active)
+ return;
+
+ trace_gb_interface_deactivate(intf);
+
+ /* Abort any ongoing mode switch. */
+ if (intf->mode_switch)
+ complete(&intf->mode_switch_completion);
+
 gb_interface_route_destroy(intf);
+ gb_interface_hibernate_link(intf);
+ gb_interface_unipro_set(intf, false);
+ gb_interface_refclk_set(intf, false);
+ gb_interface_vsys_set(intf, false);
+
+ intf->active = false;
}
/*
- * Enable an interface by enabling its control connection and fetching the
- * manifest and other information over it.
+ * Enable an interface by enabling its control connection, fetching the
+ * manifest and other information over it, and finally registering its child
+ * devices.
+ *
+ * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
+ struct gb_control *control;
 struct gb_bundle *bundle, *tmp;
 int ret, size;
 void *manifest;
}
 /* Establish control connection */
+ control = gb_control_create(intf);
+ if (IS_ERR(control)) {
+ dev_err(&intf->dev, "failed to create control device: %ld\n",
+ PTR_ERR(control));
+ return PTR_ERR(control);
+ }
+ intf->control = control;
+
 ret = gb_control_enable(intf->control);
 if (ret)
- return ret;
+ goto err_put_control;
 /* Get manifest size using control protocol on CPort */
 size = gb_control_get_manifest_size_operation(intf);
 goto err_destroy_bundles;
 }
- ret = gb_control_get_interface_version_operation(intf);
+ ret = gb_control_get_bundle_versions(intf->control);
 if (ret)
 goto err_destroy_bundles;
- ret = gb_control_get_bundle_versions(intf->control);
+ /* Register the control device and any bundles */
+ ret = gb_control_add(intf->control);
 if (ret)
 goto err_destroy_bundles;
+ ret = gb_timesync_interface_add(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
+ goto err_destroy_bundles;
+ }
+
+ /* Runtime PM is enabled here and torn down in gb_interface_disable(). */
+ pm_runtime_use_autosuspend(&intf->dev);
+ pm_runtime_get_noresume(&intf->dev);
+ pm_runtime_set_active(&intf->dev);
+ pm_runtime_enable(&intf->dev);
+
+ /* Register bundles; a failed bundle is destroyed but does not abort. */
+ list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
+ ret = gb_bundle_add(bundle);
+ if (ret) {
+ gb_bundle_destroy(bundle);
+ continue;
+ }
+ }
+
 kfree(manifest);
+ intf->enabled = true;
+
+ pm_runtime_put(&intf->dev);
+
+ trace_gb_interface_enable(intf);
+
 return 0;
err_destroy_bundles:
 kfree(manifest);
err_disable_control:
 gb_control_disable(intf->control);
+err_put_control:
+ gb_control_put(intf->control);
+ intf->control = NULL;
 return ret;
}
-/* Disable an interface and destroy its bundles. */
+/*
+ * Disable an interface and destroy its bundles.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
void gb_interface_disable(struct gb_interface *intf)
{
 struct gb_bundle *bundle;
 struct gb_bundle *next;
- /*
- * Disable the control-connection early to avoid operation timeouts
- * when the interface is already gone.
- */
- if (intf->disconnected)
- gb_control_disable(intf->control);
+ if (!intf->enabled)
+ return;
+
+ trace_gb_interface_disable(intf);
+
+ pm_runtime_get_sync(&intf->dev);
+
+ /* Set disconnected flag to avoid I/O during connection tear down. */
+ if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
+ intf->disconnected = true;
 list_for_each_entry_safe(bundle, next, &intf->bundles, links)
 gb_bundle_destroy(bundle);
+ /* Skip the deactivate-prepare handshake when switching modes or gone. */
+ if (!intf->mode_switch && !intf->disconnected)
+ gb_control_interface_deactivate_prepare(intf->control);
+
+ gb_timesync_interface_remove(intf);
+ gb_control_del(intf->control);
 gb_control_disable(intf->control);
+ gb_control_put(intf->control);
+ intf->control = NULL;
+
+ intf->enabled = false;
+
+ pm_runtime_disable(&intf->dev);
+ pm_runtime_set_suspended(&intf->dev);
+ pm_runtime_dont_use_autosuspend(&intf->dev);
+ pm_runtime_put_noidle(&intf->dev);
+}
+
+/* Enable TimeSync on an Interface control connection. */
+int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+ return gb_control_timesync_enable(intf->control, count,
+ frame_time, strobe_delay,
+ refclk);
+}
+
+/* Disable TimeSync on an Interface control connection. */
+int gb_interface_timesync_disable(struct gb_interface *intf)
+{
+ return gb_control_timesync_disable(intf->control);
}
-/* Register an interface and its bundles. */
+/* Transmit the Authoritative FrameTime via an Interface control connection. */
+int gb_interface_timesync_authoritative(struct gb_interface *intf,
+ u64 *frame_time)
+{
+ /* Thin pass-through; intf->control must be valid (interface enabled). */
+ return gb_control_timesync_authoritative(intf->control,
+ frame_time);
+}
+
+/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
- struct gb_bundle *bundle, *tmp;
 int ret;
 ret = device_add(&intf->dev);
 return ret;
 }
+ trace_gb_interface_add(intf);
+
 dev_info(&intf->dev, "Interface added: VID=0x%08x, PID=0x%08x\n",
 intf->vendor_id, intf->product_id);
 dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
 intf->ddbl1_manufacturer_id, intf->ddbl1_product_id);
+ /* Bundle registration moved to gb_interface_enable(). */
- list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
- ret = gb_bundle_add(bundle);
- if (ret) {
- gb_bundle_destroy(bundle);
- continue;
- }
- }
-
 return 0;
}
-/* Deregister an interface and drop its reference. */
+/* Deregister an interface (does not drop the caller's reference). */
void gb_interface_del(struct gb_interface *intf)
{
 if (device_is_registered(&intf->dev)) {
+ trace_gb_interface_del(intf);
+
 device_del(&intf->dev);
 dev_info(&intf->dev, "Interface removed\n");
 }
+}
- list_del(&intf->links);
-
+/* Drop a reference to the interface, e.g. one taken by gb_interface_create(). */
+void gb_interface_put(struct gb_interface *intf)
+{
 put_device(&intf->dev);
}