2 * Greybus "AP" USB driver for "ES2" controller chips
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
7 * Released under the GPLv2 only.
9 #include <linux/kthread.h>
10 #include <linux/sizes.h>
11 #include <linux/usb.h>
12 #include <linux/kfifo.h>
13 #include <linux/debugfs.h>
14 #include <asm/unaligned.h>
17 #include "kernel_ver.h"
18 #include "connection.h"
19 #include "greybus_trace.h"
22 /* Fixed CPort numbers */
23 #define ES2_CPORT_CDSI0 16
24 #define ES2_CPORT_CDSI1 17
26 /* Memory sizes for the buffers sent to/from the ES2 controller */
27 #define ES2_GBUF_MSG_SIZE_MAX 2048
/* Devices this driver binds to: vendor 0x18d1 (Google), product 0x1eaf */
29 static const struct usb_device_id id_table[] = {
30 { USB_DEVICE(0x18d1, 0x1eaf) },
33 MODULE_DEVICE_TABLE(usb, id_table);
/* Capacity of the kfifo that buffers APB1 log data (SZ_16K bytes) */
35 #define APB1_LOG_SIZE SZ_16K
37 /* Number of bulk in and bulk out couple */
41 * Number of CPort IN urbs in flight at any point in time.
42 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
45 #define NUM_CPORT_IN_URB 4
47 /* Number of CPort OUT urbs in flight at any point in time.
48 * Adjust if we get messages saying we are out of urbs in the system log.
50 #define NUM_CPORT_OUT_URB (8 * NUM_BULKS)
/* Per-bulk-IN-endpoint state: the endpoint address plus a fixed pool of
 * receive urbs and their backing buffers. */
53 * @endpoint: bulk in endpoint for CPort data
54 * @urb: array of urbs for the CPort in messages
55 * @buffer: array of buffers for the @cport_in_urb urbs
59 struct urb *urb[NUM_CPORT_IN_URB];
60 u8 *buffer[NUM_CPORT_IN_URB];
/* Per-bulk-OUT-endpoint state: only the endpoint address is kept; OUT urbs
 * live in the shared pool in struct es2_ap_dev. */
64 * @endpoint: bulk out endpoint for CPort data
66 struct es2_cport_out {
71 * es2_ap_dev - ES2 USB Bridge to AP structure
72 * @usb_dev: pointer to the USB device we are.
73 * @usb_intf: pointer to the USB interface we are bound to.
74 * @hd: pointer to our gb_host_device structure
76 * @cport_in: endpoint, urbs and buffer for cport in messages
77 * @cport_out: endpoint for for cport out messages
78 * @cport_out_urb: array of urbs for the CPort out messages
79 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
81 * @cport_out_urb_cancelled: array of flags indicating whether the
82 * corresponding @cport_out_urb is being cancelled
83 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
85 * @apb_log_task: task pointer for logging thread
86 * @apb_log_dentry: file system entry for the log file interface
87 * @apb_log_enable_dentry: file system entry for enabling logging
88 * @apb_log_fifo: kernel FIFO to carry logged data
91 struct usb_device *usb_dev;
92 struct usb_interface *usb_intf;
93 struct gb_host_device *hd;
95 struct es2_cport_in cport_in[NUM_BULKS];
96 struct es2_cport_out cport_out[NUM_BULKS];
97 struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
98 bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
99 bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
/* Protects the busy/cancelled flag arrays above and message->hcpriv */
100 spinlock_t cport_out_urb_lock;
106 struct task_struct *apb_log_task;
107 struct dentry *apb_log_dentry;
108 struct dentry *apb_log_enable_dentry;
109 DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
/* Wire formats for AP -> APBridge vendor control requests. */
113 * cport_to_ep - information about cport to endpoints mapping
114 * @cport_id: the id of cport to map to endpoints
115 * @endpoint_in: the endpoint number to use for in transfer
116 * @endpoint_out: he endpoint number to use for out transfer
125 * timesync_enable_request - Enable timesync in an APBridge
126 * @count: number of TimeSync Pulses to expect
127 * @frame_time: the initial FrameTime at the first TimeSync Pulse
128 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
129 * @refclk: The AP mandated reference clock to run FrameTime at
131 struct timesync_enable_request {
139 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
140 * @frame_time: An array of authoritative FrameTimes provided by the SVC
141 * and relayed to the APBridge by the AP
143 struct timesync_authoritative_request {
/* Little-endian on the wire; converted with cpu_to_le64() by the sender */
144 __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
/* The es2_ap_dev lives in the hd's private area (hd_priv), so this is just
 * a pointer cast — no allocation or refcounting involved. */
147 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
149 return (struct es2_ap_dev *)&hd->hd_priv;
/* Forward declarations for functions defined later in this file */
152 static void cport_out_callback(struct urb *urb);
153 static void usb_log_enable(struct es2_ap_dev *es2);
154 static void usb_log_disable(struct es2_ap_dev *es2);
156 /* Get the endpoints pair mapped to the cport */
157 static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
158 {
159 if (cport_id >= es2->hd->num_cports)
161 return es2->cport_to_ep[cport_id];
164 #define ES2_TIMEOUT 500 /* 500 ms for the SVC to do something */
166 /* Disable for now until we work all of this out to keep a warning-free build */
168 /* Test if the endpoints pair is already mapped to a cport */
169 static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
/* Linear scan of the cport_to_ep table; small enough that this is fine */
173 for (i = 0; i < es2->hd->num_cports; i++) {
174 if (es2->cport_to_ep[i] == ep_pair)
180 /* Configure the endpoint mapping and send the request to APBridge */
181 static int map_cport_to_ep(struct es2_ap_dev *es2,
182 u16 cport_id, int ep_pair)
185 struct cport_to_ep *cport_to_ep;
/* Validate the pair index and cport id before touching anything */
187 if (ep_pair < 0 || ep_pair >= NUM_BULKS)
189 if (cport_id >= es2->hd->num_cports)
/* A non-muxed (non-zero) pair may only be claimed by one cport at a time */
191 if (ep_pair && ep_pair_in_use(es2, ep_pair))
/* usb_control_msg() needs a DMA-able buffer, so heap-allocate the request */
194 cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
198 es2->cport_to_ep[cport_id] = ep_pair;
199 cport_to_ep->cport_id = cpu_to_le16(cport_id);
200 cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
201 cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
203 retval = usb_control_msg(es2->usb_dev,
204 usb_sndctrlpipe(es2->usb_dev, 0),
205 GB_APB_REQUEST_EP_MAPPING,
206 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
209 sizeof(*cport_to_ep),
/* usb_control_msg() returns bytes transferred on success */
211 if (retval == sizeof(*cport_to_ep))
218 /* Unmap a cport: use the muxed endpoints pair */
219 static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
/* Pair 0 is the shared/muxed endpoint pair */
221 return map_cport_to_ep(es2, cport_id, 0);
/* Send a vendor control request synchronously; the caller's buffer is copied
 * into a freshly kmalloc'ed one because control transfers need DMA-able
 * memory (stack buffers are not allowed). */
225 static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
227 struct usb_device *udev = es2->usb_dev;
231 data = kmalloc(size, GFP_KERNEL);
234 memcpy(data, req, size);
236 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
238 USB_DIR_OUT | USB_TYPE_VENDOR |
240 0, 0, data, size, ES2_TIMEOUT);
242 dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
/* Completion handler for async control urbs: the setup packet (dr) was
 * passed as urb->context so it can be freed here. */
250 static void ap_urb_complete(struct urb *urb)
252 struct usb_ctrlrequest *dr = urb->context;
/* Async variant: one kmalloc holds both the setup packet and, immediately
 * after it, the payload copy; GFP_ATOMIC because this may be called from
 * atomic context. */
258 static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
260 struct usb_device *udev = es2->usb_dev;
262 struct usb_ctrlrequest *dr;
266 urb = usb_alloc_urb(0, GFP_ATOMIC);
270 dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
/* Payload lives right after the setup packet in the same allocation */
276 buf = (u8 *)dr + sizeof(*dr);
277 memcpy(buf, req, size);
280 dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
283 dr->wLength = cpu_to_le16(size);
285 usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
286 (unsigned char *)dr, buf, size,
287 ap_urb_complete, dr);
288 retval = usb_submit_urb(urb, GFP_ATOMIC);
/* Dispatch to the async or sync path as requested by the caller */
296 static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
299 struct es2_ap_dev *es2 = hd_to_es2(hd);
302 return output_async(es2, req, size, cmd);
304 return output_sync(es2, req, size, cmd);
/* Submit all pre-allocated IN urbs for one bulk-in endpoint so the bridge
 * can start delivering CPort data; on failure, unwind the ones already
 * submitted (the --i loop below). */
307 static int es2_cport_in_enable(struct es2_ap_dev *es2,
308 struct es2_cport_in *cport_in)
314 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
315 urb = cport_in->urb[i];
317 ret = usb_submit_urb(urb, GFP_KERNEL);
319 dev_err(&es2->usb_dev->dev,
320 "failed to submit in-urb: %d\n", ret);
/* Error path: kill the urbs submitted before the failure */
328 for (--i; i >= 0; --i) {
329 urb = cport_in->urb[i];
/* Stop reception on one bulk-in endpoint by killing each of its urbs */
336 static void es2_cport_in_disable(struct es2_ap_dev *es2,
337 struct es2_cport_in *cport_in)
342 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
343 urb = cport_in->urb[i];
/* Grab an OUT urb: prefer a free one from the fixed pool (marked busy under
 * the spinlock); if the pool is exhausted, fall back to a dynamic
 * usb_alloc_urb() which free_urb() will release later. */
348 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
350 struct urb *urb = NULL;
354 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
356 /* Look in our pool of allocated urbs first, as that's the "fastest" */
357 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
358 if (es2->cport_out_urb_busy[i] == false &&
359 es2->cport_out_urb_cancelled[i] == false) {
360 es2->cport_out_urb_busy[i] = true;
361 urb = es2->cport_out_urb[i];
365 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
370 * Crap, pool is empty, complain to the syslog and go allocate one
371 * dynamically as we have to succeed.
373 dev_dbg(&es2->usb_dev->dev,
374 "No free CPort OUT urbs, having to dynamically allocate one!\n");
375 return usb_alloc_urb(0, gfp_mask);
/* Return an OUT urb: pool members are just marked not-busy; anything else
 * was dynamically allocated and must be freed for real. */
378 static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
383 * See if this was an urb in our pool, if so mark it "free", otherwise
384 * we need to free it ourselves.
386 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
387 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
388 if (urb == es2->cport_out_urb[i]) {
389 es2->cport_out_urb_busy[i] = false;
394 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
396 /* If urb is not NULL, then we need to free this urb */
401 * We (ab)use the operation-message header pad bytes to transfer the
402 * cport id in order to minimise overhead.
405 gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
/* Only pad[0] is written — CPort count is capped at U8_MAX elsewhere so a
 * single byte suffices */
407 header->pad[0] = cport_id;
410 /* Clear the pad bytes used for the CPort id */
411 static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
416 /* Extract the CPort id packed into the header, and clear it */
417 static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
419 u16 cport_id = header->pad[0];
421 gb_message_cport_clear(header);
427 * Returns zero if the message was successfully queued, or a negative errno
430 static int message_send(struct gb_host_device *hd, u16 cport_id,
431 struct gb_message *message, gfp_t gfp_mask)
433 struct es2_ap_dev *es2 = hd_to_es2(hd);
434 struct usb_device *udev = es2->usb_dev;
442 * The data actually transferred will include an indication
443 * of where the data should be sent. Do one last check of
444 * the target CPort id before filling it in.
446 if (!cport_id_valid(hd, cport_id)) {
447 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
451 /* Find a free urb */
452 urb = next_free_urb(es2, gfp_mask);
/* Record the in-flight urb on the message so message_cancel() can find it;
 * hcpriv is protected by cport_out_urb_lock */
456 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
457 message->hcpriv = urb;
458 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
460 /* Pack the cport id into the message header */
461 gb_message_cport_pack(message->header, cport_id);
463 buffer_size = sizeof(*message->header) + message->payload_size;
/* Route the transfer out of the bulk endpoint mapped to this cport */
465 ep_pair = cport_to_ep_pair(es2, cport_id);
466 usb_fill_bulk_urb(urb, udev,
467 usb_sndbulkpipe(udev,
468 es2->cport_out[ep_pair].endpoint),
469 message->buffer, buffer_size,
470 cport_out_callback, message);
/* Force a zero-length packet when the message is a multiple of the
 * endpoint's max packet size, so the bridge sees the transfer end */
471 urb->transfer_flags |= URB_ZERO_PACKET;
472 trace_gb_host_device_send(hd, cport_id, buffer_size);
473 retval = usb_submit_urb(urb, gfp_mask);
475 dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
/* Submission failed: detach the urb from the message and undo the
 * header packing before returning the error */
477 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
478 message->hcpriv = NULL;
479 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
482 gb_message_cport_clear(message->header);
491 * Can not be called in atomic context.
493 static void message_cancel(struct gb_message *message)
495 struct gb_host_device *hd = message->operation->connection->hd;
496 struct es2_ap_dev *es2 = hd_to_es2(hd);
/* Take the urb pointer under the lock so it cannot be completed and
 * recycled underneath us */
502 spin_lock_irq(&es2->cport_out_urb_lock);
503 urb = message->hcpriv;
505 /* Prevent dynamically allocated urb from being deallocated. */
508 /* Prevent pre-allocated urb from being reused. */
509 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
510 if (urb == es2->cport_out_urb[i]) {
511 es2->cport_out_urb_cancelled[i] = true;
515 spin_unlock_irq(&es2->cport_out_urb_lock);
/* If the urb came from the pool, clear its cancelled flag now that the
 * kill has completed, making it available again */
519 if (i < NUM_CPORT_OUT_URB) {
520 spin_lock_irq(&es2->cport_out_urb_lock);
521 es2->cport_out_urb_cancelled[i] = false;
522 spin_unlock_irq(&es2->cport_out_urb_lock);
/* Ask the APBridge to reset a cport via a vendor control request. The
 * switch below exempts the fixed/special cports (SVC, CDSI0, CDSI1) from
 * being reset. */
528 static int cport_reset(struct gb_host_device *hd, u16 cport_id)
530 struct es2_ap_dev *es2 = hd_to_es2(hd);
531 struct usb_device *udev = es2->usb_dev;
535 case GB_SVC_CPORT_ID:
536 case ES2_CPORT_CDSI0:
537 case ES2_CPORT_CDSI1:
541 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
542 GB_APB_REQUEST_RESET_CPORT,
543 USB_DIR_OUT | USB_TYPE_VENDOR |
544 USB_RECIP_INTERFACE, cport_id, 0,
545 NULL, 0, ES2_TIMEOUT),
547 dev_err(&udev->dev, "failed to reset cport %u: %d\n", cport_id,
/* Allocate a cport id. CDSI0/CDSI1 are never handed out generically, but
 * an offloaded CDSI1 connection may claim ES2_CPORT_CDSI1 explicitly (at
 * most one at a time, tracked by cdsi1_in_use). Otherwise a free id is
 * taken from the hd's ida map, optionally pinned to a requested id. */
555 static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
558 struct es2_ap_dev *es2 = hd_to_es2(hd);
559 struct ida *id_map = &hd->cport_id_map;
560 int ida_start, ida_end;
563 case ES2_CPORT_CDSI0:
564 case ES2_CPORT_CDSI1:
565 dev_err(&hd->dev, "cport %d not available\n", cport_id);
569 if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
570 flags & GB_CONNECTION_FLAG_CDSI1) {
571 if (es2->cdsi1_in_use) {
572 dev_err(&hd->dev, "CDSI1 already in use\n");
576 es2->cdsi1_in_use = true;
578 return ES2_CPORT_CDSI1;
583 ida_end = hd->num_cports;
/* Caller requested a specific id: constrain the ida range to exactly it */
584 } else if (cport_id < hd->num_cports) {
585 ida_start = cport_id;
586 ida_end = cport_id + 1;
588 dev_err(&hd->dev, "cport %d not available\n", cport_id);
592 return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
/* Release a cport id; CDSI1 just drops the in-use flag since it is never
 * placed in the ida map */
595 static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
597 struct es2_ap_dev *es2 = hd_to_es2(hd);
600 case ES2_CPORT_CDSI1:
601 es2->cdsi1_in_use = false;
605 ida_simple_remove(&hd->cport_id_map, cport_id);
/* Enabling a cport starts with resetting it on the bridge */
608 static int cport_enable(struct gb_host_device *hd, u16 cport_id)
612 retval = cport_reset(hd, cport_id);
/* Tell the APBridge to start latency-tagging traffic on this cport */
619 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
622 struct es2_ap_dev *es2 = hd_to_es2(hd);
623 struct usb_device *udev = es2->usb_dev;
625 if (!cport_id_valid(hd, cport_id)) {
626 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
630 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
631 GB_APB_REQUEST_LATENCY_TAG_EN,
632 USB_DIR_OUT | USB_TYPE_VENDOR |
633 USB_RECIP_INTERFACE, cport_id, 0, NULL,
637 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
/* Mirror of latency_tag_enable using the _DIS request */
642 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
645 struct es2_ap_dev *es2 = hd_to_es2(hd);
646 struct usb_device *udev = es2->usb_dev;
648 if (!cport_id_valid(hd, cport_id)) {
649 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
653 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
654 GB_APB_REQUEST_LATENCY_TAG_DIS,
655 USB_DIR_OUT | USB_TYPE_VENDOR |
656 USB_RECIP_INTERFACE, cport_id, 0, NULL,
660 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
/* Enable bridge-side CPort features (E2EFC etc. per Greybus spec —
 * exact feature set not visible here) for the given cport */
665 static int cport_features_enable(struct gb_host_device *hd, u16 cport_id)
668 struct es2_ap_dev *es2 = hd_to_es2(hd);
669 struct usb_device *udev = es2->usb_dev;
671 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
672 GB_APB_REQUEST_CPORT_FEAT_EN,
673 USB_DIR_OUT | USB_TYPE_VENDOR |
674 USB_RECIP_INTERFACE, cport_id, 0, NULL,
677 dev_err(&udev->dev, "Cannot enable CPort features for cport %u: %d\n",
/* Mirror of cport_features_enable using the _DIS request */
682 static int cport_features_disable(struct gb_host_device *hd, u16 cport_id)
685 struct es2_ap_dev *es2 = hd_to_es2(hd);
686 struct usb_device *udev = es2->usb_dev;
688 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
689 GB_APB_REQUEST_CPORT_FEAT_DIS,
690 USB_DIR_OUT | USB_TYPE_VENDOR |
691 USB_RECIP_INTERFACE, cport_id, 0, NULL,
695 "Cannot disable CPort features for cport %u: %d\n",
/* Start TimeSync on the APBridge: marshal the parameters into a
 * little-endian request struct (heap-allocated for DMA safety) and send it
 * down the control pipe. */
700 static int timesync_enable(struct gb_host_device *hd, u8 count,
701 u64 frame_time, u32 strobe_delay, u32 refclk)
704 struct es2_ap_dev *es2 = hd_to_es2(hd);
705 struct usb_device *udev = es2->usb_dev;
706 struct gb_control_timesync_enable_request *request;
708 request = kzalloc(sizeof(*request), GFP_KERNEL);
712 request->count = count;
713 request->frame_time = cpu_to_le64(frame_time);
714 request->strobe_delay = cpu_to_le32(strobe_delay);
715 request->refclk = cpu_to_le32(refclk);
716 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
717 REQUEST_TIMESYNC_ENABLE,
718 USB_DIR_OUT | USB_TYPE_VENDOR |
719 USB_RECIP_INTERFACE, 0, 0, request,
720 sizeof(*request), ES2_TIMEOUT);
722 dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
/* Stop TimeSync on the APBridge; no payload needed */
728 static int timesync_disable(struct gb_host_device *hd)
731 struct es2_ap_dev *es2 = hd_to_es2(hd);
732 struct usb_device *udev = es2->usb_dev;
734 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
735 REQUEST_TIMESYNC_DISABLE,
736 USB_DIR_OUT | USB_TYPE_VENDOR |
737 USB_RECIP_INTERFACE, 0, 0, NULL,
740 dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
/* Relay the SVC's authoritative FrameTimes (one per strobe) to the bridge */
745 static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
748 struct es2_ap_dev *es2 = hd_to_es2(hd);
749 struct usb_device *udev = es2->usb_dev;
750 struct timesync_authoritative_request *request;
752 request = kzalloc(sizeof(*request), GFP_KERNEL);
756 for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
757 request->frame_time[i] = cpu_to_le64(frame_time[i]);
759 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
760 REQUEST_TIMESYNC_AUTHORITATIVE,
761 USB_DIR_OUT | USB_TYPE_VENDOR |
762 USB_RECIP_INTERFACE, 0, 0, request,
763 sizeof(*request), ES2_TIMEOUT);
765 dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
/* Read back the bridge's FrameTime at its most recent TimeSync event; the
 * 8-byte response is heap-allocated for DMA safety and converted from
 * little-endian before being returned through *frame_time. */
771 static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
774 struct es2_ap_dev *es2 = hd_to_es2(hd);
775 struct usb_device *udev = es2->usb_dev;
776 __le64 *response_frame_time;
778 response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
779 if (!response_frame_time)
782 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
783 REQUEST_TIMESYNC_GET_LAST_EVENT,
784 USB_DIR_IN | USB_TYPE_VENDOR |
785 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
786 sizeof(*response_frame_time), ES2_TIMEOUT);
788 if (retval != sizeof(*response_frame_time)) {
789 dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
797 *frame_time = le64_to_cpu(*response_frame_time);
800 kfree(response_frame_time);
/* Host-device driver ops handed to greybus core; hd_priv_size makes core
 * allocate our es2_ap_dev inside the gb_host_device (see hd_to_es2()). */
804 static struct gb_hd_driver es2_driver = {
805 .hd_priv_size = sizeof(struct es2_ap_dev),
806 .message_send = message_send,
807 .message_cancel = message_cancel,
808 .cport_allocate = es2_cport_allocate,
809 .cport_release = es2_cport_release,
810 .cport_enable = cport_enable,
811 .latency_tag_enable = latency_tag_enable,
812 .latency_tag_disable = latency_tag_disable,
814 .cport_features_enable = cport_features_enable,
815 .cport_features_disable = cport_features_disable,
816 .timesync_enable = timesync_enable,
817 .timesync_disable = timesync_disable,
818 .timesync_authoritative = timesync_authoritative,
819 .timesync_get_last_event = timesync_get_last_event,
822 /* Common function to report consistent warnings based on URB status */
823 static int check_urb_status(struct urb *urb)
825 struct device *dev = &urb->dev->dev;
826 int status = urb->status;
833 dev_err(dev, "%s: overflow actual length is %d\n",
834 __func__, urb->actual_length);
840 /* device is gone, stop sending */
/* Anything not explicitly handled above is unexpected — log it */
843 dev_err(dev, "%s: unknown status %d\n", __func__, status);
/* Full teardown: stop logging, free the OUT urb pool, then each bulk-in
 * endpoint's urbs and buffers, and finally the cport-to-endpoint map. */
848 static void es2_destroy(struct es2_ap_dev *es2)
850 struct usb_device *udev;
854 debugfs_remove(es2->apb_log_enable_dentry);
855 usb_log_disable(es2);
857 /* Tear down everything! */
858 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
859 struct urb *urb = es2->cport_out_urb[i];
865 es2->cport_out_urb[i] = NULL;
866 es2->cport_out_urb_busy[i] = false; /* just to be anal */
869 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
870 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
872 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
873 struct urb *urb = cport_in->urb[i];
878 kfree(cport_in->buffer[i]);
879 cport_in->buffer[i] = NULL;
883 kfree(es2->cport_to_ep);
/* Bulk-IN completion: validate the urb, unpack the cport id from the
 * message header, hand the data to greybus core, then resubmit the urb so
 * reception continues. Runs in interrupt context (hence GFP_ATOMIC). */
891 static void cport_in_callback(struct urb *urb)
893 struct gb_host_device *hd = urb->context;
894 struct device *dev = &urb->dev->dev;
895 struct gb_operation_msg_hdr *header;
896 int status = check_urb_status(urb);
/* Transient errors: just resubmit and hope the next transfer succeeds */
901 if ((status == -EAGAIN) || (status == -EPROTO))
904 /* The urb is being unlinked */
905 if (status == -ENOENT || status == -ESHUTDOWN)
908 dev_err(dev, "urb cport in error %d (dropped)\n", status);
/* A valid message must at least contain a full operation header */
912 if (urb->actual_length < sizeof(*header)) {
913 dev_err(dev, "short message received\n");
917 /* Extract the CPort id, which is packed in the message header */
918 header = urb->transfer_buffer;
919 cport_id = gb_message_cport_unpack(header);
921 if (cport_id_valid(hd, cport_id)) {
922 trace_gb_host_device_recv(hd, cport_id, urb->actual_length);
923 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
926 dev_err(dev, "invalid cport id %u received\n", cport_id);
929 /* put our urb back in the request pool */
930 retval = usb_submit_urb(urb, GFP_ATOMIC);
932 dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
/* Bulk-OUT completion: undo the header packing, detach the urb from the
 * message under the lock, and notify greybus core of the send status. */
935 static void cport_out_callback(struct urb *urb)
937 struct gb_message *message = urb->context;
938 struct gb_host_device *hd = message->operation->connection->hd;
939 struct es2_ap_dev *es2 = hd_to_es2(hd);
940 int status = check_urb_status(urb);
943 gb_message_cport_clear(message->header);
945 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
946 message->hcpriv = NULL;
947 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
950 * Tell the submitter that the message send (attempt) is
951 * complete, and report the status.
953 greybus_message_sent(hd, message, status);
/* Maximum chunk size pulled from the bridge per control request */
958 #define APB1_LOG_MSG_SIZE 64
/* Drain pending APB1 log data over the control pipe into the kfifo,
 * looping until the bridge returns no more bytes. */
959 static void apb_log_get(struct es2_ap_dev *es2, char *buf)
963 /* SVC messages go down our control pipe */
965 retval = usb_control_msg(es2->usb_dev,
966 usb_rcvctrlpipe(es2->usb_dev, 0),
968 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
974 kfifo_in(&es2->apb_log_fifo, buf, retval);
975 } while (retval > 0);
/* kthread body: repeatedly poll the bridge for log data until stopped */
978 static int apb_log_poll(void *data)
980 struct es2_ap_dev *es2 = data;
983 buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
987 while (!kthread_should_stop()) {
989 apb_log_get(es2, buf);
/* debugfs read for "apb_log": copy up to count bytes out of the kfifo
 * into a temporary kernel buffer, then to userspace. */
997 static ssize_t apb_log_read(struct file *f, char __user *buf,
998 size_t count, loff_t *ppos)
1000 struct es2_ap_dev *es2 = f->f_inode->i_private;
1005 if (count > APB1_LOG_SIZE)
1006 count = APB1_LOG_SIZE;
1008 tmp_buf = kmalloc(count, GFP_KERNEL);
1012 copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1013 ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1020 static const struct file_operations apb_log_fops = {
1021 .read = apb_log_read,
/* Start the APB1 log poller thread and expose the log via debugfs;
 * idempotent — a second call while the task runs is a no-op. */
1024 static void usb_log_enable(struct es2_ap_dev *es2)
1026 if (!IS_ERR_OR_NULL(es2->apb_log_task))
1029 /* get log from APB1 */
1030 es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
1031 if (IS_ERR(es2->apb_log_task))
1033 /* XXX We will need to rename this per APB */
1034 es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
1035 gb_debugfs_get(), es2,
/* Reverse of usb_log_enable: remove the debugfs file and stop the thread */
1039 static void usb_log_disable(struct es2_ap_dev *es2)
1041 if (IS_ERR_OR_NULL(es2->apb_log_task))
1044 debugfs_remove(es2->apb_log_dentry);
1045 es2->apb_log_dentry = NULL;
1047 kthread_stop(es2->apb_log_task);
1048 es2->apb_log_task = NULL;
/* debugfs read for "apb_log_enable": report 1/0 depending on whether the
 * log poller task is running */
1051 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1052 size_t count, loff_t *ppos)
1054 struct es2_ap_dev *es2 = f->f_inode->i_private;
1055 int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1058 sprintf(tmp_buf, "%d\n", enable);
1059 return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
/* debugfs write for "apb_log_enable": parse an int and toggle logging */
1062 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1063 size_t count, loff_t *ppos)
1067 struct es2_ap_dev *es2 = f->f_inode->i_private;
1069 retval = kstrtoint_from_user(buf, count, 10, &enable);
1074 usb_log_enable(es2);
1076 usb_log_disable(es2);
1081 static const struct file_operations apb_log_enable_fops = {
1082 .read = apb_log_enable_read,
1083 .write = apb_log_enable_write,
/* Query the bridge for how many CPorts it supports. The __le16 response is
 * heap-allocated for DMA safety; the result is clamped to U8_MAX because
 * the cport id is packed into a single header pad byte (see
 * gb_message_cport_pack()). */
1086 static int apb_get_cport_count(struct usb_device *udev)
1089 __le16 *cport_count;
1091 cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1095 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1096 GB_APB_REQUEST_CPORT_COUNT,
1097 USB_DIR_IN | USB_TYPE_VENDOR |
1098 USB_RECIP_INTERFACE, 0, 0, cport_count,
1099 sizeof(*cport_count), ES2_TIMEOUT);
1100 if (retval != sizeof(*cport_count)) {
1101 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1110 retval = le16_to_cpu(*cport_count);
1112 /* We need to fit a CPort ID in one byte of a message header */
1113 if (retval > U8_MAX) {
1115 dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
1124 * The ES2 USB Bridge device has 15 endpoints
1125 * 1 Control - usual USB stuff + AP -> APBridgeA messages
1126 * 7 Bulk IN - CPort data in
1127 * 7 Bulk OUT - CPort data out
/* Probe: query cport count, create the gb_host_device, discover the bulk
 * endpoint pairs, allocate all IN buffers/urbs and the OUT urb pool, set up
 * the debugfs log-enable knob, register with greybus core, then start
 * reception on every bulk-in endpoint. */
1129 static int ap_probe(struct usb_interface *interface,
1130 const struct usb_device_id *id)
1132 struct es2_ap_dev *es2;
1133 struct gb_host_device *hd;
1134 struct usb_device *udev;
1135 struct usb_host_interface *iface_desc;
1136 struct usb_endpoint_descriptor *endpoint;
1143 udev = usb_get_dev(interface_to_usbdev(interface));
1145 num_cports = apb_get_cport_count(udev);
1146 if (num_cports < 0) {
1148 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1153 hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1160 es2 = hd_to_es2(hd);
1162 es2->usb_intf = interface;
1163 es2->usb_dev = udev;
1164 spin_lock_init(&es2->cport_out_urb_lock);
1165 INIT_KFIFO(es2->apb_log_fifo);
1166 usb_set_intfdata(interface, es2);
1169 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
1172 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1175 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
1179 es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
1181 if (!es2->cport_to_ep) {
1186 /* find all bulk endpoints */
1187 iface_desc = interface->cur_altsetting;
1188 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1189 endpoint = &iface_desc->endpoint[i].desc;
1191 if (usb_endpoint_is_bulk_in(endpoint)) {
1192 es2->cport_in[bulk_in++].endpoint =
1193 endpoint->bEndpointAddress;
1194 } else if (usb_endpoint_is_bulk_out(endpoint)) {
1195 es2->cport_out[bulk_out++].endpoint =
1196 endpoint->bEndpointAddress;
1199 "Unknown endpoint type found, address 0x%02x\n",
1200 endpoint->bEndpointAddress);
/* The hardware must provide exactly NUM_BULKS IN/OUT pairs */
1203 if (bulk_in != NUM_BULKS || bulk_out != NUM_BULKS) {
1204 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1209 /* Allocate buffers for our cport in messages */
1210 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1211 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
1213 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1217 urb = usb_alloc_urb(0, GFP_KERNEL);
1222 buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1228 usb_fill_bulk_urb(urb, udev,
1229 usb_rcvbulkpipe(udev,
1230 cport_in->endpoint),
1231 buffer, ES2_GBUF_MSG_SIZE_MAX,
1232 cport_in_callback, hd);
1233 cport_in->urb[i] = urb;
1234 cport_in->buffer[i] = buffer;
1238 /* Allocate urbs for our CPort OUT messages */
1239 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1242 urb = usb_alloc_urb(0, GFP_KERNEL);
1248 es2->cport_out_urb[i] = urb;
1249 es2->cport_out_urb_busy[i] = false; /* just to be anal */
1252 /* XXX We will need to rename this per APB */
1253 es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1254 (S_IWUSR | S_IRUGO),
1255 gb_debugfs_get(), es2,
1256 &apb_log_enable_fops);
1258 retval = gb_hd_add(hd);
/* Only start reception once the host device is registered */
1262 for (i = 0; i < NUM_BULKS; ++i) {
1263 retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
1265 goto err_disable_cport_in;
1270 err_disable_cport_in:
1271 for (--i; i >= 0; --i)
1272 es2_cport_in_disable(es2, &es2->cport_in[i]);
/* Disconnect: stop reception on every bulk-in endpoint; remaining teardown
 * (es2_destroy() etc.) is not visible in this listing. */
1280 static void ap_disconnect(struct usb_interface *interface)
1282 struct es2_ap_dev *es2 = usb_get_intfdata(interface);
1287 for (i = 0; i < NUM_BULKS; ++i)
1288 es2_cport_in_disable(es2, &es2->cport_in[i]);
1293 static struct usb_driver es2_ap_driver = {
1294 .name = "es2_ap_driver",
1296 .disconnect = ap_disconnect,
1297 .id_table = id_table,
/* Boilerplate module init/exit via usb_register()/usb_deregister() */
1301 module_usb_driver(es2_ap_driver);
1303 MODULE_LICENSE("GPL v2");
1304 MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");