greybus: es2: Release reserved cports CDSI0 and CDSI1
drivers/staging/greybus/es2.c
1 /*
2  * Greybus "AP" USB driver for "ES2" controller chips
3  *
4  * Copyright 2014-2015 Google Inc.
5  * Copyright 2014-2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9 #include <linux/kthread.h>
10 #include <linux/sizes.h>
11 #include <linux/usb.h>
12 #include <linux/kfifo.h>
13 #include <linux/debugfs.h>
14 #include <asm/unaligned.h>
15
16 #include "greybus.h"
17 #include "kernel_ver.h"
18 #include "connection.h"
19
20 /* Fixed CPort numbers */
21 #define ES2_CPORT_CDSI0         16
22 #define ES2_CPORT_CDSI1         17
23
24 /* Memory sizes for the buffers sent to/from the ES2 controller */
25 #define ES2_GBUF_MSG_SIZE_MAX   2048
26
27 static const struct usb_device_id id_table[] = {
28         { USB_DEVICE(0x18d1, 0x1eaf) },
29         { },
30 };
31 MODULE_DEVICE_TABLE(usb, id_table);
32
33 #define APB1_LOG_SIZE           SZ_16K
34
35 /* Number of bulk in and bulk out endpoint pairs */
36 #define NUM_BULKS               7
37
38 /*
39  * Number of CPort IN urbs in flight at any point in time.
40  * Adjust if we are having stalls in the USB buffer due to not enough urbs in
41  * flight.
42  */
43 #define NUM_CPORT_IN_URB        4
44
45 /* Number of CPort OUT urbs in flight at any point in time.
46  * Adjust if the system log shows messages saying we are out of urbs.
47  */
48 #define NUM_CPORT_OUT_URB       (8 * NUM_BULKS)
49
50 /*
51  * @endpoint: bulk in endpoint for CPort data
52  * @urb: array of urbs for the CPort in messages
53  * @buffer: array of buffers for the @cport_in_urb urbs
54  */
55 struct es2_cport_in {
56         __u8 endpoint;
57         struct urb *urb[NUM_CPORT_IN_URB];
58         u8 *buffer[NUM_CPORT_IN_URB];
59 };
60
61 /*
62  * @endpoint: bulk out endpoint for CPort data
63  */
64 struct es2_cport_out {
65         __u8 endpoint;
66 };
67
68 /**
69  * es2_ap_dev - ES2 USB Bridge to AP structure
70  * @usb_dev: pointer to the USB device we are attached to.
71  * @usb_intf: pointer to the USB interface we are bound to.
72  * @hd: pointer to our gb_host_device structure
73  *
74  * @cport_in: endpoint, urbs and buffer for cport in messages
75  * @cport_out: endpoint for cport out messages
76  * @cport_out_urb: array of urbs for the CPort out messages
77  * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
78  *                      not.
79  * @cport_out_urb_cancelled: array of flags indicating whether the
80  *                      corresponding @cport_out_urb is being cancelled
81  * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
82  *
83  * @apb_log_task: task pointer for logging thread
84  * @apb_log_dentry: file system entry for the log file interface
85  * @apb_log_enable_dentry: file system entry for enabling logging
86  * @apb_log_fifo: kernel FIFO to carry logged data
87  */
88 struct es2_ap_dev {
89         struct usb_device *usb_dev;
90         struct usb_interface *usb_intf;
91         struct gb_host_device *hd;
92
93         struct es2_cport_in cport_in[NUM_BULKS];
94         struct es2_cport_out cport_out[NUM_BULKS];
95         struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
96         bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
97         bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
98         spinlock_t cport_out_urb_lock;
99
100         bool cdsi1_in_use;
101
102         int *cport_to_ep;
103
104         struct task_struct *apb_log_task;
105         struct dentry *apb_log_dentry;
106         struct dentry *apb_log_enable_dentry;
107         DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
108 };
109
110 /**
111  * cport_to_ep - information about cport to endpoints mapping
112  * @cport_id: the id of cport to map to endpoints
113  * @endpoint_in: the endpoint number to use for in transfer
114  * @endpoint_out: the endpoint number to use for out transfer
115  */
116 struct cport_to_ep {
117         __le16 cport_id;
118         __u8 endpoint_in;
119         __u8 endpoint_out;
120 };
121
122 /**
123  * timesync_enable_request - Enable timesync in an APBridge
124  * @count: number of TimeSync Pulses to expect
125  * @frame_time: the initial FrameTime at the first TimeSync Pulse
126  * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
127  * @refclk: The AP mandated reference clock to run FrameTime at
128  */
129 struct timesync_enable_request {
130         __u8    count;
131         __le64  frame_time;
132         __le32  strobe_delay;
133         __le32  refclk;
134 } __packed;
135
136 /**
137  * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
138  * @frame_time: An array of authoritative FrameTimes provided by the SVC
139  *              and relayed to the APBridge by the AP
140  */
141 struct timesync_authoritative_request {
142         __le64  frame_time[GB_TIMESYNC_MAX_STROBES];
143 } __packed;
144
145 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
146 {
147         return (struct es2_ap_dev *)&hd->hd_priv;
148 }
149
150 static void cport_out_callback(struct urb *urb);
151 static void usb_log_enable(struct es2_ap_dev *es2);
152 static void usb_log_disable(struct es2_ap_dev *es2);
153
154 /* Get the endpoints pair mapped to the cport */
155 static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
156 {
157         if (cport_id >= es2->hd->num_cports)
158                 return 0;
159         return es2->cport_to_ep[cport_id];
160 }
161
162 #define ES2_TIMEOUT     500     /* 500 ms for the SVC to do something */
163
164 /* Disable for now until we work all of this out to keep a warning-free build */
165 #if 0
166 /* Test if the endpoints pair is already mapped to a cport */
167 static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
168 {
169         int i;
170
171         for (i = 0; i < es2->hd->num_cports; i++) {
172                 if (es2->cport_to_ep[i] == ep_pair)
173                         return 1;
174         }
175         return 0;
176 }
177
178 /* Configure the endpoint mapping and send the request to APBridge */
179 static int map_cport_to_ep(struct es2_ap_dev *es2,
180                                 u16 cport_id, int ep_pair)
181 {
182         int retval;
183         struct cport_to_ep *cport_to_ep;
184
185         if (ep_pair < 0 || ep_pair >= NUM_BULKS)
186                 return -EINVAL;
187         if (cport_id >= es2->hd->num_cports)
188                 return -EINVAL;
189         if (ep_pair && ep_pair_in_use(es2, ep_pair))
190                 return -EINVAL;
191
192         cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
193         if (!cport_to_ep)
194                 return -ENOMEM;
195
196         es2->cport_to_ep[cport_id] = ep_pair;
197         cport_to_ep->cport_id = cpu_to_le16(cport_id);
198         cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
199         cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
200
201         retval = usb_control_msg(es2->usb_dev,
202                                  usb_sndctrlpipe(es2->usb_dev, 0),
203                                  GB_APB_REQUEST_EP_MAPPING,
204                                  USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
205                                  0x00, 0x00,
206                                  (char *)cport_to_ep,
207                                  sizeof(*cport_to_ep),
208                                  ES2_TIMEOUT);
209         if (retval == sizeof(*cport_to_ep))
210                 retval = 0;
211         kfree(cport_to_ep);
212
213         return retval;
214 }
215
216 /* Unmap a cport: use the muxed endpoints pair */
217 static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
218 {
219         return map_cport_to_ep(es2, cport_id, 0);
220 }
221 #endif
222
223 static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
224 {
225         struct usb_device *udev = es2->usb_dev;
226         u8 *data;
227         int retval;
228
229         data = kmalloc(size, GFP_KERNEL);
230         if (!data)
231                 return -ENOMEM;
232         memcpy(data, req, size);
233
234         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
235                                  cmd,
236                                  USB_DIR_OUT | USB_TYPE_VENDOR |
237                                  USB_RECIP_INTERFACE,
238                                  0, 0, data, size, ES2_TIMEOUT);
239         if (retval < 0)
240                 dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
241         else
242                 retval = 0;
243
244         kfree(data);
245         return retval;
246 }
247
248 static void ap_urb_complete(struct urb *urb)
249 {
250         struct usb_ctrlrequest *dr = urb->context;
251
252         kfree(dr);
253         usb_free_urb(urb);
254 }
255
256 static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
257 {
258         struct usb_device *udev = es2->usb_dev;
259         struct urb *urb;
260         struct usb_ctrlrequest *dr;
261         u8 *buf;
262         int retval;
263
264         urb = usb_alloc_urb(0, GFP_ATOMIC);
265         if (!urb)
266                 return -ENOMEM;
267
268         dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
269         if (!dr) {
270                 usb_free_urb(urb);
271                 return -ENOMEM;
272         }
273
274         buf = (u8 *)dr + sizeof(*dr);
275         memcpy(buf, req, size);
276
277         dr->bRequest = cmd;
278         dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
279         dr->wValue = 0;
280         dr->wIndex = 0;
281         dr->wLength = cpu_to_le16(size);
282
283         usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
284                              (unsigned char *)dr, buf, size,
285                              ap_urb_complete, dr);
286         retval = usb_submit_urb(urb, GFP_ATOMIC);
287         if (retval) {
288                 usb_free_urb(urb);
289                 kfree(dr);
290         }
291         return retval;
292 }
293
294 static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
295                      bool async)
296 {
297         struct es2_ap_dev *es2 = hd_to_es2(hd);
298
299         if (async)
300                 return output_async(es2, req, size, cmd);
301
302         return output_sync(es2, req, size, cmd);
303 }
304
305 static int es2_cport_in_enable(struct es2_ap_dev *es2,
306                                 struct es2_cport_in *cport_in)
307 {
308         struct urb *urb;
309         int ret;
310         int i;
311
312         for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
313                 urb = cport_in->urb[i];
314
315                 ret = usb_submit_urb(urb, GFP_KERNEL);
316                 if (ret) {
317                         dev_err(&es2->usb_dev->dev,
318                                         "failed to submit in-urb: %d\n", ret);
319                         goto err_kill_urbs;
320                 }
321         }
322
323         return 0;
324
325 err_kill_urbs:
326         for (--i; i >= 0; --i) {
327                 urb = cport_in->urb[i];
328                 usb_kill_urb(urb);
329         }
330
331         return ret;
332 }
333
334 static void es2_cport_in_disable(struct es2_ap_dev *es2,
335                                 struct es2_cport_in *cport_in)
336 {
337         struct urb *urb;
338         int i;
339
340         for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
341                 urb = cport_in->urb[i];
342                 usb_kill_urb(urb);
343         }
344 }
345
346 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
347 {
348         struct urb *urb = NULL;
349         unsigned long flags;
350         int i;
351
352         spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
353
354         /* Look in our pool of allocated urbs first, as that's the "fastest" */
355         for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
356                 if (es2->cport_out_urb_busy[i] == false &&
357                                 es2->cport_out_urb_cancelled[i] == false) {
358                         es2->cport_out_urb_busy[i] = true;
359                         urb = es2->cport_out_urb[i];
360                         break;
361                 }
362         }
363         spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
364         if (urb)
365                 return urb;
366
367         /*
368          * Crap, pool is empty, complain to the syslog and go allocate one
369          * dynamically as we have to succeed.
370          */
371         dev_dbg(&es2->usb_dev->dev,
372                 "No free CPort OUT urbs, having to dynamically allocate one!\n");
373         return usb_alloc_urb(0, gfp_mask);
374 }
375
376 static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
377 {
378         unsigned long flags;
379         int i;
380         /*
381          * See if this was an urb in our pool, if so mark it "free", otherwise
382          * we need to free it ourselves.
383          */
384         spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
385         for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
386                 if (urb == es2->cport_out_urb[i]) {
387                         es2->cport_out_urb_busy[i] = false;
388                         urb = NULL;
389                         break;
390                 }
391         }
392         spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
393
394         /* If urb is not NULL, then we need to free this urb */
395         usb_free_urb(urb);
396 }
397
398 /*
399  * We (ab)use the operation-message header pad bytes to transfer the
400  * cport id in order to minimise overhead.
401  */
402 static void
403 gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
404 {
405         header->pad[0] = cport_id;
406 }
407
408 /* Clear the pad bytes used for the CPort id */
409 static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
410 {
411         header->pad[0] = 0;
412 }
413
414 /* Extract the CPort id packed into the header, and clear it */
415 static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
416 {
417         u16 cport_id = header->pad[0];
418
419         gb_message_cport_clear(header);
420
421         return cport_id;
422 }
423
424 /*
425  * Returns zero if the message was successfully queued, or a negative errno
426  * otherwise.
427  */
428 static int message_send(struct gb_host_device *hd, u16 cport_id,
429                         struct gb_message *message, gfp_t gfp_mask)
430 {
431         struct es2_ap_dev *es2 = hd_to_es2(hd);
432         struct usb_device *udev = es2->usb_dev;
433         size_t buffer_size;
434         int retval;
435         struct urb *urb;
436         int ep_pair;
437         unsigned long flags;
438
439         /*
440          * The data actually transferred will include an indication
441          * of where the data should be sent.  Do one last check of
442          * the target CPort id before filling it in.
443          */
444         if (!cport_id_valid(hd, cport_id)) {
445                 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
446                 return -EINVAL;
447         }
448
449         /* Find a free urb */
450         urb = next_free_urb(es2, gfp_mask);
451         if (!urb)
452                 return -ENOMEM;
453
454         spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
455         message->hcpriv = urb;
456         spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
457
458         /* Pack the cport id into the message header */
459         gb_message_cport_pack(message->header, cport_id);
460
461         buffer_size = sizeof(*message->header) + message->payload_size;
462
463         ep_pair = cport_to_ep_pair(es2, cport_id);
464         usb_fill_bulk_urb(urb, udev,
465                           usb_sndbulkpipe(udev,
466                                           es2->cport_out[ep_pair].endpoint),
467                           message->buffer, buffer_size,
468                           cport_out_callback, message);
469         urb->transfer_flags |= URB_ZERO_PACKET;
470         retval = usb_submit_urb(urb, gfp_mask);
471         if (retval) {
472                 dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
473
474                 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
475                 message->hcpriv = NULL;
476                 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
477
478                 free_urb(es2, urb);
479                 gb_message_cport_clear(message->header);
480
481                 return retval;
482         }
483
484         return 0;
485 }
486
487 /*
488  * Can not be called in atomic context.
489  */
490 static void message_cancel(struct gb_message *message)
491 {
492         struct gb_host_device *hd = message->operation->connection->hd;
493         struct es2_ap_dev *es2 = hd_to_es2(hd);
494         struct urb *urb;
495         int i;
496
497         might_sleep();
498
499         spin_lock_irq(&es2->cport_out_urb_lock);
500         urb = message->hcpriv;
501
502         /* Prevent dynamically allocated urb from being deallocated. */
503         usb_get_urb(urb);
504
505         /* Prevent pre-allocated urb from being reused. */
506         for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
507                 if (urb == es2->cport_out_urb[i]) {
508                         es2->cport_out_urb_cancelled[i] = true;
509                         break;
510                 }
511         }
512         spin_unlock_irq(&es2->cport_out_urb_lock);
513
514         usb_kill_urb(urb);
515
516         if (i < NUM_CPORT_OUT_URB) {
517                 spin_lock_irq(&es2->cport_out_urb_lock);
518                 es2->cport_out_urb_cancelled[i] = false;
519                 spin_unlock_irq(&es2->cport_out_urb_lock);
520         }
521
522         usb_free_urb(urb);
523 }
524
525 static int cport_reset(struct gb_host_device *hd, u16 cport_id)
526 {
527         struct es2_ap_dev *es2 = hd_to_es2(hd);
528         struct usb_device *udev = es2->usb_dev;
529         int retval;
530
531         switch (cport_id) {
532         case GB_SVC_CPORT_ID:
533         case ES2_CPORT_CDSI0:
534         case ES2_CPORT_CDSI1:
535                 return 0;
536         }
537
538         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
539                                  GB_APB_REQUEST_RESET_CPORT,
540                                  USB_DIR_OUT | USB_TYPE_VENDOR |
541                                  USB_RECIP_INTERFACE, cport_id, 0,
542                                  NULL, 0, ES2_TIMEOUT);
543         if (retval < 0) {
544                 dev_err(&udev->dev, "failed to reset cport %u: %d\n", cport_id,
545                         retval);
546                 return retval;
547         }
548
549         return 0;
550 }
551
552 static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
553                                 unsigned long flags)
554 {
555         struct es2_ap_dev *es2 = hd_to_es2(hd);
556         struct ida *id_map = &hd->cport_id_map;
557         int ida_start, ida_end;
558
559         switch (cport_id) {
560         case ES2_CPORT_CDSI0:
561         case ES2_CPORT_CDSI1:
562                 dev_err(&hd->dev, "cport %d not available\n", cport_id);
563                 return -EBUSY;
564         }
565
566         if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
567                         flags & GB_CONNECTION_FLAG_CDSI1) {
568                 if (es2->cdsi1_in_use) {
569                         dev_err(&hd->dev, "CDSI1 already in use\n");
570                         return -EBUSY;
571                 }
572
573                 es2->cdsi1_in_use = true;
574
575                 return ES2_CPORT_CDSI1;
576         }
577
578         if (cport_id < 0) {
579                 ida_start = 0;
580                 ida_end = hd->num_cports;
581         } else if (cport_id < hd->num_cports) {
582                 ida_start = cport_id;
583                 ida_end = cport_id + 1;
584         } else {
585                 dev_err(&hd->dev, "cport %d not available\n", cport_id);
586                 return -EINVAL;
587         }
588
589         return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
590 }
591
592 static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
593 {
594         struct es2_ap_dev *es2 = hd_to_es2(hd);
595
596         switch (cport_id) {
597         case ES2_CPORT_CDSI1:
598                 es2->cdsi1_in_use = false;
599                 return;
600         }
601
602         ida_simple_remove(&hd->cport_id_map, cport_id);
603 }
604
605 static int cport_enable(struct gb_host_device *hd, u16 cport_id)
606 {
607         int retval;
608
609         retval = cport_reset(hd, cport_id);
610         if (retval)
611                 return retval;
612
613         return 0;
614 }
615
616 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
617 {
618         int retval;
619         struct es2_ap_dev *es2 = hd_to_es2(hd);
620         struct usb_device *udev = es2->usb_dev;
621
622         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
623                                  GB_APB_REQUEST_LATENCY_TAG_EN,
624                                  USB_DIR_OUT | USB_TYPE_VENDOR |
625                                  USB_RECIP_INTERFACE, cport_id, 0, NULL,
626                                  0, ES2_TIMEOUT);
627
628         if (retval < 0)
629                 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
630                         cport_id);
631         return retval;
632 }
633
634 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
635 {
636         int retval;
637         struct es2_ap_dev *es2 = hd_to_es2(hd);
638         struct usb_device *udev = es2->usb_dev;
639
640         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
641                                  GB_APB_REQUEST_LATENCY_TAG_DIS,
642                                  USB_DIR_OUT | USB_TYPE_VENDOR |
643                                  USB_RECIP_INTERFACE, cport_id, 0, NULL,
644                                  0, ES2_TIMEOUT);
645
646         if (retval < 0)
647                 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
648                         cport_id);
649         return retval;
650 }
651
652 static int cport_features_enable(struct gb_host_device *hd, u16 cport_id)
653 {
654         int retval;
655         struct es2_ap_dev *es2 = hd_to_es2(hd);
656         struct usb_device *udev = es2->usb_dev;
657
658         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
659                                  GB_APB_REQUEST_CPORT_FEAT_EN,
660                                  USB_DIR_OUT | USB_TYPE_VENDOR |
661                                  USB_RECIP_INTERFACE, cport_id, 0, NULL,
662                                  0, ES2_TIMEOUT);
663         if (retval < 0)
664                 dev_err(&udev->dev, "Cannot enable CPort features for cport %u: %d\n",
665                         cport_id, retval);
666         return retval;
667 }
668
669 static int cport_features_disable(struct gb_host_device *hd, u16 cport_id)
670 {
671         int retval;
672         struct es2_ap_dev *es2 = hd_to_es2(hd);
673         struct usb_device *udev = es2->usb_dev;
674
675         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
676                                  GB_APB_REQUEST_CPORT_FEAT_DIS,
677                                  USB_DIR_OUT | USB_TYPE_VENDOR |
678                                  USB_RECIP_INTERFACE, cport_id, 0, NULL,
679                                  0, ES2_TIMEOUT);
680         if (retval < 0)
681                 dev_err(&udev->dev,
682                         "Cannot disable CPort features for cport %u: %d\n",
683                         cport_id, retval);
684         return retval;
685 }
686
687 static int timesync_enable(struct gb_host_device *hd, u8 count,
688                            u64 frame_time, u32 strobe_delay, u32 refclk)
689 {
690         int retval;
691         struct es2_ap_dev *es2 = hd_to_es2(hd);
692         struct usb_device *udev = es2->usb_dev;
693         struct gb_control_timesync_enable_request *request;
694
695         request = kzalloc(sizeof(*request), GFP_KERNEL);
696         if (!request)
697                 return -ENOMEM;
698
699         request->count = count;
700         request->frame_time = cpu_to_le64(frame_time);
701         request->strobe_delay = cpu_to_le32(strobe_delay);
702         request->refclk = cpu_to_le32(refclk);
703         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
704                                  REQUEST_TIMESYNC_ENABLE,
705                                  USB_DIR_OUT | USB_TYPE_VENDOR |
706                                  USB_RECIP_INTERFACE, 0, 0, request,
707                                  sizeof(*request), ES2_TIMEOUT);
708         if (retval < 0)
709                 dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
710
711         kfree(request);
712         return retval;
713 }
714
715 static int timesync_disable(struct gb_host_device *hd)
716 {
717         int retval;
718         struct es2_ap_dev *es2 = hd_to_es2(hd);
719         struct usb_device *udev = es2->usb_dev;
720
721         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
722                                  REQUEST_TIMESYNC_DISABLE,
723                                  USB_DIR_OUT | USB_TYPE_VENDOR |
724                                  USB_RECIP_INTERFACE, 0, 0, NULL,
725                                  0, ES2_TIMEOUT);
726         if (retval < 0)
727                 dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
728
729         return retval;
730 }
731
732 static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
733 {
734         int retval, i;
735         struct es2_ap_dev *es2 = hd_to_es2(hd);
736         struct usb_device *udev = es2->usb_dev;
737         struct timesync_authoritative_request *request;
738
739         request = kzalloc(sizeof(*request), GFP_KERNEL);
740         if (!request)
741                 return -ENOMEM;
742
743         for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
744                 request->frame_time[i] = cpu_to_le64(frame_time[i]);
745
746         retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
747                                  REQUEST_TIMESYNC_AUTHORITATIVE,
748                                  USB_DIR_OUT | USB_TYPE_VENDOR |
749                                  USB_RECIP_INTERFACE, 0, 0, request,
750                                  sizeof(*request), ES2_TIMEOUT);
751         if (retval < 0)
752                 dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
753
754         kfree(request);
755         return retval;
756 }
757
758 static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
759 {
760         int retval;
761         struct es2_ap_dev *es2 = hd_to_es2(hd);
762         struct usb_device *udev = es2->usb_dev;
763         __le64 *response_frame_time;
764
765         response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
766         if (!response_frame_time)
767                 return -ENOMEM;
768
769         retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
770                                  REQUEST_TIMESYNC_GET_LAST_EVENT,
771                                  USB_DIR_IN | USB_TYPE_VENDOR |
772                                  USB_RECIP_INTERFACE, 0, 0, response_frame_time,
773                                  sizeof(*response_frame_time), ES2_TIMEOUT);
774
775         if (retval != sizeof(*response_frame_time)) {
776                 dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
777                         retval);
778
779                 if (retval >= 0)
780                         retval = -EIO;
781
782                 goto out;
783         }
784         *frame_time = le64_to_cpu(*response_frame_time);
785         retval = 0;
786 out:
787         kfree(response_frame_time);
788         return retval;
789 }
790
791 static struct gb_hd_driver es2_driver = {
792         .hd_priv_size                   = sizeof(struct es2_ap_dev),
793         .message_send                   = message_send,
794         .message_cancel                 = message_cancel,
795         .cport_allocate                 = es2_cport_allocate,
796         .cport_release                  = es2_cport_release,
797         .cport_enable                   = cport_enable,
798         .latency_tag_enable             = latency_tag_enable,
799         .latency_tag_disable            = latency_tag_disable,
800         .output                         = output,
801         .cport_features_enable          = cport_features_enable,
802         .cport_features_disable         = cport_features_disable,
803         .timesync_enable                = timesync_enable,
804         .timesync_disable               = timesync_disable,
805         .timesync_authoritative         = timesync_authoritative,
806         .timesync_get_last_event        = timesync_get_last_event,
807 };
808
809 /* Common function to report consistent warnings based on URB status */
810 static int check_urb_status(struct urb *urb)
811 {
812         struct device *dev = &urb->dev->dev;
813         int status = urb->status;
814
815         switch (status) {
816         case 0:
817                 return 0;
818
819         case -EOVERFLOW:
820                 dev_err(dev, "%s: overflow actual length is %d\n",
821                         __func__, urb->actual_length);
822         case -ECONNRESET:
823         case -ENOENT:
824         case -ESHUTDOWN:
825         case -EILSEQ:
826         case -EPROTO:
827                 /* device is gone, stop sending */
828                 return status;
829         }
830         dev_err(dev, "%s: unknown status %d\n", __func__, status);
831
832         return -EAGAIN;
833 }
834
835 static void es2_destroy(struct es2_ap_dev *es2)
836 {
837         struct usb_device *udev;
838         int bulk_in;
839         int i;
840
841         debugfs_remove(es2->apb_log_enable_dentry);
842         usb_log_disable(es2);
843
844         /* Tear down everything! */
845         for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
846                 struct urb *urb = es2->cport_out_urb[i];
847
848                 if (!urb)
849                         break;
850                 usb_kill_urb(urb);
851                 usb_free_urb(urb);
852                 es2->cport_out_urb[i] = NULL;
853                 es2->cport_out_urb_busy[i] = false;     /* just to be anal */
854         }
855
856         for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
857                 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
858
859                 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
860                         struct urb *urb = cport_in->urb[i];
861
862                         if (!urb)
863                                 break;
864                         usb_free_urb(urb);
865                         kfree(cport_in->buffer[i]);
866                         cport_in->buffer[i] = NULL;
867                 }
868         }
869
870         kfree(es2->cport_to_ep);
871
872         /* release reserved CDSI0 and CDSI1 cports */
873         gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
874         gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
875
876         udev = es2->usb_dev;
877         gb_hd_put(es2->hd);
878
879         usb_put_dev(udev);
880 }
881
882 static void cport_in_callback(struct urb *urb)
883 {
884         struct gb_host_device *hd = urb->context;
885         struct device *dev = &urb->dev->dev;
886         struct gb_operation_msg_hdr *header;
887         int status = check_urb_status(urb);
888         int retval;
889         u16 cport_id;
890
891         if (status) {
892                 if ((status == -EAGAIN) || (status == -EPROTO))
893                         goto exit;
894
895                 /* The urb is being unlinked */
896                 if (status == -ENOENT || status == -ESHUTDOWN)
897                         return;
898
899                 dev_err(dev, "urb cport in error %d (dropped)\n", status);
900                 return;
901         }
902
903         if (urb->actual_length < sizeof(*header)) {
904                 dev_err(dev, "short message received\n");
905                 goto exit;
906         }
907
908         /* Extract the CPort id, which is packed in the message header */
909         header = urb->transfer_buffer;
910         cport_id = gb_message_cport_unpack(header);
911
912         if (cport_id_valid(hd, cport_id)) {
913                 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
914                                                         urb->actual_length);
915         } else {
916                 dev_err(dev, "invalid cport id %u received\n", cport_id);
917         }
918 exit:
919         /* put our urb back in the request pool */
920         retval = usb_submit_urb(urb, GFP_ATOMIC);
921         if (retval)
922                 dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
923 }
924
925 static void cport_out_callback(struct urb *urb)
926 {
927         struct gb_message *message = urb->context;
928         struct gb_host_device *hd = message->operation->connection->hd;
929         struct es2_ap_dev *es2 = hd_to_es2(hd);
930         int status = check_urb_status(urb);
931         unsigned long flags;
932
933         gb_message_cport_clear(message->header);
934
935         spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
936         message->hcpriv = NULL;
937         spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
938
939         /*
940          * Tell the submitter that the message send (attempt) is
941          * complete, and report the status.
942          */
943         greybus_message_sent(hd, message, status);
944
945         free_urb(es2, urb);
946 }
947
948 #define APB1_LOG_MSG_SIZE       64
949 static void apb_log_get(struct es2_ap_dev *es2, char *buf)
950 {
951         int retval;
952
953         /* SVC messages go down our control pipe */
954         do {
955                 retval = usb_control_msg(es2->usb_dev,
956                                         usb_rcvctrlpipe(es2->usb_dev, 0),
957                                         GB_APB_REQUEST_LOG,
958                                         USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
959                                         0x00, 0x00,
960                                         buf,
961                                         APB1_LOG_MSG_SIZE,
962                                         ES2_TIMEOUT);
963                 if (retval > 0)
964                         kfifo_in(&es2->apb_log_fifo, buf, retval);
965         } while (retval > 0);
966 }
967
968 static int apb_log_poll(void *data)
969 {
970         struct es2_ap_dev *es2 = data;
971         char *buf;
972
973         buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
974         if (!buf)
975                 return -ENOMEM;
976
977         while (!kthread_should_stop()) {
978                 msleep(1000);
979                 apb_log_get(es2, buf);
980         }
981
982         kfree(buf);
983
984         return 0;
985 }
986
987 static ssize_t apb_log_read(struct file *f, char __user *buf,
988                                 size_t count, loff_t *ppos)
989 {
990         struct es2_ap_dev *es2 = f->f_inode->i_private;
991         ssize_t ret;
992         size_t copied;
993         char *tmp_buf;
994
995         if (count > APB1_LOG_SIZE)
996                 count = APB1_LOG_SIZE;
997
998         tmp_buf = kmalloc(count, GFP_KERNEL);
999         if (!tmp_buf)
1000                 return -ENOMEM;
1001
1002         copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1003         ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1004
1005         kfree(tmp_buf);
1006
1007         return ret;
1008 }
1009
1010 static const struct file_operations apb_log_fops = {
1011         .read   = apb_log_read,
1012 };
1013
1014 static void usb_log_enable(struct es2_ap_dev *es2)
1015 {
1016         if (!IS_ERR_OR_NULL(es2->apb_log_task))
1017                 return;
1018
1019         /* get log from APB1 */
1020         es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
1021         if (IS_ERR(es2->apb_log_task))
1022                 return;
1023         /* XXX We will need to rename this per APB */
1024         es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
1025                                                 gb_debugfs_get(), es2,
1026                                                 &apb_log_fops);
1027 }
1028
1029 static void usb_log_disable(struct es2_ap_dev *es2)
1030 {
1031         if (IS_ERR_OR_NULL(es2->apb_log_task))
1032                 return;
1033
1034         debugfs_remove(es2->apb_log_dentry);
1035         es2->apb_log_dentry = NULL;
1036
1037         kthread_stop(es2->apb_log_task);
1038         es2->apb_log_task = NULL;
1039 }
1040
1041 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1042                                 size_t count, loff_t *ppos)
1043 {
1044         struct es2_ap_dev *es2 = f->f_inode->i_private;
1045         int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1046         char tmp_buf[3];
1047
1048         sprintf(tmp_buf, "%d\n", enable);
1049         return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
1050 }
1051
1052 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1053                                 size_t count, loff_t *ppos)
1054 {
1055         int enable;
1056         ssize_t retval;
1057         struct es2_ap_dev *es2 = f->f_inode->i_private;
1058
1059         retval = kstrtoint_from_user(buf, count, 10, &enable);
1060         if (retval)
1061                 return retval;
1062
1063         if (enable)
1064                 usb_log_enable(es2);
1065         else
1066                 usb_log_disable(es2);
1067
1068         return count;
1069 }
1070
1071 static const struct file_operations apb_log_enable_fops = {
1072         .read   = apb_log_enable_read,
1073         .write  = apb_log_enable_write,
1074 };
1075
1076 static int apb_get_cport_count(struct usb_device *udev)
1077 {
1078         int retval;
1079         __le16 *cport_count;
1080
1081         cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1082         if (!cport_count)
1083                 return -ENOMEM;
1084
1085         retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1086                                  GB_APB_REQUEST_CPORT_COUNT,
1087                                  USB_DIR_IN | USB_TYPE_VENDOR |
1088                                  USB_RECIP_INTERFACE, 0, 0, cport_count,
1089                                  sizeof(*cport_count), ES2_TIMEOUT);
1090         if (retval != sizeof(*cport_count)) {
1091                 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1092                         retval);
1093
1094                 if (retval >= 0)
1095                         retval = -EIO;
1096
1097                 goto out;
1098         }
1099
1100         retval = le16_to_cpu(*cport_count);
1101
1102         /* We need to fit a CPort ID in one byte of a message header */
1103         if (retval > U8_MAX) {
1104                 retval = U8_MAX;
1105                 dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
1106         }
1107
1108 out:
1109         kfree(cport_count);
1110         return retval;
1111 }
1112
1113 /*
1114  * The ES2 USB Bridge device has 15 endpoints
1115  * 1 Control - usual USB stuff + AP -> APBridgeA messages
1116  * 7 Bulk IN - CPort data in
1117  * 7 Bulk OUT - CPort data out
1118  */
1119 static int ap_probe(struct usb_interface *interface,
1120                     const struct usb_device_id *id)
1121 {
1122         struct es2_ap_dev *es2;
1123         struct gb_host_device *hd;
1124         struct usb_device *udev;
1125         struct usb_host_interface *iface_desc;
1126         struct usb_endpoint_descriptor *endpoint;
1127         int bulk_in = 0;
1128         int bulk_out = 0;
1129         int retval;
1130         int i;
1131         int num_cports;
1132
1133         udev = usb_get_dev(interface_to_usbdev(interface));
1134
1135         num_cports = apb_get_cport_count(udev);
1136         if (num_cports < 0) {
1137                 usb_put_dev(udev);
1138                 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1139                         num_cports);
1140                 return num_cports;
1141         }
1142
1143         hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1144                                 num_cports);
1145         if (IS_ERR(hd)) {
1146                 usb_put_dev(udev);
1147                 return PTR_ERR(hd);
1148         }
1149
1150         es2 = hd_to_es2(hd);
1151         es2->hd = hd;
1152         es2->usb_intf = interface;
1153         es2->usb_dev = udev;
1154         spin_lock_init(&es2->cport_out_urb_lock);
1155         INIT_KFIFO(es2->apb_log_fifo);
1156         usb_set_intfdata(interface, es2);
1157
1158         /*
1159          * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
1160          * dynamically.
1161          */
1162         retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1163         if (retval)
1164                 goto error;
1165         retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
1166         if (retval)
1167                 goto error;
1168
1169         es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
1170                                    GFP_KERNEL);
1171         if (!es2->cport_to_ep) {
1172                 retval = -ENOMEM;
1173                 goto error;
1174         }
1175
1176         /* find all bulk endpoints */
1177         iface_desc = interface->cur_altsetting;
1178         for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1179                 endpoint = &iface_desc->endpoint[i].desc;
1180
1181                 if (usb_endpoint_is_bulk_in(endpoint)) {
1182                         es2->cport_in[bulk_in++].endpoint =
1183                                 endpoint->bEndpointAddress;
1184                 } else if (usb_endpoint_is_bulk_out(endpoint)) {
1185                         es2->cport_out[bulk_out++].endpoint =
1186                                 endpoint->bEndpointAddress;
1187                 } else {
1188                         dev_err(&udev->dev,
1189                                 "Unknown endpoint type found, address 0x%02x\n",
1190                                 endpoint->bEndpointAddress);
1191                 }
1192         }
1193         if (bulk_in != NUM_BULKS || bulk_out != NUM_BULKS) {
1194                 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1195                 retval = -ENODEV;
1196                 goto error;
1197         }
1198
1199         /* Allocate buffers for our cport in messages */
1200         for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1201                 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
1202
1203                 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1204                         struct urb *urb;
1205                         u8 *buffer;
1206
1207                         urb = usb_alloc_urb(0, GFP_KERNEL);
1208                         if (!urb) {
1209                                 retval = -ENOMEM;
1210                                 goto error;
1211                         }
1212                         buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1213                         if (!buffer) {
1214                                 retval = -ENOMEM;
1215                                 goto error;
1216                         }
1217
1218                         usb_fill_bulk_urb(urb, udev,
1219                                           usb_rcvbulkpipe(udev,
1220                                                           cport_in->endpoint),
1221                                           buffer, ES2_GBUF_MSG_SIZE_MAX,
1222                                           cport_in_callback, hd);
1223                         cport_in->urb[i] = urb;
1224                         cport_in->buffer[i] = buffer;
1225                 }
1226         }
1227
1228         /* Allocate urbs for our CPort OUT messages */
1229         for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1230                 struct urb *urb;
1231
1232                 urb = usb_alloc_urb(0, GFP_KERNEL);
1233                 if (!urb) {
1234                         retval = -ENOMEM;
1235                         goto error;
1236                 }
1237
1238                 es2->cport_out_urb[i] = urb;
1239                 es2->cport_out_urb_busy[i] = false;     /* just to be anal */
1240         }
1241
1242         /* XXX We will need to rename this per APB */
1243         es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1244                                                         (S_IWUSR | S_IRUGO),
1245                                                         gb_debugfs_get(), es2,
1246                                                         &apb_log_enable_fops);
1247
1248         retval = gb_hd_add(hd);
1249         if (retval)
1250                 goto error;
1251
1252         for (i = 0; i < NUM_BULKS; ++i) {
1253                 retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
1254                 if (retval)
1255                         goto err_disable_cport_in;
1256         }
1257
1258         return 0;
1259
1260 err_disable_cport_in:
1261         for (--i; i >= 0; --i)
1262                 es2_cport_in_disable(es2, &es2->cport_in[i]);
1263         gb_hd_del(hd);
1264 error:
1265         es2_destroy(es2);
1266
1267         return retval;
1268 }
1269
1270 static void ap_disconnect(struct usb_interface *interface)
1271 {
1272         struct es2_ap_dev *es2 = usb_get_intfdata(interface);
1273         int i;
1274
1275         gb_hd_del(es2->hd);
1276
1277         for (i = 0; i < NUM_BULKS; ++i)
1278                 es2_cport_in_disable(es2, &es2->cport_in[i]);
1279
1280         es2_destroy(es2);
1281 }
1282
1283 static struct usb_driver es2_ap_driver = {
1284         .name =         "es2_ap_driver",
1285         .probe =        ap_probe,
1286         .disconnect =   ap_disconnect,
1287         .id_table =     id_table,
1288         .soft_unbind =  1,
1289 };
1290
1291 module_usb_driver(es2_ap_driver);
1292
1293 MODULE_LICENSE("GPL v2");
1294 MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");