1 /*
2  * WUSB Wire Adapter
3  * Data transfer and URB enqueueing
4  *
5  * Copyright (C) 2005-2006 Intel Corporation
6  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  *
23  * How transfers work: get a buffer, break it up into segments
24  * (segment size is a multiple of the maxpacket size). For each
25  * segment, issue a segment request (struct wa_xfer_*), then send the
26  * data buffer if out, or nothing if in (all over the DTO endpoint).
27  *
28  * For each submitted segment request, a notification will come over
29  * the NEP endpoint and a transfer result (struct xfer_result) will
30  * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31  * data coming (inbound transfer), schedule a read and handle it.
32  *
33  * Sounds simple, it is a pain to implement.
34  *
35  *
36  * ENTRY POINTS
37  *
38  *   FIXME
39  *
40  * LIFE CYCLE / STATE DIAGRAM
41  *
42  *   FIXME
43  *
44  * THIS CODE IS DISGUSTING
45  *
46  *   Warned you are; it's my second try and still not happy with it.
47  *
48  * NOTES:
49  *
50  *   - No iso
51  *
52  *   - Supports DMA xfers, control, bulk and maybe interrupt
53  *
54  *   - Does not recycle unused rpipes
55  *
56  *     An rpipe is assigned to an endpoint the first time it is used,
57  *     and then it's there, assigned, until the endpoint is disabled
58  *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59  *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60  *     (should be a mutex).
61  *
62  *     Two ways this could be done:
63  *
64  *     (a) set up a timer every time an rpipe's use count drops to 1
65  *         (which means unused) or when a transfer ends. Reset the
66  *         timer when a xfer is queued. If the timer expires, release
67  *         the rpipe [see rpipe_ep_disable()].
68  *
69  *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
70  *         and none are found, go over the list, check each rpipe's
71  *         endpoint and activity record, and take one that has had no
72  *         last-xfer-done-ts in the last x seconds.
73  *
74  *     However, because we have a limited set of resources
75  *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
76  *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
77  *     have to rebuild all this around a scheduler: keep a list of
78  *     transactions to do and schedule them as the required components
79  *     (blocks, rpipes, segment slots, etc.) become available.
80  *     Painful.
81  */
82 #include <linux/spinlock.h>
83 #include <linux/slab.h>
84 #include <linux/hash.h>
85 #include <linux/ratelimit.h>
86 #include <linux/export.h>
87 #include <linux/scatterlist.h>
88
89 #include "wa-hc.h"
90 #include "wusbhc.h"
91
92 enum {
93         /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
94         WA_SEGS_MAX = 128,
95 };
96
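/*
 * Transfer segment states, in rough life-cycle order; WA_SEG_ERROR and
 * WA_SEG_ABORTED are terminal.
 */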
97 enum wa_seg_status {
98         WA_SEG_NOTREADY,
99         WA_SEG_READY,
100         WA_SEG_DELAYED,
101         WA_SEG_SUBMITTED,
102         WA_SEG_PENDING,
103         WA_SEG_DTI_PENDING,
104         WA_SEG_DONE,
105         WA_SEG_ERROR,
106         WA_SEG_ABORTED,
107 };
108
109 static void wa_xfer_delayed_run(struct wa_rpipe *);
110 static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
111
112 /*
113  * Life cycle governed by 'struct urb' (the refcount of the struct is
114  * that of the 'struct urb' and usb_free_urb() would free the whole
115  * struct).
116  */
117 struct wa_seg {
118         struct urb tr_urb;              /* transfer request urb. */
119         struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
120         struct urb *dto_urb;            /* for data output. */
121         struct list_head list_node;     /* for rpipe->req_list */
122         struct wa_xfer *xfer;           /* out xfer */
123         u8 index;                       /* which segment we are */
124         int isoc_frame_count;   /* number of isoc frames in this segment. */
125         int isoc_frame_offset;  /* starting frame offset in the xfer URB. */
126         /* Isoc frame that the current transfer buffer corresponds to. */
127         int isoc_frame_index;
128         int isoc_size;  /* size of all isoc frames sent by this seg. */
129         enum wa_seg_status status;
130         ssize_t result;                 /* bytes xfered or error */
131         struct wa_xfer_hdr xfer_hdr;
132 };
133
134 static inline void wa_seg_init(struct wa_seg *seg)
135 {
136         usb_init_urb(&seg->tr_urb);
137
138         /* set the remaining memory to 0. */
139         memset(((void *)seg) + sizeof(seg->tr_urb), 0,
140                 sizeof(*seg) - sizeof(seg->tr_urb));
141 }
142
143 /*
144  * Protected by xfer->lock
145  *
146  */
147 struct wa_xfer {
148         struct kref refcnt;
149         struct list_head list_node;
150         spinlock_t lock;
151         u32 id;
152
153         struct wahc *wa;                /* Wire adapter we are plugged to */
154         struct usb_host_endpoint *ep;
155         struct urb *urb;                /* URB we are transferring for */
156         struct wa_seg **seg;            /* transfer segments */
157         u8 segs, segs_submitted, segs_done;
158         unsigned is_inbound:1;
159         unsigned is_dma:1;
160         size_t seg_size;
161         int result;
162
163         gfp_t gfp;                      /* allocation mask */
164
165         struct wusb_dev *wusb_dev;      /* for activity timestamps */
166 };
167
168 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
169         struct wa_seg *seg, int curr_iso_frame);
170 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
171                 int starting_index, enum wa_seg_status status);
172
173 static inline void wa_xfer_init(struct wa_xfer *xfer)
174 {
175         kref_init(&xfer->refcnt);
176         INIT_LIST_HEAD(&xfer->list_node);
177         spin_lock_init(&xfer->lock);
178 }
179
180 /*
181  * Destroy a transfer structure
182  *
183  * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
184  * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
185  */
186 static void wa_xfer_destroy(struct kref *_xfer)
187 {
188         struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
189         if (xfer->seg) {
190                 unsigned cnt;
191                 for (cnt = 0; cnt < xfer->segs; cnt++) {
192                         struct wa_seg *seg = xfer->seg[cnt];
193                         if (seg) {
194                                 usb_free_urb(seg->isoc_pack_desc_urb);
195                                 if (seg->dto_urb) {
196                                         kfree(seg->dto_urb->sg);
197                                         usb_free_urb(seg->dto_urb);
198                                 }
199                                 usb_free_urb(&seg->tr_urb);
200                         }
201                 }
202                 kfree(xfer->seg);
203         }
204         kfree(xfer);
205 }
206
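/* Take a reference on the transfer; balanced by wa_xfer_put(). */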
207 static void wa_xfer_get(struct wa_xfer *xfer)
208 {
209         kref_get(&xfer->refcnt);
210 }
211
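/* Drop a reference; the last put frees the transfer via wa_xfer_destroy(). */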
212 static void wa_xfer_put(struct wa_xfer *xfer)
213 {
214         kref_put(&xfer->refcnt, wa_xfer_destroy);
215 }
216
217 /*
218  * Try to get exclusive access to the DTO endpoint resource.  Return true
219  * if successful.
220  */
221 static inline int __wa_dto_try_get(struct wahc *wa)
222 {
223         return (test_and_set_bit(0, &wa->dto_in_use) == 0);
224 }
225
226 /* Release the DTO endpoint resource. */
227 static inline void __wa_dto_put(struct wahc *wa)
228 {
229         clear_bit_unlock(0, &wa->dto_in_use);
230 }
231
232 /* Service RPIPEs that are waiting on the DTO resource. */
233 static void wa_check_for_delayed_rpipes(struct wahc *wa)
234 {
235         unsigned long flags;
236         int dto_waiting = 0;
237         struct wa_rpipe *rpipe;
238
239         spin_lock_irqsave(&wa->rpipe_lock, flags);
240         while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
241                 rpipe = list_first_entry(&wa->rpipe_delayed_list,
242                                 struct wa_rpipe, list_node);
243                 __wa_xfer_delayed_run(rpipe, &dto_waiting);
244                 /* remove this RPIPE from the list if it is not waiting. */
245                 if (!dto_waiting) {
246                         pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
247                                 __func__,
248                                 le16_to_cpu(rpipe->descr.wRPipeIndex));
249                         list_del_init(&rpipe->list_node);
250                 }
251         }
252         spin_unlock_irqrestore(&wa->rpipe_lock, flags);
253 }
254
255 /* add this RPIPE to the end of the delayed RPIPE list. */
256 static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
257 {
258         unsigned long flags;
259
260         spin_lock_irqsave(&wa->rpipe_lock, flags);
261         /* add rpipe to the list if it is not already on it. */
262         if (list_empty(&rpipe->list_node)) {
263                 pr_debug("%s: adding RPIPE %d to the delayed list.\n",
264                         __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
265                 list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
266         }
267         spin_unlock_irqrestore(&wa->rpipe_lock, flags);
268 }
269
270 /*
271  * xfer is referenced
272  *
273  * xfer->lock has to be unlocked
274  *
275  * We take xfer->lock for setting the result; this is a barrier
276  * against drivers/usb/core/hcd.c:unlink1() being called after we call
277  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
278  * reference to the transfer.
279  */
280 static void wa_xfer_giveback(struct wa_xfer *xfer)
281 {
282         unsigned long flags;
283
284         spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
285         list_del_init(&xfer->list_node);
286         usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
287         spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
288         /* FIXME: segmentation broken -- kills DWA */
289         wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
290         wa_put(xfer->wa);
291         wa_xfer_put(xfer);
292 }
293
294 /*
295  * xfer is referenced
296  *
297  * xfer->lock has to be unlocked
298  */
299 static void wa_xfer_completion(struct wa_xfer *xfer)
300 {
301         if (xfer->wusb_dev)
302                 wusb_dev_put(xfer->wusb_dev);
303         rpipe_put(xfer->ep->hcpriv);
304         wa_xfer_giveback(xfer);
305 }
306
307 /*
308  * Initialize a transfer's ID
309  *
310  * We need to use a sequential number; if we use the pointer or the
311  * hash of the pointer, it can repeat over sequential transfers and
312  * then it will confuse the HWA....wonder why in hell they put a 32
313  * bit handle in there then.
314  */
315 static void wa_xfer_id_init(struct wa_xfer *xfer)
316 {
317         xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
318 }
319
320 /* Return the xfer's ID. */
321 static inline u32 wa_xfer_id(struct wa_xfer *xfer)
322 {
323         return xfer->id;
324 }
325
326 /* Return the xfer's ID in transport format (little endian). */
327 static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
328 {
329         return cpu_to_le32(xfer->id);
330 }
331
332 /*
333  * If transfer is done, wrap it up and return true
334  *
335  * xfer->lock has to be locked
336  */
337 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
338 {
339         struct device *dev = &xfer->wa->usb_iface->dev;
340         unsigned result, cnt;
341         struct wa_seg *seg;
342         struct urb *urb = xfer->urb;
343         unsigned found_short = 0;
344
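        /* Done when all submitted segments have completed. */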
345         result = xfer->segs_done == xfer->segs_submitted;
346         if (result == 0)
347                 goto out;
348         urb->actual_length = 0;
349         for (cnt = 0; cnt < xfer->segs; cnt++) {
350                 seg = xfer->seg[cnt];
351                 switch (seg->status) {
352                 case WA_SEG_DONE:
353                         if (found_short && seg->result > 0) {
354                                 dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
355                                         xfer, wa_xfer_id(xfer), cnt,
356                                         seg->result);
357                                 urb->status = -EINVAL;
358                                 goto out;
359                         }
360                         urb->actual_length += seg->result;
361                         if (!(usb_pipeisoc(xfer->urb->pipe))
362                             && seg->result < xfer->seg_size
363                             && cnt != xfer->segs-1)
364                                 found_short = 1;
365                         dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
366                                 "result %zu urb->actual_length %d\n",
367                                 xfer, wa_xfer_id(xfer), seg->index, found_short,
368                                 seg->result, urb->actual_length);
369                         break;
370                 case WA_SEG_ERROR:
371                         xfer->result = seg->result;
372                         dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
373                                 xfer, wa_xfer_id(xfer), seg->index, seg->result,
374                                 seg->result);
375                         goto out;
376                 case WA_SEG_ABORTED:
377                         xfer->result = seg->result;
378                         dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
379                                 xfer, wa_xfer_id(xfer), seg->index, seg->result,
380                                 seg->result);
381                         goto out;
382                 default:
383                         dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
384                                  xfer, wa_xfer_id(xfer), cnt, seg->status);
385                         xfer->result = -EINVAL;
386                         goto out;
387                 }
388         }
389         xfer->result = 0;
390 out:
391         return result;
392 }
393
394 /*
395  * Mark the given segment as done.  Return true if this completes the xfer.
396  * This should only be called for segs that have been submitted to an RPIPE.
397  * Delayed segs are not marked as submitted so they do not need to be marked
398  * as done when cleaning up.
399  *
400  * xfer->lock has to be locked
401  */
402 static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
403         struct wa_seg *seg, enum wa_seg_status status)
404 {
405         seg->status = status;
406         xfer->segs_done++;
407
408         /* check for done. */
409         return __wa_xfer_is_done(xfer);
410 }
411
412 /*
413  * Search the wire adapter's transfer list for the given transfer ID
414  *
415  * Transfer IDs are sequential (see wa_xfer_id_init()).  The returned
416  * transfer, if any, is referenced; release it with wa_xfer_put().
417  *
418  * @returns NULL if not found.
419  */
420 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
421 {
422         unsigned long flags;
423         struct wa_xfer *xfer_itr;
424         spin_lock_irqsave(&wa->xfer_list_lock, flags);
425         list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
426                 if (id == xfer_itr->id) {
427                         wa_xfer_get(xfer_itr);
428                         goto out;
429                 }
430         }
431         xfer_itr = NULL;
432 out:
433         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
434         return xfer_itr;
435 }
436
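/*
 * Holder for an outstanding transfer abort request.  The URB at the head
 * of the struct owns the allocation (see __wa_xfer_abort()).
 */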
437 struct wa_xfer_abort_buffer {
438         struct urb urb;
439         struct wahc *wa;
440         struct wa_xfer_abort cmd;
441 };
442
443 static void __wa_xfer_abort_cb(struct urb *urb)
444 {
445         struct wa_xfer_abort_buffer *b = urb->context;
446         struct wahc *wa = b->wa;
447
448         /*
449          * If the abort request URB failed, then the HWA did not get the abort
450          * command.  Forcibly clean up the xfer without waiting for a Transfer
451          * Result from the HWA.
452          */
453         if (urb->status < 0) {
454                 struct wa_xfer *xfer;
455                 struct device *dev = &wa->usb_iface->dev;
456
457                 xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
458                 dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
459                         __func__, urb->status);
460                 if (xfer) {
461                         unsigned long flags;
462                         int done;
463                         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
464
465                         dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
466                                 __func__, xfer, wa_xfer_id(xfer));
467                         spin_lock_irqsave(&xfer->lock, flags);
468                         /* mark all segs as aborted. */
469                         wa_complete_remaining_xfer_segs(xfer, 0,
470                                 WA_SEG_ABORTED);
471                         done = __wa_xfer_is_done(xfer);
472                         spin_unlock_irqrestore(&xfer->lock, flags);
473                         if (done)
474                                 wa_xfer_completion(xfer);
475                         wa_xfer_delayed_run(rpipe);
476                         wa_xfer_put(xfer);
477                 } else {
478                         dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
479                                  __func__, le32_to_cpu(b->cmd.dwTransferID));
480                 }
481         }
482
483         wa_put(wa);     /* taken in __wa_xfer_abort */
484         usb_put_urb(&b->urb);
485 }
486
487 /*
488  * Aborts an ongoing transaction
489  *
490  * Assumes the transfer is referenced and locked and in a submitted
491  * state (mainly that there is an endpoint/rpipe assigned).
492  *
493  * The callback (see above) frees the request data by putting the URB;
494  * because the URB is allocated at the head of the struct, the whole
495  * space we allocated is kfreed.  On failure it also cleans up the xfer.
496  */
497 static int __wa_xfer_abort(struct wa_xfer *xfer)
498 {
499         int result = -ENOMEM;
500         struct device *dev = &xfer->wa->usb_iface->dev;
501         struct wa_xfer_abort_buffer *b;
502         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
503
504         b = kmalloc(sizeof(*b), GFP_ATOMIC);
505         if (b == NULL)
506                 goto error_kmalloc;
507         b->cmd.bLength =  sizeof(b->cmd);
508         b->cmd.bRequestType = WA_XFER_ABORT;
509         b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
510         b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
511         b->wa = wa_get(xfer->wa);
512
513         usb_init_urb(&b->urb);
514         usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
515                 usb_sndbulkpipe(xfer->wa->usb_dev,
516                                 xfer->wa->dto_epd->bEndpointAddress),
517                 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
518         result = usb_submit_urb(&b->urb, GFP_ATOMIC);
519         if (result < 0)
520                 goto error_submit;
521         return result;                          /* callback frees! */
522
523
524 error_submit:
525         wa_put(xfer->wa);
526         if (printk_ratelimit())
527                 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
528                         xfer, result);
529         kfree(b);
530 error_kmalloc:
531         return result;
532
533 }
534
535 /*
536  * Calculate the number of isoc frames starting from isoc_frame_offset
537  * that will fit in a transfer segment.
538  */
539 static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
540         int isoc_frame_offset, int *total_size)
541 {
542         int segment_size = 0, frame_count = 0;
543         int index = isoc_frame_offset;
544         struct usb_iso_packet_descriptor *iso_frame_desc =
545                 xfer->urb->iso_frame_desc;
546
547         while ((index < xfer->urb->number_of_packets)
548                 && ((segment_size + iso_frame_desc[index].length)
549                                 <= xfer->seg_size)) {
550                 /*
551                  * For Alereon HWA devices, only include an isoc frame in an
552                  * out segment if it is physically contiguous with the previous
553                  * frame.  This is required because those devices expect
554                  * the isoc frames to be sent as a single USB transaction as
555                  * opposed to one transaction per frame with standard HWA.
556                  */
557                 if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
558                         && (xfer->is_inbound == 0)
559                         && (index > isoc_frame_offset)
560                         && ((iso_frame_desc[index - 1].offset +
561                                 iso_frame_desc[index - 1].length) !=
562                                 iso_frame_desc[index].offset))
563                         break;
564
565                 /* this frame fits. count it. */
566                 ++frame_count;
567                 segment_size += iso_frame_desc[index].length;
568
569                 /* move to the next isoc frame. */
570                 ++index;
571         }
572
573         *total_size = segment_size;
574         return frame_count;
575 }
576
577 /*
578  * Compute the transfer's segment size and number of segments.
579  * @returns < 0 on error, transfer segment request size if ok
580  */
581 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
582                                      enum wa_xfer_type *pxfer_type)
583 {
584         ssize_t result;
585         struct device *dev = &xfer->wa->usb_iface->dev;
586         size_t maxpktsize;
587         struct urb *urb = xfer->urb;
588         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
589
590         switch (rpipe->descr.bmAttribute & 0x3) {
591         case USB_ENDPOINT_XFER_CONTROL:
592                 *pxfer_type = WA_XFER_TYPE_CTL;
593                 result = sizeof(struct wa_xfer_ctl);
594                 break;
595         case USB_ENDPOINT_XFER_INT:
596         case USB_ENDPOINT_XFER_BULK:
597                 *pxfer_type = WA_XFER_TYPE_BI;
598                 result = sizeof(struct wa_xfer_bi);
599                 break;
600         case USB_ENDPOINT_XFER_ISOC:
601                 *pxfer_type = WA_XFER_TYPE_ISO;
602                 result = sizeof(struct wa_xfer_hwaiso);
603                 break;
604         default:
605                 /* never happens */
606                 BUG();
607                 result = -EINVAL;       /* shut gcc up */
608         }
609         xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
610         xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
611
612         maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
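        /*
         * A segment can hold wBlocks blocks of
         * 1 << (bRPipeBlockSize - 1) bytes each.
         */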
613         xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
614                 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
615         /* Compute the segment size and make sure it is a multiple of
616          * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
617          * a check (FIXME) */
618         if (xfer->seg_size < maxpktsize) {
619                 dev_err(dev,
620                         "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
621                         xfer->seg_size, maxpktsize);
622                 result = -EINVAL;
623                 goto error;
624         }
625         xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
626         if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
627                 int index = 0;
628
629                 xfer->segs = 0;
630                 /*
631                  * loop over urb->number_of_packets to determine how many
632                  * xfer segments will be needed to send the isoc frames.
633                  */
634                 while (index < urb->number_of_packets) {
635                         int seg_size; /* don't care. */
636                         index += __wa_seg_calculate_isoc_frame_count(xfer,
637                                         index, &seg_size);
638                         ++xfer->segs;
639                 }
640         } else {
641                 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
642                                                 xfer->seg_size);
643                 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
644                         xfer->segs = 1;
645         }
646
647         if (xfer->segs > WA_SEGS_MAX) {
648                 dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
649                         (urb->transfer_buffer_length/xfer->seg_size),
650                         WA_SEGS_MAX);
651                 result = -EINVAL;
652                 goto error;
653         }
654 error:
655         return result;
656 }
657
658 static void __wa_setup_isoc_packet_descr(
659                 struct wa_xfer_packet_info_hwaiso *packet_desc,
660                 struct wa_xfer *xfer,
661                 struct wa_seg *seg) {
662         struct usb_iso_packet_descriptor *iso_frame_desc =
663                 xfer->urb->iso_frame_desc;
664         int frame_index;
665
666         /* populate isoc packet descriptor. */
667         packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
668         packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
669                 (sizeof(packet_desc->PacketLength[0]) *
670                         seg->isoc_frame_count));
671         for (frame_index = 0; frame_index < seg->isoc_frame_count;
672                 ++frame_index) {
673                 int offset_index = frame_index + seg->isoc_frame_offset;
674                 packet_desc->PacketLength[frame_index] =
675                         cpu_to_le16(iso_frame_desc[offset_index].length);
676         }
677 }
678
679
680 /* Fill in the common request header and xfer-type specific data. */
681 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
682                                  struct wa_xfer_hdr *xfer_hdr0,
683                                  enum wa_xfer_type xfer_type,
684                                  size_t xfer_hdr_size)
685 {
686         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
687         struct wa_seg *seg = xfer->seg[0];
688
689         xfer_hdr0 = &seg->xfer_hdr;
690         xfer_hdr0->bLength = xfer_hdr_size;
691         xfer_hdr0->bRequestType = xfer_type;
692         xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
693         xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
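        /* Segment index; the last segment gets bit 0x80 set later. */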
694         xfer_hdr0->bTransferSegment = 0;
695         switch (xfer_type) {
696         case WA_XFER_TYPE_CTL: {
697                 struct wa_xfer_ctl *xfer_ctl =
698                         container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
699                 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
700                 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
701                        sizeof(xfer_ctl->baSetupData));
702                 break;
703         }
704         case WA_XFER_TYPE_BI:
705                 break;
706         case WA_XFER_TYPE_ISO: {
707                 struct wa_xfer_hwaiso *xfer_iso =
708                         container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
709                 struct wa_xfer_packet_info_hwaiso *packet_desc =
710                         ((void *)xfer_iso) + xfer_hdr_size;
711
712                 /* populate the isoc section of the transfer request. */
713                 xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
714                 /* populate isoc packet descriptor. */
715                 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
716                 break;
717         }
718         default:
719                 BUG();
720         }
721 }
722
723 /*
724  * Callback for the OUT data phase of the segment request
725  *
726  * Check wa_seg_tr_cb(); most comments also apply here because this
727  * function does almost the same thing and they work closely
728  * together.
729  *
730  * If the seg request has failed but this DTO phase has succeeded,
731  * wa_seg_tr_cb() has already failed the segment and moved the
732  * status to WA_SEG_ERROR, so this will go through 'case 0' and
733  * effectively do nothing.
734  */
735 static void wa_seg_dto_cb(struct urb *urb)
736 {
737         struct wa_seg *seg = urb->context;
738         struct wa_xfer *xfer = seg->xfer;
739         struct wahc *wa;
740         struct device *dev;
741         struct wa_rpipe *rpipe;
742         unsigned long flags;
743         unsigned rpipe_ready = 0;
744         int data_send_done = 1, release_dto = 0, holding_dto = 0;
745         u8 done = 0;
746         int result;
747
748         /* free the sg if it was used. */
749         kfree(urb->sg);
750         urb->sg = NULL;
751
752         spin_lock_irqsave(&xfer->lock, flags);
753         wa = xfer->wa;
754         dev = &wa->usb_iface->dev;
755         if (usb_pipeisoc(xfer->urb->pipe)) {
756                 /* Alereon HWA sends all isoc frames in a single transfer. */
757                 if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
758                         seg->isoc_frame_index += seg->isoc_frame_count;
759                 else
760                         seg->isoc_frame_index += 1;
761                 if (seg->isoc_frame_index < seg->isoc_frame_count) {
762                         data_send_done = 0;
763                         holding_dto = 1; /* checked in error cases. */
764                         /*
765                          * if this is the last isoc frame of the segment, we
766                          * can release DTO after sending this frame.
767                          */
768                         if ((seg->isoc_frame_index + 1) >=
769                                 seg->isoc_frame_count)
770                                 release_dto = 1;
771                 }
772                 dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
773                         wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
774                         holding_dto, release_dto);
775         }
776         spin_unlock_irqrestore(&xfer->lock, flags);
777
778         switch (urb->status) {
779         case 0:
780                 spin_lock_irqsave(&xfer->lock, flags);
781                 seg->result += urb->actual_length;
782                 if (data_send_done) {
783                         dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
784                                 wa_xfer_id(xfer), seg->index, seg->result);
785                         if (seg->status < WA_SEG_PENDING)
786                                 seg->status = WA_SEG_PENDING;
787                 } else {
788                         /* should only hit this for isoc xfers. */
789                         /*
790                          * Populate the dto URB with the next isoc frame buffer,
791                          * send the URB and release DTO if we no longer need it.
792                          */
793                          __wa_populate_dto_urb_isoc(xfer, seg,
794                                 seg->isoc_frame_offset + seg->isoc_frame_index);
795
796                         /* resubmit the URB with the next isoc frame. */
797                         /* take a ref on resubmit. */
798                         wa_xfer_get(xfer);
799                         result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
800                         if (result < 0) {
801                                 dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
802                                        wa_xfer_id(xfer), seg->index, result);
803                                 spin_unlock_irqrestore(&xfer->lock, flags);
804                                 goto error_dto_submit;
805                         }
806                 }
807                 spin_unlock_irqrestore(&xfer->lock, flags);
808                 if (release_dto) {
809                         __wa_dto_put(wa);
810                         wa_check_for_delayed_rpipes(wa);
811                 }
812                 break;
813         case -ECONNRESET:       /* URB unlinked; no need to do anything */
814         case -ENOENT:           /* as it was done by whoever unlinked us */
815                 if (holding_dto) {
816                         __wa_dto_put(wa);
817                         wa_check_for_delayed_rpipes(wa);
818                 }
819                 break;
820         default:                /* Other errors ... */
821                 dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
822                         wa_xfer_id(xfer), seg->index, urb->status);
823                 goto error_default;
824         }
825
826         /* taken when this URB was submitted. */
827         wa_xfer_put(xfer);
828         return;
829
830 error_dto_submit:
831         /* taken on resubmit attempt. */
832         wa_xfer_put(xfer);
833 error_default:
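        /* Data out failed: abort the xfer and fail this segment. */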
834         spin_lock_irqsave(&xfer->lock, flags);
835         rpipe = xfer->ep->hcpriv;
836         if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
837                     EDC_ERROR_TIMEFRAME)){
838                 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
839                 wa_reset_all(wa);
840         }
841         if (seg->status != WA_SEG_ERROR) {
842                 seg->result = urb->status;
843                 __wa_xfer_abort(xfer);
844                 rpipe_ready = rpipe_avail_inc(rpipe);
845                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
846         }
847         spin_unlock_irqrestore(&xfer->lock, flags);
848         if (holding_dto) {
849                 __wa_dto_put(wa);
850                 wa_check_for_delayed_rpipes(wa);
851         }
852         if (done)
853                 wa_xfer_completion(xfer);
854         if (rpipe_ready)
855                 wa_xfer_delayed_run(rpipe);
856         /* taken when this URB was submitted. */
857         wa_xfer_put(xfer);
858 }
859
860 /*
861  * Callback for the isoc packet descriptor phase of the segment request
862  *
863  * Check wa_seg_tr_cb(); most comments also apply here because this
864  * function does almost the same thing and they work closely
865  * together.
866  *
867  * If the seg request has failed but this phase has succeeded,
868  * wa_seg_tr_cb() has already failed the segment and moved the
869  * status to WA_SEG_ERROR, so this will go through 'case 0' and
870  * effectively do nothing.
871  */
872 static void wa_seg_iso_pack_desc_cb(struct urb *urb)
873 {
874         struct wa_seg *seg = urb->context;
875         struct wa_xfer *xfer = seg->xfer;
876         struct wahc *wa;
877         struct device *dev;
878         struct wa_rpipe *rpipe;
879         unsigned long flags;
880         unsigned rpipe_ready = 0;
881         u8 done = 0;
882
883         switch (urb->status) {
884         case 0:
885                 spin_lock_irqsave(&xfer->lock, flags);
886                 wa = xfer->wa;
887                 dev = &wa->usb_iface->dev;
888                 dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
889                         wa_xfer_id(xfer), seg->index);
890                 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
891                         seg->status = WA_SEG_PENDING;
892                 spin_unlock_irqrestore(&xfer->lock, flags);
893                 break;
894         case -ECONNRESET:       /* URB unlinked; no need to do anything */
895         case -ENOENT:           /* as it was done by whoever unlinked us */
896                 break;
897         default:                /* Other errors ... */
898                 spin_lock_irqsave(&xfer->lock, flags);
899                 wa = xfer->wa;
900                 dev = &wa->usb_iface->dev;
901                 rpipe = xfer->ep->hcpriv;
902                 pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
903                                 wa_xfer_id(xfer), seg->index, urb->status);
904                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
905                             EDC_ERROR_TIMEFRAME)){
906                         dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
907                         wa_reset_all(wa);
908                 }
909                 if (seg->status != WA_SEG_ERROR) {
910                         usb_unlink_urb(seg->dto_urb);
911                         seg->result = urb->status;
912                         __wa_xfer_abort(xfer);
913                         rpipe_ready = rpipe_avail_inc(rpipe);
914                         done = __wa_xfer_mark_seg_as_done(xfer, seg,
915                                         WA_SEG_ERROR);
916                 }
917                 spin_unlock_irqrestore(&xfer->lock, flags);
918                 if (done)
919                         wa_xfer_completion(xfer);
920                 if (rpipe_ready)
921                         wa_xfer_delayed_run(rpipe);
922         }
923         /* taken when this URB was submitted. */
924         wa_xfer_put(xfer);
925 }
926
927 /*
928  * Callback for the segment request
929  *
930  * On success, transition the segment state (unless it has already
931  * transitioned or the transfer is outbound); otherwise, take note of
932  * the error, mark this segment done and try to complete the transfer.
933  *
934  * Note we don't touch the transfer until we are sure it hasn't been
935  * cancelled (ECONNRESET, ENOENT), in which case seg->xfer could
936  * already be gone.
937  *
938  * We have to check before setting the status to WA_SEG_PENDING
939  * because sometimes the xfer result callback arrives before this
940  * callback (geeeeeeze), so it might happen that we are already in
941  * another state. As well, we don't set it if the transfer is not inbound,
942  * as in that case, wa_seg_dto_cb will do it when the OUT data phase
943  * finishes.
944  */
945 static void wa_seg_tr_cb(struct urb *urb)
946 {
947         struct wa_seg *seg = urb->context;
948         struct wa_xfer *xfer = seg->xfer;
949         struct wahc *wa;
950         struct device *dev;
951         struct wa_rpipe *rpipe;
952         unsigned long flags;
953         unsigned rpipe_ready;
954         u8 done = 0;
955
956         switch (urb->status) {
957         case 0:
958                 spin_lock_irqsave(&xfer->lock, flags);
959                 wa = xfer->wa;
960                 dev = &wa->usb_iface->dev;
961                 dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
962                         xfer, wa_xfer_id(xfer), seg->index);
963                 if (xfer->is_inbound &&
964                         seg->status < WA_SEG_PENDING &&
965                         !(usb_pipeisoc(xfer->urb->pipe)))
966                         seg->status = WA_SEG_PENDING;
967                 spin_unlock_irqrestore(&xfer->lock, flags);
968                 break;
969         case -ECONNRESET:       /* URB unlinked; no need to do anything */
970         case -ENOENT:           /* as it was done by whoever unlinked us */
971                 break;
972         default:                /* Other errors ... */
973                 spin_lock_irqsave(&xfer->lock, flags);
974                 wa = xfer->wa;
975                 dev = &wa->usb_iface->dev;
976                 rpipe = xfer->ep->hcpriv;
977                 if (printk_ratelimit())
978                         dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
979                                 xfer, wa_xfer_id(xfer), seg->index,
980                                 urb->status);
981                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
982                             EDC_ERROR_TIMEFRAME)){
983                         dev_err(dev, "DTO: URB max acceptable errors "
984                                 "exceeded, resetting device\n");
985                         wa_reset_all(wa);
986                 }
987                 usb_unlink_urb(seg->isoc_pack_desc_urb);
988                 usb_unlink_urb(seg->dto_urb);
989                 seg->result = urb->status;
990                 __wa_xfer_abort(xfer);
991                 rpipe_ready = rpipe_avail_inc(rpipe);
992                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
993                 spin_unlock_irqrestore(&xfer->lock, flags);
994                 if (done)
995                         wa_xfer_completion(xfer);
996                 if (rpipe_ready)
997                         wa_xfer_delayed_run(rpipe);
998         }
999         /* taken when this URB was submitted. */
1000         wa_xfer_put(xfer);
1001 }
1002
1003 /*
1004  * Allocate an SG list to store bytes_to_transfer bytes and copy the
1005  * subset of the in_sg that matches the buffer subset
1006  * we are about to transfer.
1007  */
1008 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1009         const unsigned int bytes_transferred,
1010         const unsigned int bytes_to_transfer, int *out_num_sgs)
1011 {
1012         struct scatterlist *out_sg;
1013         unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1014                 nents;
1015         struct scatterlist *current_xfer_sg = in_sg;
1016         struct scatterlist *current_seg_sg, *last_seg_sg;
1017
1018         /* skip previously transferred pages. */
1019         while ((current_xfer_sg) &&
1020                         (bytes_processed < bytes_transferred)) {
1021                 bytes_processed += current_xfer_sg->length;
1022
1023                 /* advance the sg if current segment starts on or past the
1024                         next page. */
1025                 if (bytes_processed <= bytes_transferred)
1026                         current_xfer_sg = sg_next(current_xfer_sg);
1027         }
1028
1029         /* the data for the current segment starts in current_xfer_sg.
1030                 calculate the offset. */
1031         if (bytes_processed > bytes_transferred) {
1032                 offset_into_current_page_data = current_xfer_sg->length -
1033                         (bytes_processed - bytes_transferred);
1034         }
1035
1036         /* calculate the number of pages needed by this segment. */
1037         nents = DIV_ROUND_UP((bytes_to_transfer +
1038                 offset_into_current_page_data +
1039                 current_xfer_sg->offset),
1040                 PAGE_SIZE);
1041
1042         out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1043         if (out_sg) {
1044                 sg_init_table(out_sg, nents);
1045
1046                 /* copy the portion of the incoming SG that correlates to the
1047                  * data to be transferred by this segment to the segment SG. */
1048                 last_seg_sg = current_seg_sg = out_sg;
1049                 bytes_processed = 0;
1050
1051                 /* reset nents and calculate the actual number of sg entries
1052                         needed. */
1053                 nents = 0;
1054                 while ((bytes_processed < bytes_to_transfer) &&
1055                                 current_seg_sg && current_xfer_sg) {
1056                         unsigned int page_len = min((current_xfer_sg->length -
1057                                 offset_into_current_page_data),
1058                                 (bytes_to_transfer - bytes_processed));
1059
1060                         sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1061                                 page_len,
1062                                 current_xfer_sg->offset +
1063                                 offset_into_current_page_data);
1064
1065                         bytes_processed += page_len;
1066
1067                         last_seg_sg = current_seg_sg;
1068                         current_seg_sg = sg_next(current_seg_sg);
1069                         current_xfer_sg = sg_next(current_xfer_sg);
1070
1071                         /* only the first page may require additional offset. */
1072                         offset_into_current_page_data = 0;
1073                         nents++;
1074                 }
1075
1076                 /* update num_sgs and terminate the list since we may have
1077                  *  concatenated pages. */
1078                 sg_mark_end(last_seg_sg);
1079                 *out_num_sgs = nents;
1080         }
1081
1082         return out_sg;
1083 }
1084
1085 /*
1086  * Populate DMA buffer info for the isoc dto urb.
1087  */
1088 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1089         struct wa_seg *seg, int curr_iso_frame)
1090 {
1091         seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1092         seg->dto_urb->sg = NULL;
1093         seg->dto_urb->num_sgs = 0;
1094         /* dto urb buffer address pulled from iso_frame_desc. */
1095         seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1096                 xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1097         /* The Alereon HWA sends a single URB with all isoc segs. */
1098         if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1099                 seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1100         else
1101                 seg->dto_urb->transfer_buffer_length =
1102                         xfer->urb->iso_frame_desc[curr_iso_frame].length;
1103 }
1104
1105 /*
1106  * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1107  */
1108 static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1109         struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1110 {
1111         int result = 0;
1112
1113         if (xfer->is_dma) {
1114                 seg->dto_urb->transfer_dma =
1115                         xfer->urb->transfer_dma + buf_itr_offset;
1116                 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1117                 seg->dto_urb->sg = NULL;
1118                 seg->dto_urb->num_sgs = 0;
1119         } else {
1120                 /* do buffer or SG processing. */
1121                 seg->dto_urb->transfer_flags &=
1122                         ~URB_NO_TRANSFER_DMA_MAP;
1123                 /* this should always be 0 before a resubmit. */
1124                 seg->dto_urb->num_mapped_sgs = 0;
1125
1126                 if (xfer->urb->transfer_buffer) {
1127                         seg->dto_urb->transfer_buffer =
1128                                 xfer->urb->transfer_buffer +
1129                                 buf_itr_offset;
1130                         seg->dto_urb->sg = NULL;
1131                         seg->dto_urb->num_sgs = 0;
1132                 } else {
1133                         seg->dto_urb->transfer_buffer = NULL;
1134
1135                         /*
1136                          * allocate an SG list to store seg_size bytes
1137                          * and copy the subset of the xfer->urb->sg that
1138                          * matches the buffer subset we are about to
1139                          * read.
1140                          */
1141                         seg->dto_urb->sg = wa_xfer_create_subset_sg(
1142                                 xfer->urb->sg,
1143                                 buf_itr_offset, buf_itr_size,
1144                                 &(seg->dto_urb->num_sgs));
1145                         if (!(seg->dto_urb->sg))
1146                                 result = -ENOMEM;
1147                 }
1148         }
1149         seg->dto_urb->transfer_buffer_length = buf_itr_size;
1150
1151         return result;
1152 }
1153
1154 /*
1155  * Allocate the segs array and initialize each of them
1156  *
1157  * The segments are freed by wa_xfer_destroy() when the xfer use count
1158  * drops to zero; however, because each segment is given the same life
1159  * cycle as the USB URB it contains, it is actually freed by
1160  * usb_put_urb() on the contained USB URB (twisted, eh?).
1161  */
1162 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1163 {
1164         int result, cnt, isoc_frame_offset = 0;
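        /*
         * Per-seg allocation: a wa_seg with its trailing generic header
         * replaced by the full, type-specific transfer request header.
         */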
1165         size_t alloc_size = sizeof(*xfer->seg[0])
1166                 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1167         struct usb_device *usb_dev = xfer->wa->usb_dev;
1168         const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1169         struct wa_seg *seg;
1170         size_t buf_itr, buf_size, buf_itr_size;
1171
1172         result = -ENOMEM;
1173         xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1174         if (xfer->seg == NULL)
1175                 goto error_segs_kzalloc;
1176         buf_itr = 0;
1177         buf_size = xfer->urb->transfer_buffer_length;
1178         for (cnt = 0; cnt < xfer->segs; cnt++) {
1179                 size_t iso_pkt_descr_size = 0;
1180                 int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1181
1182                 /*
1183                  * Adjust the size of the segment object to contain space for
1184                  * the isoc packet descriptor buffer.
1185                  */
1186                 if (usb_pipeisoc(xfer->urb->pipe)) {
1187                         seg_isoc_frame_count =
1188                                 __wa_seg_calculate_isoc_frame_count(xfer,
1189                                         isoc_frame_offset, &seg_isoc_size);
1190
1191                         iso_pkt_descr_size =
1192                                 sizeof(struct wa_xfer_packet_info_hwaiso) +
1193                                 (seg_isoc_frame_count * sizeof(__le16));
1194                 }
1195                 seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1196                                                 GFP_ATOMIC);
1197                 if (seg == NULL)
1198                         goto error_seg_kmalloc;
1199                 wa_seg_init(seg);
1200                 seg->xfer = xfer;
1201                 seg->index = cnt;
1202                 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1203                                   usb_sndbulkpipe(usb_dev,
1204                                                   dto_epd->bEndpointAddress),
1205                                   &seg->xfer_hdr, xfer_hdr_size,
1206                                   wa_seg_tr_cb, seg);
1207                 buf_itr_size = min(buf_size, xfer->seg_size);
1208
1209                 if (usb_pipeisoc(xfer->urb->pipe)) {
1210                         seg->isoc_frame_count = seg_isoc_frame_count;
1211                         seg->isoc_frame_offset = isoc_frame_offset;
1212                         seg->isoc_size = seg_isoc_size;
1213                         /* iso packet descriptor. */
1214                         seg->isoc_pack_desc_urb =
1215                                         usb_alloc_urb(0, GFP_ATOMIC);
1216                         if (seg->isoc_pack_desc_urb == NULL)
1217                                 goto error_iso_pack_desc_alloc;
1218                         /*
1219                          * The buffer for the isoc packet descriptor starts
1220                          * after the transfer request header in the
1221                          * segment object memory buffer.
1222                          */
1223                         usb_fill_bulk_urb(
1224                                 seg->isoc_pack_desc_urb, usb_dev,
1225                                 usb_sndbulkpipe(usb_dev,
1226                                         dto_epd->bEndpointAddress),
1227                                 (void *)(&seg->xfer_hdr) +
1228                                         xfer_hdr_size,
1229                                 iso_pkt_descr_size,
1230                                 wa_seg_iso_pack_desc_cb, seg);
1231
1232                         /* adjust starting frame offset for next seg. */
1233                         isoc_frame_offset += seg_isoc_frame_count;
1234                 }
1235
1236                 if (xfer->is_inbound == 0 && buf_size > 0) {
1237                         /* outbound data. */
1238                         seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1239                         if (seg->dto_urb == NULL)
1240                                 goto error_dto_alloc;
1241                         usb_fill_bulk_urb(
1242                                 seg->dto_urb, usb_dev,
1243                                 usb_sndbulkpipe(usb_dev,
1244                                                 dto_epd->bEndpointAddress),
1245                                 NULL, 0, wa_seg_dto_cb, seg);
1246
1247                         if (usb_pipeisoc(xfer->urb->pipe)) {
1248                                 /*
1249                                  * Fill in the xfer buffer information for the
1250                                  * first isoc frame.  Subsequent frames in this
1251                                  * segment will be filled in and sent from the
1252                                  * DTO completion routine, if needed.
1253                                  */
1254                                 __wa_populate_dto_urb_isoc(xfer, seg,
1255                                         seg->isoc_frame_offset);
1256                         } else {
1257                                 /* fill in the xfer buffer information. */
1258                                 result = __wa_populate_dto_urb(xfer, seg,
1259                                                         buf_itr, buf_itr_size);
1260                                 if (result < 0)
1261                                         goto error_seg_outbound_populate;
1262
1263                                 buf_itr += buf_itr_size;
1264                                 buf_size -= buf_itr_size;
1265                         }
1266                 }
1267                 seg->status = WA_SEG_READY;
1268         }
1269         return 0;
1270
1271         /*
1272          * Free the memory for the current segment which failed to init.
1273          * Use the fact that cnt is left at where it failed.  The remaining
1274          * segments will be cleaned up by wa_xfer_destroy.
1275          */
1276 error_seg_outbound_populate:
1277         usb_free_urb(xfer->seg[cnt]->dto_urb);
1278 error_dto_alloc:
1279         usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1280 error_iso_pack_desc_alloc:
1281         kfree(xfer->seg[cnt]);
1282         xfer->seg[cnt] = NULL;
1283 error_seg_kmalloc:
1284 error_segs_kzalloc:
1285         return result;
1286 }
1287
1288 /*
1289  * Allocates all the stuff needed to submit a transfer
1290  *
1291  * Breaks the whole data buffer into a list of segments; each one has
1292  * a structure allocated to it and is linked in xfer->seg[index].
1293  *
1294  * FIXME: merge setup_segs() and the last part of this function, no
1295  *        need to do two for loops when we could run everything in a
1296  *        single one
1297  */
1298 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1299 {
1300         int result;
1301         struct device *dev = &xfer->wa->usb_iface->dev;
1302         enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1303         size_t xfer_hdr_size, cnt, transfer_size;
1304         struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1305
1306         result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1307         if (result < 0)
1308                 goto error_setup_sizes;
1309         xfer_hdr_size = result;
1310         result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1311         if (result < 0) {
1312                 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1313                         xfer, xfer->segs, result);
1314                 goto error_setup_segs;
1315         }
1316         /* Fill the first header */
1317         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1318         wa_xfer_id_init(xfer);
1319         __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1320
1321         /* Fill remaining headers */
1322         xfer_hdr = xfer_hdr0;
1323         if (xfer_type == WA_XFER_TYPE_ISO) {
1324                 xfer_hdr0->dwTransferLength =
1325                         cpu_to_le32(xfer->seg[0]->isoc_size);
1326                 for (cnt = 1; cnt < xfer->segs; cnt++) {
1327                         struct wa_xfer_packet_info_hwaiso *packet_desc;
1328                         struct wa_seg *seg = xfer->seg[cnt];
1329                         struct wa_xfer_hwaiso *xfer_iso;
1330
1331                         xfer_hdr = &seg->xfer_hdr;
1332                         xfer_iso = container_of(xfer_hdr,
1333                                                 struct wa_xfer_hwaiso, hdr);
1334                         packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1335                         /*
1336                          * Copy values from the 0th header. Segment specific
1337                          * values are set below.
1338                          */
1339                         memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1340                         xfer_hdr->bTransferSegment = cnt;
1341                         xfer_hdr->dwTransferLength =
1342                                 cpu_to_le32(seg->isoc_size);
1343                         xfer_iso->dwNumOfPackets =
1344                                         cpu_to_le32(seg->isoc_frame_count);
1345                         __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1346                         seg->status = WA_SEG_READY;
1347                 }
1348         } else {
1349                 transfer_size = urb->transfer_buffer_length;
1350                 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1351                         cpu_to_le32(xfer->seg_size) :
1352                         cpu_to_le32(transfer_size);
1353                 transfer_size -=  xfer->seg_size;
1354                 for (cnt = 1; cnt < xfer->segs; cnt++) {
1355                         xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1356                         memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1357                         xfer_hdr->bTransferSegment = cnt;
1358                         xfer_hdr->dwTransferLength =
1359                                 transfer_size > xfer->seg_size ?
1360                                         cpu_to_le32(xfer->seg_size)
1361                                         : cpu_to_le32(transfer_size);
1362                         xfer->seg[cnt]->status = WA_SEG_READY;
1363                         transfer_size -=  xfer->seg_size;
1364                 }
1365         }
1366         xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
1367         result = 0;
1368 error_setup_segs:
1369 error_setup_sizes:
1370         return result;
1371 }
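
/*
 * Illustrative sketch only, not used by the driver: how the per-segment
 * dwTransferLength values filled in above work out.  For example, a 100 KiB
 * bulk transfer with a 16 KiB seg_size is split into seven segments, six
 * carrying 16 KiB and the last one 4 KiB, and only the final header gets the
 * 0x80 "last segment" bit set in bTransferSegment.
 */
static size_t __maybe_unused example_seg_len(size_t transfer_size,
                size_t seg_size, unsigned int seg_index)
{
        size_t remaining = transfer_size - seg_index * seg_size;

        return remaining > seg_size ? seg_size : remaining;
}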
1372
1373 /*
1374  * Submit a transfer segment: the transfer request URB, the isoc packet
1375  * descriptor URB (if any) and the outbound data (DTO) URB (if any).
1376  * rpipe->seg_lock must be held by the caller!
1377  */
1378 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1379                            struct wa_seg *seg, int *dto_done)
1380 {
1381         int result;
1382
1383         /* default to done unless we encounter a multi-frame isoc segment. */
1384         *dto_done = 1;
1385
1386         /*
1387          * Take a ref for each segment urb so the xfer cannot disappear until
1388          * all of the callbacks run.
1389          */
1390         wa_xfer_get(xfer);
1391         /* submit the transfer request. */
1392         seg->status = WA_SEG_SUBMITTED;
1393         result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1394         if (result < 0) {
1395                 pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1396                        __func__, xfer, seg->index, result);
1397                 wa_xfer_put(xfer);
1398                 goto error_tr_submit;
1399         }
1400         /* submit the isoc packet descriptor if present. */
1401         if (seg->isoc_pack_desc_urb) {
1402                 wa_xfer_get(xfer);
1403                 result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1404                 seg->isoc_frame_index = 0;
1405                 if (result < 0) {
1406                         pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1407                                __func__, xfer, seg->index, result);
1408                         wa_xfer_put(xfer);
1409                         goto error_iso_pack_desc_submit;
1410                 }
1411         }
1412         /* submit the out data if this is an out request. */
1413         if (seg->dto_urb) {
1414                 struct wahc *wa = xfer->wa;
1415                 wa_xfer_get(xfer);
1416                 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1417                 if (result < 0) {
1418                         pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1419                                __func__, xfer, seg->index, result);
1420                         wa_xfer_put(xfer);
1421                         goto error_dto_submit;
1422                 }
1423                 /*
1424                  * If this segment contains more than one isoc frame, hold
1425                  * onto the dto resource until we send all frames.
1426                  * Only applies to non-Alereon devices.
1427                  */
1428                 if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1429                         && (seg->isoc_frame_count > 1))
1430                         *dto_done = 0;
1431         }
1432         rpipe_avail_dec(rpipe);
1433         return 0;
1434
1435 error_dto_submit:
1436         usb_unlink_urb(seg->isoc_pack_desc_urb);
1437 error_iso_pack_desc_submit:
1438         usb_unlink_urb(&seg->tr_urb);
1439 error_tr_submit:
1440         seg->status = WA_SEG_ERROR;
1441         seg->result = result;
1442         *dto_done = 1;
1443         return result;
1444 }
1445
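/*
 * Illustrative sketch only (hypothetical example_submit_one() helper): the
 * reference pairing used by __wa_seg_submit() above.  Every URB that is
 * successfully submitted carries one xfer reference that its completion
 * callback eventually drops; if the submit fails, the reference is dropped
 * immediately since no callback will run for that URB.
 */
static int __maybe_unused example_submit_one(struct wa_xfer *xfer,
                struct urb *urb)
{
        int result;

        wa_xfer_get(xfer);              /* ref owned by the future callback */
        result = usb_submit_urb(urb, GFP_ATOMIC);
        if (result < 0)
                wa_xfer_put(xfer);      /* no callback will run; drop the ref */
        return result;
}
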
1446 /*
1447  * Execute more queued request segments, up to the maximum number that may
1448  * run concurrently.  Return true if the DTO resource was acquired and released.
1449  *
1450  * The ugly unlock/lock sequence on the error path is needed because
1451  * xfer->lock normally nests the seg_lock and not vice versa.
1452  */
1453 static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1454 {
1455         int result, dto_acquired = 0, dto_done = 0;
1456         struct device *dev = &rpipe->wa->usb_iface->dev;
1457         struct wa_seg *seg;
1458         struct wa_xfer *xfer;
1459         unsigned long flags;
1460
1461         *dto_waiting = 0;
1462
1463         spin_lock_irqsave(&rpipe->seg_lock, flags);
1464         while (atomic_read(&rpipe->segs_available) > 0
1465               && !list_empty(&rpipe->seg_list)
1466               && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1467                 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1468                                  list_node);
1469                 list_del(&seg->list_node);
1470                 xfer = seg->xfer;
1471                 /*
1472                  * Get a reference to the xfer in case the callbacks for the
1473                  * URBs submitted by __wa_seg_submit attempt to complete
1474                  * the xfer before this function completes.
1475                  */
1476                 wa_xfer_get(xfer);
1477                 result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1478                 /* release the dto resource if this RPIPE is done with it. */
1479                 if (dto_done)
1480                         __wa_dto_put(rpipe->wa);
1481                 dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1482                         xfer, wa_xfer_id(xfer), seg->index,
1483                         atomic_read(&rpipe->segs_available), result);
1484                 if (unlikely(result < 0)) {
1485                         int done;
1486
1487                         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1488                         spin_lock_irqsave(&xfer->lock, flags);
1489                         __wa_xfer_abort(xfer);
1490                         /*
1491                          * This seg was marked as submitted when it was put on
1492                          * the RPIPE seg_list.  Mark it done.
1493                          */
1494                         xfer->segs_done++;
1495                         done = __wa_xfer_is_done(xfer);
1496                         spin_unlock_irqrestore(&xfer->lock, flags);
1497                         if (done)
1498                                 wa_xfer_completion(xfer);
1499                         spin_lock_irqsave(&rpipe->seg_lock, flags);
1500                 }
1501                 wa_xfer_put(xfer);
1502         }
1503         /*
1504          * Mark this RPIPE as waiting if dto was not acquired, there are
1505          * delayed segs and no active transfers to wake us up later.
1506          */
1507         if (!dto_acquired && !list_empty(&rpipe->seg_list)
1508                 && (atomic_read(&rpipe->segs_available) ==
1509                         le16_to_cpu(rpipe->descr.wRequests)))
1510                 *dto_waiting = 1;
1511
1512         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1513
1514         return dto_done;
1515 }
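
/*
 * Illustrative sketch only: the lock ordering assumed by the code above.
 * xfer->lock is always taken outside rpipe->seg_lock (wa_urb_dequeue() nests
 * them this way), which is why the error path in __wa_xfer_delayed_run()
 * drops seg_lock and takes xfer->lock instead of nesting them in reverse.
 */
static void __maybe_unused example_lock_order(struct wa_xfer *xfer,
                struct wa_rpipe *rpipe)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->lock, flags);  /* outer lock */
        spin_lock(&rpipe->seg_lock);            /* inner lock */
        /* ... work on this xfer's segments queued on the rpipe ... */
        spin_unlock(&rpipe->seg_lock);
        spin_unlock_irqrestore(&xfer->lock, flags);
}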
1516
1517 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1518 {
1519         int dto_waiting;
1520         int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1521
1522         /*
1523          * If this RPIPE is waiting on the DTO resource, add it to the tail of
1524          * the waiting list.
1525          * Otherwise, if the WA DTO resource was acquired and released by
1526          *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1527          * DTO and failed during that time.  Check the delayed list and process
1528          * any waiters.  Start searching from the next RPIPE index.
1529          */
1530         if (dto_waiting)
1531                 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1532         else if (dto_done)
1533                 wa_check_for_delayed_rpipes(rpipe->wa);
1534 }
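
/*
 * Illustrative sketch only (hypothetical example_dto_protocol()): a
 * simplified view of the DTO arbitration used by the two functions above.
 * An RPIPE that wants to send data first tries to grab the single DTO
 * resource; if that fails it registers itself as waiting, and whoever
 * releases DTO later checks the waiting list and kicks any delayed RPIPEs.
 */
static void __maybe_unused example_dto_protocol(struct wa_rpipe *rpipe)
{
        if (__wa_dto_try_get(rpipe->wa)) {
                /* ... send one or more segments over the DTO endpoint ... */
                __wa_dto_put(rpipe->wa);
                /* wake any RPIPE that failed to get DTO in the meantime. */
                wa_check_for_delayed_rpipes(rpipe->wa);
        } else {
                /* DTO is busy; wait to be kicked when it is released. */
                wa_add_delayed_rpipe(rpipe->wa, rpipe);
        }
}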
1535
1536 /*
1537  * Submit all the segments of a transfer, delaying any that cannot be
1538  * sent right away.  xfer->lock is held by the caller.
1539  *
1540  * On submit failure we just stop submitting and return the error;
1541  * wa_urb_enqueue_b() will execute the completion path
1542  */
1543 static int __wa_xfer_submit(struct wa_xfer *xfer)
1544 {
1545         int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1546         struct wahc *wa = xfer->wa;
1547         struct device *dev = &wa->usb_iface->dev;
1548         unsigned cnt;
1549         struct wa_seg *seg;
1550         unsigned long flags;
1551         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1552         size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1553         u8 available;
1554         u8 empty;
1555
1556         spin_lock_irqsave(&wa->xfer_list_lock, flags);
1557         list_add_tail(&xfer->list_node, &wa->xfer_list);
1558         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1559
1560         BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1561         result = 0;
1562         spin_lock_irqsave(&rpipe->seg_lock, flags);
1563         for (cnt = 0; cnt < xfer->segs; cnt++) {
1564                 int delay_seg = 1;
1565
1566                 available = atomic_read(&rpipe->segs_available);
1567                 empty = list_empty(&rpipe->seg_list);
1568                 seg = xfer->seg[cnt];
1569                 if (available && empty) {
1570                         /*
1571                          * Only attempt to acquire DTO if we have a segment
1572                          * to send.
1573                          */
1574                         dto_acquired = __wa_dto_try_get(rpipe->wa);
1575                         if (dto_acquired) {
1576                                 delay_seg = 0;
1577                                 result = __wa_seg_submit(rpipe, xfer, seg,
1578                                                         &dto_done);
1579                                 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1580                                         xfer, wa_xfer_id(xfer), cnt, available,
1581                                         empty);
1582                                 if (dto_done)
1583                                         __wa_dto_put(rpipe->wa);
1584
1585                                 if (result < 0) {
1586                                         __wa_xfer_abort(xfer);
1587                                         goto error_seg_submit;
1588                                 }
1589                         }
1590                 }
1591
1592                 if (delay_seg) {
1593                         dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1594                                 xfer, wa_xfer_id(xfer), cnt, available,  empty);
1595                         seg->status = WA_SEG_DELAYED;
1596                         list_add_tail(&seg->list_node, &rpipe->seg_list);
1597                 }
1598                 xfer->segs_submitted++;
1599         }
1600 error_seg_submit:
1601         /*
1602          * Mark this RPIPE as waiting if dto was not acquired, there are
1603          * delayed segs and no active transfers to wake us up later.
1604          */
1605         if (!dto_acquired && !list_empty(&rpipe->seg_list)
1606                 && (atomic_read(&rpipe->segs_available) ==
1607                         le16_to_cpu(rpipe->descr.wRequests)))
1608                 dto_waiting = 1;
1609         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1610
1611         if (dto_waiting)
1612                 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1613         else if (dto_done)
1614                 wa_check_for_delayed_rpipes(rpipe->wa);
1615
1616         return result;
1617 }
1618
1619 /*
1620  * Second part of a URB/transfer enqueue operation
1621  *
1622  * Assumes this comes from wa_urb_enqueue() [maybe through
1623  * wa_urb_enqueue_run()]. At this point:
1624  *
1625  * xfer->wa     filled and refcounted
1626  * xfer->ep     filled with rpipe refcounted if
1627  *              delayed == 0
1628  * xfer->urb    filled and refcounted (this is the case when called
1629  *              from wa_urb_enqueue() as we come from usb_submit_urb()
1630  *              and when called by wa_urb_enqueue_run(), as we took an
1631  *              extra ref dropped by _run() after we return).
1632  * xfer->gfp    filled
1633  *
1634  * If we fail at __wa_xfer_submit(), then we just check if we are done
1635  * and if so, we run the completion procedure. However, if we are not
1636  * yet done, we do nothing and wait for the completion handlers from
1637  * the submitted URBs or from the xfer-result path to kick in. If xfer
1638  * result never kicks in, the xfer will time out in the USB code and
1639  * dequeue() will be called.
1640  */
1641 static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1642 {
1643         int result;
1644         unsigned long flags;
1645         struct urb *urb = xfer->urb;
1646         struct wahc *wa = xfer->wa;
1647         struct wusbhc *wusbhc = wa->wusb;
1648         struct wusb_dev *wusb_dev;
1649         unsigned done;
1650
1651         result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1652         if (result < 0) {
1653                 pr_err("%s: error_rpipe_get\n", __func__);
1654                 goto error_rpipe_get;
1655         }
1656         result = -ENODEV;
1657         /* FIXME: segmentation broken -- kills DWA */
1658         mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
1659         if (urb->dev == NULL) {
1660                 mutex_unlock(&wusbhc->mutex);
1661                 pr_err("%s: error usb dev gone\n", __func__);
1662                 goto error_dev_gone;
1663         }
1664         wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1665         if (wusb_dev == NULL) {
1666                 mutex_unlock(&wusbhc->mutex);
1667                 dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1668                         __func__);
1669                 goto error_dev_gone;
1670         }
1671         mutex_unlock(&wusbhc->mutex);
1672
1673         spin_lock_irqsave(&xfer->lock, flags);
1674         xfer->wusb_dev = wusb_dev;
1675         result = urb->status;
1676         if (urb->status != -EINPROGRESS) {
1677                 dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1678                 goto error_dequeued;
1679         }
1680
1681         result = __wa_xfer_setup(xfer, urb);
1682         if (result < 0) {
1683                 dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1684                 goto error_xfer_setup;
1685         }
1686         /*
1687          * Get a xfer reference since __wa_xfer_submit starts asynchronous
1688          * operations that may try to complete the xfer before this function
1689          * exits.
1690          */
1691         wa_xfer_get(xfer);
1692         result = __wa_xfer_submit(xfer);
1693         if (result < 0) {
1694                 dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1695                 goto error_xfer_submit;
1696         }
1697         spin_unlock_irqrestore(&xfer->lock, flags);
1698         wa_xfer_put(xfer);
1699         return 0;
1700
1701         /*
1702          * This is basically wa_xfer_completion() broken up: wa_xfer_giveback()
1703          * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1704          * setup().
1705          */
1706 error_xfer_setup:
1707 error_dequeued:
1708         spin_unlock_irqrestore(&xfer->lock, flags);
1709         /* FIXME: segmentation broken, kills DWA */
1710         if (wusb_dev)
1711                 wusb_dev_put(wusb_dev);
1712 error_dev_gone:
1713         rpipe_put(xfer->ep->hcpriv);
1714 error_rpipe_get:
1715         xfer->result = result;
1716         return result;
1717
1718 error_xfer_submit:
1719         done = __wa_xfer_is_done(xfer);
1720         xfer->result = result;
1721         spin_unlock_irqrestore(&xfer->lock, flags);
1722         if (done)
1723                 wa_xfer_completion(xfer);
1724         wa_xfer_put(xfer);
1725         /* return success since the completion routine will run. */
1726         return 0;
1727 }
1728
1729 /*
1730  * Execute the delayed transfers in the Wire Adapter @wa
1731  *
1732  * We need to be careful here, as dequeue() could be called in the
1733  * middle.  That's why we splice the delayed list under the
1734  * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1735  * and then checks the list -- since we would be acquiring the locks in
1736  * inverse order, we move the delayed list to a separate list while locked
1737  * and then submit the entries without the list lock held.
1738  */
1739 void wa_urb_enqueue_run(struct work_struct *ws)
1740 {
1741         struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1742         struct wa_xfer *xfer, *next;
1743         struct urb *urb;
1744         LIST_HEAD(tmp_list);
1745
1746         /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1747         spin_lock_irq(&wa->xfer_list_lock);
1748         list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1749                         wa->xfer_delayed_list.prev);
1750         spin_unlock_irq(&wa->xfer_list_lock);
1751
1752         /*
1753          * enqueue from temp list without list lock held since wa_urb_enqueue_b
1754          * can take xfer->lock as well as lock mutexes.
1755          */
1756         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1757                 list_del_init(&xfer->list_node);
1758
1759                 urb = xfer->urb;
1760                 if (wa_urb_enqueue_b(xfer) < 0)
1761                         wa_xfer_giveback(xfer);
1762                 usb_put_urb(urb);       /* taken when queuing */
1763         }
1764 }
1765 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1766
1767 /*
1768  * Process the errored transfers on the Wire Adapter outside of interrupt.
1769  */
1770 void wa_process_errored_transfers_run(struct work_struct *ws)
1771 {
1772         struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1773         struct wa_xfer *xfer, *next;
1774         LIST_HEAD(tmp_list);
1775
1776         pr_info("%s: Run delayed STALL processing.\n", __func__);
1777
1778         /* Create a copy of the wa->xfer_errored_list while holding the lock */
1779         spin_lock_irq(&wa->xfer_list_lock);
1780         list_cut_position(&tmp_list, &wa->xfer_errored_list,
1781                         wa->xfer_errored_list.prev);
1782         spin_unlock_irq(&wa->xfer_list_lock);
1783
1784         /*
1785          * run rpipe_clear_feature_stalled from temp list without list lock
1786          * held.
1787          */
1788         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1789                 struct usb_host_endpoint *ep;
1790                 unsigned long flags;
1791                 struct wa_rpipe *rpipe;
1792
1793                 spin_lock_irqsave(&xfer->lock, flags);
1794                 ep = xfer->ep;
1795                 rpipe = ep->hcpriv;
1796                 spin_unlock_irqrestore(&xfer->lock, flags);
1797
1798                 /* clear RPIPE feature stalled without holding a lock. */
1799                 rpipe_clear_feature_stalled(wa, ep);
1800
1801                 /* complete the xfer. This removes it from the tmp list. */
1802                 wa_xfer_completion(xfer);
1803
1804                 /* check for work. */
1805                 wa_xfer_delayed_run(rpipe);
1806         }
1807 }
1808 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1809
1810 /*
1811  * Submit a transfer to the Wire Adapter in a delayed way
1812  *
1813  * The process of enqueuing involves possible sleeps [see
1814  * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1815  * in an atomic section, enqueue_b() is deferred; else it is called directly.
1816  *
1817  * @urb: We own a reference to it, taken by the Linux USB stack, that
1818  *       will be given up by calling usb_hcd_giveback_urb() or by
1819  *       returning an error from this function -> ergo we don't have to
1820  *       refcount it.
1821  */
1822 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1823                    struct urb *urb, gfp_t gfp)
1824 {
1825         int result;
1826         struct device *dev = &wa->usb_iface->dev;
1827         struct wa_xfer *xfer;
1828         unsigned long my_flags;
1829         unsigned cant_sleep = irqs_disabled() | in_atomic();
1830
1831         if ((urb->transfer_buffer == NULL)
1832             && (urb->sg == NULL)
1833             && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1834             && urb->transfer_buffer_length != 0) {
1835                 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1836                 dump_stack();
1837         }
1838
1839         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1840         result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1841         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1842         if (result < 0)
1843                 goto error_link_urb;
1844
1845         result = -ENOMEM;
1846         xfer = kzalloc(sizeof(*xfer), gfp);
1847         if (xfer == NULL)
1848                 goto error_kmalloc;
1849
1850         result = -ENOENT;
1851         if (urb->status != -EINPROGRESS)        /* cancelled */
1852                 goto error_dequeued;            /* before starting? */
1853         wa_xfer_init(xfer);
1854         xfer->wa = wa_get(wa);
1855         xfer->urb = urb;
1856         xfer->gfp = gfp;
1857         xfer->ep = ep;
1858         urb->hcpriv = xfer;
1859
1860         dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1861                 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1862                 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1863                 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1864                 cant_sleep ? "deferred" : "inline");
1865
1866         if (cant_sleep) {
1867                 usb_get_urb(urb);
1868                 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1869                 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1870                 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1871                 queue_work(wusbd, &wa->xfer_enqueue_work);
1872         } else {
1873                 result = wa_urb_enqueue_b(xfer);
1874                 if (result < 0) {
1875                         /*
1876                          * URB submit/enqueue failed.  Clean up, return an
1877                          * error and do not run the callback.  This avoids
1878                          * an infinite submit/complete loop.
1879                          */
1880                         dev_err(dev, "%s: URB enqueue failed: %d\n",
1881                            __func__, result);
1882                         wa_put(xfer->wa);
1883                         wa_xfer_put(xfer);
1884                         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1885                         usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1886                         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1887                         return result;
1888                 }
1889         }
1890         return 0;
1891
1892 error_dequeued:
1893         kfree(xfer);
1894 error_kmalloc:
1895         spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1896         usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1897         spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1898 error_link_urb:
1899         return result;
1900 }
1901 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
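
/*
 * Illustrative sketch only: a minimal caller of wa_urb_enqueue().  The wahc
 * pointer is assumed to come from the enclosing host controller driver.  A
 * caller running in atomic context ends up on the deferred path above, with
 * the xfer queued on wa->xfer_delayed_list and processed later by
 * wa_urb_enqueue_run().
 */
static int __maybe_unused example_enqueue(struct wahc *wa, struct urb *urb,
                gfp_t gfp)
{
        int result;

        /* urb->ep identifies the endpoint; its RPIPE is set up on demand. */
        result = wa_urb_enqueue(wa, urb->ep, urb, gfp);
        if (result < 0)
                pr_debug("%s: enqueue of urb %p failed: %d\n", __func__,
                         urb, result);
        return result;
}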
1902
1903 /*
1904  * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1905  * handler] is called.
1906  *
1907  * Until a transfer goes successfully through wa_urb_enqueue() it
1908  * needs to be dequeued with the completion called; when stuck in the
1909  * delayed list or before wa_xfer_setup() is called, we must do the completion.
1910  *
1911  *  not setup  If there is no hcpriv yet, that means that enqueue
1912  *             has not yet had time to set the xfer up. Because
1913  *             urb->status should be other than -EINPROGRESS,
1914  *             enqueue() will catch that and bail out.
1915  *
1916  * If the transfer has gone through setup, we just need to clean it
1917  * up. If it has gone through submit(), we have to abort it [with an
1918  * asynch request] and then make sure we cancel each segment.
1919  *
1920  */
1921 int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1922 {
1923         unsigned long flags, flags2;
1924         struct wa_xfer *xfer;
1925         struct wa_seg *seg;
1926         struct wa_rpipe *rpipe;
1927         unsigned cnt, done = 0, xfer_abort_pending;
1928         unsigned rpipe_ready = 0;
1929         int result;
1930
1931         /* check if it is safe to unlink. */
1932         spin_lock_irqsave(&wa->xfer_list_lock, flags);
1933         result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1934         if ((result == 0) && urb->hcpriv) {
1935                 /*
1936                  * Get a xfer ref to prevent a race with wa_xfer_giveback
1937                  * cleaning up the xfer while we are working with it.
1938                  */
1939                 wa_xfer_get(urb->hcpriv);
1940         }
1941         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1942         if (result)
1943                 return result;
1944
1945         xfer = urb->hcpriv;
1946         if (xfer == NULL)
1947                 return -ENOENT;
1948         spin_lock_irqsave(&xfer->lock, flags);
1949         pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1950         rpipe = xfer->ep->hcpriv;
1951         if (rpipe == NULL) {
1952                 pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
1953                         __func__, xfer, wa_xfer_id(xfer),
1954                         "Probably already aborted.\n" );
1955                 result = -ENOENT;
1956                 goto out_unlock;
1957         }
1958         /*
1959          * Check for done to avoid racing with wa_xfer_giveback and completing
1960          * twice.
1961          */
1962         if (__wa_xfer_is_done(xfer)) {
1963                 pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1964                         xfer, wa_xfer_id(xfer));
1965                 result = -ENOENT;
1966                 goto out_unlock;
1967         }
1968         /* Check the delayed list -> if there, release and complete */
1969         spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1970         if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1971                 goto dequeue_delayed;
1972         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1973         if (xfer->seg == NULL)          /* still hasn't reached */
1974                 goto out_unlock;        /* setup(), enqueue_b() completes */
1975         /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1976         xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1977         /*
1978          * grab the rpipe->seg_lock here to prevent racing with
1979          * __wa_xfer_delayed_run.
1980          */
1981         spin_lock(&rpipe->seg_lock);
1982         for (cnt = 0; cnt < xfer->segs; cnt++) {
1983                 seg = xfer->seg[cnt];
1984                 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1985                         __func__, wa_xfer_id(xfer), cnt, seg->status);
1986                 switch (seg->status) {
1987                 case WA_SEG_NOTREADY:
1988                 case WA_SEG_READY:
1989                         printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1990                                xfer, cnt, seg->status);
1991                         WARN_ON(1);
1992                         break;
1993                 case WA_SEG_DELAYED:
1994                         /*
1995                          * delete from rpipe delayed list.  If no segments on
1996                          * this xfer have been submitted, __wa_xfer_is_done will
1997                          * trigger a giveback below.  Otherwise, the submitted
1998                          * segments will be completed in the DTI interrupt.
1999                          */
2000                         seg->status = WA_SEG_ABORTED;
2001                         seg->result = -ENOENT;
2002                         list_del(&seg->list_node);
2003                         xfer->segs_done++;
2004                         break;
2005                 case WA_SEG_DONE:
2006                 case WA_SEG_ERROR:
2007                 case WA_SEG_ABORTED:
2008                         break;
2009                         /*
2010                          * The buf_in data for a segment in the
2011                          * WA_SEG_DTI_PENDING state is actively being read.
2012                          * Let wa_buf_in_cb handle it since it will be called
2013                          * and will increment xfer->segs_done.  Cleaning up
2014                          * here could cause wa_buf_in_cb to access the xfer
2015                          * after it has been completed/freed.
2016                          */
2017                 case WA_SEG_DTI_PENDING:
2018                         break;
2019                         /*
2020                          * In the states below, the HWA device already knows
2021                          * about the transfer.  If an abort request was sent,
2022                          * allow the HWA to process it and wait for the
2023                          * results.  Otherwise, the DTI state and seg completed
2024                          * counts can get out of sync.
2025                          */
2026                 case WA_SEG_SUBMITTED:
2027                 case WA_SEG_PENDING:
2028                         /*
2029                          * Check if the abort was successfully sent.  This could
2030                          * be false if the HWA has been removed but we haven't
2031                          * gotten the disconnect notification yet.
2032                          */
2033                         if (!xfer_abort_pending) {
2034                                 seg->status = WA_SEG_ABORTED;
2035                                 rpipe_ready = rpipe_avail_inc(rpipe);
2036                                 xfer->segs_done++;
2037                         }
2038                         break;
2039                 }
2040         }
2041         spin_unlock(&rpipe->seg_lock);
2042         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
2043         done = __wa_xfer_is_done(xfer);
2044         spin_unlock_irqrestore(&xfer->lock, flags);
2045         if (done)
2046                 wa_xfer_completion(xfer);
2047         if (rpipe_ready)
2048                 wa_xfer_delayed_run(rpipe);
2049         wa_xfer_put(xfer);
2050         return result;
2051
2052 out_unlock:
2053         spin_unlock_irqrestore(&xfer->lock, flags);
2054         wa_xfer_put(xfer);
2055         return result;
2056
2057 dequeue_delayed:
2058         list_del_init(&xfer->list_node);
2059         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2060         xfer->result = urb->status;
2061         spin_unlock_irqrestore(&xfer->lock, flags);
2062         wa_xfer_giveback(xfer);
2063         wa_xfer_put(xfer);
2064         usb_put_urb(urb);               /* we got a ref in enqueue() */
2065         return 0;
2066 }
2067 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
2068
2069 /*
2070  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2071  * codes
2072  *
2073  * Positive errno values are internal inconsistencies and should be
2074  * flagged louder. Negative are to be passed up to the user in the
2075  * normal way.
2076  *
2077  * @status: USB WA status code -- high two bits are stripped.
2078  */
2079 static int wa_xfer_status_to_errno(u8 status)
2080 {
2081         int errno;
2082         u8 real_status = status;
2083         static int xlat[] = {
2084                 [WA_XFER_STATUS_SUCCESS] =              0,
2085                 [WA_XFER_STATUS_HALTED] =               -EPIPE,
2086                 [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
2087                 [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
2088                 [WA_XFER_RESERVED] =                    EINVAL,
2089                 [WA_XFER_STATUS_NOT_FOUND] =            0,
2090                 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2091                 [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
2092                 [WA_XFER_STATUS_ABORTED] =              -ENOENT,
2093                 [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
2094                 [WA_XFER_INVALID_FORMAT] =              EINVAL,
2095                 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
2096                 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
2097         };
2098         status &= 0x3f;
2099
2100         if (status == 0)
2101                 return 0;
2102         if (status >= ARRAY_SIZE(xlat)) {
2103                 printk_ratelimited(KERN_ERR "%s(): BUG? "
2104                                "Unknown WA transfer status 0x%02x\n",
2105                                __func__, real_status);
2106                 return -EINVAL;
2107         }
2108         errno = xlat[status];
2109         if (unlikely(errno > 0)) {
2110                 printk_ratelimited(KERN_ERR "%s(): BUG? "
2111                                "Inconsistent WA status: 0x%02x\n",
2112                                __func__, real_status);
2113                 errno = -errno;
2114         }
2115         return errno;
2116 }
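
/*
 * Illustrative sketch only: how a raw bTransferStatus ends up as a segment
 * result.  Bit 0x80 marks a hard error and bit 0x40 a warning (currently
 * ignored); the low six bits index the translation table above, as done by
 * wa_xfer_result_chew() below.
 */
static int __maybe_unused example_seg_result(u8 bTransferStatus)
{
        if (bTransferStatus & 0x80)             /* hard error from the HWA */
                return wa_xfer_status_to_errno(bTransferStatus);
        return 0;                               /* success (warnings pass) */
}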
2117
2118 /*
2119  * If a last segment flag and/or a transfer result error is encountered,
2120  * no other segment transfer results will be returned from the device.
2121  * Mark the remaining submitted or pending segments as completed so that
2122  * the xfer will complete cleanly.
2123  *
2124  * xfer->lock must be held
2125  *
2126  */
2127 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2128                 int starting_index, enum wa_seg_status status)
2129 {
2130         int index;
2131         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2132
2133         for (index = starting_index; index < xfer->segs_submitted; index++) {
2134                 struct wa_seg *current_seg = xfer->seg[index];
2135
2136                 BUG_ON(current_seg == NULL);
2137
2138                 switch (current_seg->status) {
2139                 case WA_SEG_SUBMITTED:
2140                 case WA_SEG_PENDING:
2141                 case WA_SEG_DTI_PENDING:
2142                         rpipe_avail_inc(rpipe);
2143                 /*
2144                  * Fall through: do not increment RPIPE avail for the
2145                  * WA_SEG_DELAYED case since it has not been submitted to the RPIPE.
2146                  */
2147                 case WA_SEG_DELAYED:
2148                         xfer->segs_done++;
2149                         current_seg->status = status;
2150                         break;
2151                 case WA_SEG_ABORTED:
2152                         break;
2153                 default:
2154                         WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2155                                 __func__, wa_xfer_id(xfer), index,
2156                                 current_seg->status);
2157                         break;
2158                 }
2159         }
2160 }
2161
2162 /* Populate the given urb based on the current isoc transfer state. */
2163 static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2164         struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2165 {
2166         int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2167         int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2168         struct usb_iso_packet_descriptor *iso_frame_desc =
2169                                                 xfer->urb->iso_frame_desc;
2170         const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2171         int next_frame_contiguous;
2172         struct usb_iso_packet_descriptor *iso_frame;
2173
2174         BUG_ON(buf_in_urb->status == -EINPROGRESS);
2175
2176         /*
2177          * If the current frame actual_length is contiguous with the next frame
2178          * and actual_length is a multiple of the DTI endpoint max packet size,
2179          * combine the current frame with the next frame in a single URB.  This
2180          * reduces the number of URBs that must be submitted in that case.
2181          */
2182         seg_index = seg->isoc_frame_index;
2183         do {
2184                 next_frame_contiguous = 0;
2185
2186                 iso_frame = &iso_frame_desc[urb_frame_index];
2187                 total_len += iso_frame->actual_length;
2188                 ++urb_frame_index;
2189                 ++seg_index;
2190
2191                 if (seg_index < seg->isoc_frame_count) {
2192                         struct usb_iso_packet_descriptor *next_iso_frame;
2193
2194                         next_iso_frame = &iso_frame_desc[urb_frame_index];
2195
2196                         if ((iso_frame->offset + iso_frame->actual_length) ==
2197                                 next_iso_frame->offset)
2198                                 next_frame_contiguous = 1;
2199                 }
2200         } while (next_frame_contiguous
2201                         && ((iso_frame->actual_length % dti_packet_size) == 0));
2202
2203         /* this should always be 0 before a resubmit. */
2204         buf_in_urb->num_mapped_sgs      = 0;
2205         buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2206                 iso_frame_desc[urb_start_frame].offset;
2207         buf_in_urb->transfer_buffer_length = total_len;
2208         buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2209         buf_in_urb->transfer_buffer = NULL;
2210         buf_in_urb->sg = NULL;
2211         buf_in_urb->num_sgs = 0;
2212         buf_in_urb->context = seg;
2213
2214         /* return the number of frames included in this URB. */
2215         return seg_index - seg->isoc_frame_index;
2216 }
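
/*
 * Illustrative sketch only (hypothetical frame_len[] array): the coalescing
 * rule used above, assuming the frames are contiguous in the transfer
 * buffer.  With a DTI max packet size of 512, frames of 512, 512 and 200
 * bytes are read with a single 1224-byte buf_in URB; the 200-byte frame
 * ends the run because it is not a multiple of the DTI max packet size.
 */
static int __maybe_unused example_coalesced_len(const int *frame_len,
                int nframes, int dti_maxp)
{
        int i, total = 0;

        for (i = 0; i < nframes; i++) {
                total += frame_len[i];
                if (frame_len[i] % dti_maxp)
                        break;  /* a short frame ends the coalesced read */
        }
        return total;
}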
2217
2218 /* Populate the given urb based on the current transfer state. */
2219 static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2220         unsigned int seg_idx, unsigned int bytes_transferred)
2221 {
2222         int result = 0;
2223         struct wa_seg *seg = xfer->seg[seg_idx];
2224
2225         BUG_ON(buf_in_urb->status == -EINPROGRESS);
2226         /* this should always be 0 before a resubmit. */
2227         buf_in_urb->num_mapped_sgs      = 0;
2228
2229         if (xfer->is_dma) {
2230                 buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2231                         + (seg_idx * xfer->seg_size);
2232                 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2233                 buf_in_urb->transfer_buffer = NULL;
2234                 buf_in_urb->sg = NULL;
2235                 buf_in_urb->num_sgs = 0;
2236         } else {
2237                 /* do buffer or SG processing. */
2238                 buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2239
2240                 if (xfer->urb->transfer_buffer) {
2241                         buf_in_urb->transfer_buffer =
2242                                 xfer->urb->transfer_buffer
2243                                 + (seg_idx * xfer->seg_size);
2244                         buf_in_urb->sg = NULL;
2245                         buf_in_urb->num_sgs = 0;
2246                 } else {
2247                         /* Allocate an SG list to store seg_size bytes
2248                          * and copy the subset of the xfer->urb->sg that
2249                          * matches the buffer subset we are about to
2250                          * read. */
2251                         buf_in_urb->sg = wa_xfer_create_subset_sg(
2252                                 xfer->urb->sg,
2253                                 seg_idx * xfer->seg_size,
2254                                 bytes_transferred,
2255                                 &(buf_in_urb->num_sgs));
2256
2257                         if (!(buf_in_urb->sg)) {
2258                                 buf_in_urb->num_sgs     = 0;
2259                                 result = -ENOMEM;
2260                         }
2261                         buf_in_urb->transfer_buffer = NULL;
2262                 }
2263         }
2264         buf_in_urb->transfer_buffer_length = bytes_transferred;
2265         buf_in_urb->context = seg;
2266
2267         return result;
2268 }
2269
2270 /*
2271  * Process a xfer result completion message
2272  *
2273  * inbound transfers: need to schedule a buf_in_urb read
2274  *
2275  * FIXME: this function needs to be broken up in parts
2276  */
2277 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2278                 struct wa_xfer_result *xfer_result)
2279 {
2280         int result;
2281         struct device *dev = &wa->usb_iface->dev;
2282         unsigned long flags;
2283         unsigned int seg_idx;
2284         struct wa_seg *seg;
2285         struct wa_rpipe *rpipe;
2286         unsigned done = 0;
2287         u8 usb_status;
2288         unsigned rpipe_ready = 0;
2289         unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2290         struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2291
2292         spin_lock_irqsave(&xfer->lock, flags);
2293         seg_idx = xfer_result->bTransferSegment & 0x7f;
2294         if (unlikely(seg_idx >= xfer->segs))
2295                 goto error_bad_seg;
2296         seg = xfer->seg[seg_idx];
2297         rpipe = xfer->ep->hcpriv;
2298         usb_status = xfer_result->bTransferStatus;
2299         dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2300                 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2301         if (seg->status == WA_SEG_ABORTED
2302             || seg->status == WA_SEG_ERROR)     /* already handled */
2303                 goto segment_aborted;
2304         if (seg->status == WA_SEG_SUBMITTED)    /* oops, got here */
2305                 seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
2306         if (seg->status != WA_SEG_PENDING) {
2307                 if (printk_ratelimit())
2308                         dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2309                                 xfer, seg_idx, seg->status);
2310                 seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
2311         }
2312         if (usb_status & 0x80) {
2313                 seg->result = wa_xfer_status_to_errno(usb_status);
2314                 dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2315                         xfer, xfer->id, seg->index, usb_status);
2316                 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2317                         WA_SEG_ABORTED : WA_SEG_ERROR;
2318                 goto error_complete;
2319         }
2320         /* FIXME: we ignore warnings, tally them for stats */
2321         if (usb_status & 0x40)          /* Warning?... */
2322                 usb_status = 0;         /* ... pass */
2323         /*
2324          * If the last segment bit is set, complete the remaining segments.
2325          * When the current segment is completed, either in wa_buf_in_cb for
2326          * transfers with data or below for no data, the xfer will complete.
2327          */
2328         if (xfer_result->bTransferSegment & 0x80)
2329                 wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2330                         WA_SEG_DONE);
2331         if (usb_pipeisoc(xfer->urb->pipe)
2332                 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2333                 /* set up WA state to read the isoc packet status next. */
2334                 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2335                 wa->dti_isoc_xfer_seg = seg_idx;
2336                 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2337         } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2338                         && (bytes_transferred > 0)) {
2339                 /* IN data phase: read to buffer */
2340                 seg->status = WA_SEG_DTI_PENDING;
2341                 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2342                         bytes_transferred);
2343                 if (result < 0)
2344                         goto error_buf_in_populate;
2345                 ++(wa->active_buf_in_urbs);
2346                 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2347                 if (result < 0) {
2348                         --(wa->active_buf_in_urbs);
2349                         goto error_submit_buf_in;
2350                 }
2351         } else {
2352                 /* OUT data phase or no data, complete it -- */
2353                 seg->result = bytes_transferred;
2354                 rpipe_ready = rpipe_avail_inc(rpipe);
2355                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2356         }
2357         spin_unlock_irqrestore(&xfer->lock, flags);
2358         if (done)
2359                 wa_xfer_completion(xfer);
2360         if (rpipe_ready)
2361                 wa_xfer_delayed_run(rpipe);
2362         return;
2363
2364 error_submit_buf_in:
2365         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2366                 dev_err(dev, "DTI: URB max acceptable errors "
2367                         "exceeded, resetting device\n");
2368                 wa_reset_all(wa);
2369         }
2370         if (printk_ratelimit())
2371                 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2372                         xfer, seg_idx, result);
2373         seg->result = result;
2374         kfree(buf_in_urb->sg);
2375         buf_in_urb->sg = NULL;
2376 error_buf_in_populate:
2377         __wa_xfer_abort(xfer);
2378         seg->status = WA_SEG_ERROR;
2379 error_complete:
2380         xfer->segs_done++;
2381         rpipe_ready = rpipe_avail_inc(rpipe);
2382         wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2383         done = __wa_xfer_is_done(xfer);
2384         /*
2385          * queue work item to clear STALL for control endpoints.
2386          * Otherwise, let endpoint_reset take care of it.
2387          */
2388         if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2389                 usb_endpoint_xfer_control(&xfer->ep->desc) &&
2390                 done) {
2391
2392                 dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2393                 spin_lock(&wa->xfer_list_lock);
2394                 /* move xfer from xfer_list to xfer_errored_list. */
2395                 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2396                 spin_unlock(&wa->xfer_list_lock);
2397                 spin_unlock_irqrestore(&xfer->lock, flags);
2398                 queue_work(wusbd, &wa->xfer_error_work);
2399         } else {
2400                 spin_unlock_irqrestore(&xfer->lock, flags);
2401                 if (done)
2402                         wa_xfer_completion(xfer);
2403                 if (rpipe_ready)
2404                         wa_xfer_delayed_run(rpipe);
2405         }
2406
2407         return;
2408
2409 error_bad_seg:
2410         spin_unlock_irqrestore(&xfer->lock, flags);
2411         wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2412         if (printk_ratelimit())
2413                 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2414         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2415                 dev_err(dev, "DTI: URB max acceptable errors "
2416                         "exceeded, resetting device\n");
2417                 wa_reset_all(wa);
2418         }
2419         return;
2420
2421 segment_aborted:
2422         /* nothing to do, as the aborter did the completion */
2423         spin_unlock_irqrestore(&xfer->lock, flags);
2424 }
2425
2426 /*
2427  * Process an isochronous packet status message
2428  *
2429  * inbound transfers: need to schedule a buf_in_urb read
2430  */
2431 static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2432 {
2433         struct device *dev = &wa->usb_iface->dev;
2434         struct wa_xfer_packet_status_hwaiso *packet_status;
2435         struct wa_xfer_packet_status_len_hwaiso *status_array;
2436         struct wa_xfer *xfer;
2437         unsigned long flags;
2438         struct wa_seg *seg;
2439         struct wa_rpipe *rpipe;
2440         unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2441         unsigned first_frame_index = 0, rpipe_ready = 0;
2442         int expected_size;
2443
2444         /* We have a xfer result buffer; check it */
2445         dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2446                 urb->actual_length, urb->transfer_buffer);
2447         packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2448         if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2449                 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2450                         packet_status->bPacketType);
2451                 goto error_parse_buffer;
2452         }
2453         xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2454         if (xfer == NULL) {
2455                 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2456                         wa->dti_isoc_xfer_in_progress);
2457                 goto error_parse_buffer;
2458         }
2459         spin_lock_irqsave(&xfer->lock, flags);
2460         if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2461                 goto error_bad_seg;
2462         seg = xfer->seg[wa->dti_isoc_xfer_seg];
2463         rpipe = xfer->ep->hcpriv;
2464         expected_size = sizeof(*packet_status) +
2465                         (sizeof(packet_status->PacketStatus[0]) *
2466                         seg->isoc_frame_count);
2467         if (urb->actual_length != expected_size) {
2468                 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2469                         urb->actual_length, expected_size);
2470                 goto error_bad_seg;
2471         }
2472         if (le16_to_cpu(packet_status->wLength) != expected_size) {
2473                 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2474                         le16_to_cpu(packet_status->wLength));
2475                 goto error_bad_seg;
2476         }
2477         /* write isoc packet status and lengths back to the xfer urb. */
2478         status_array = packet_status->PacketStatus;
2479         xfer->urb->start_frame =
2480                 wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2481         for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2482                 struct usb_iso_packet_descriptor *iso_frame_desc =
2483                         xfer->urb->iso_frame_desc;
2484                 const int xfer_frame_index =
2485                         seg->isoc_frame_offset + seg_index;
2486
2487                 iso_frame_desc[xfer_frame_index].status =
2488                         wa_xfer_status_to_errno(
2489                         le16_to_cpu(status_array[seg_index].PacketStatus));
2490                 iso_frame_desc[xfer_frame_index].actual_length =
2491                         le16_to_cpu(status_array[seg_index].PacketLength);
2492                 /* track the number of frames successfully transferred. */
2493                 if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2494                         /* save the starting frame index for buf_in_urb. */
2495                         if (!data_frame_count)
2496                                 first_frame_index = seg_index;
2497                         ++data_frame_count;
2498                 }
2499         }
2500
2501         if (xfer->is_inbound && data_frame_count) {
2502                 int result, total_frames_read = 0, urb_index = 0;
2503                 struct urb *buf_in_urb;
2504
2505                 /* IN data phase: read to buffer */
2506                 seg->status = WA_SEG_DTI_PENDING;
2507
2508                 /* start with the first frame with data. */
2509                 seg->isoc_frame_index = first_frame_index;
2510                 /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2511                 do {
2512                         int urb_frame_index, urb_frame_count;
2513                         struct usb_iso_packet_descriptor *iso_frame_desc;
2514
2515                         buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2516                         urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2517                                 buf_in_urb, xfer, seg);
2518                         /* advance frame index to start of next read URB. */
2519                         seg->isoc_frame_index += urb_frame_count;
2520                         total_frames_read += urb_frame_count;
2521
2522                         ++(wa->active_buf_in_urbs);
2523                         result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2524
2525                         /* skip 0-byte frames. */
2526                         urb_frame_index =
2527                                 seg->isoc_frame_offset + seg->isoc_frame_index;
2528                         iso_frame_desc =
2529                                 &(xfer->urb->iso_frame_desc[urb_frame_index]);
2530                         while ((seg->isoc_frame_index <
2531                                                 seg->isoc_frame_count) &&
2532                                  (iso_frame_desc->actual_length == 0)) {
2533                                 ++(seg->isoc_frame_index);
2534                                 ++iso_frame_desc;
2535                         }
2536                         ++urb_index;
2537
2538                 } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2539                                 && (seg->isoc_frame_index <
2540                                                 seg->isoc_frame_count));
2541
2542                 if (result < 0) {
2543                         --(wa->active_buf_in_urbs);
2544                         dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
2545                                 result);
2546                         wa_reset_all(wa);
2547                 } else if (data_frame_count > total_frames_read)
2548                         /* If we need to read more frames, set DTI busy. */
2549                         dti_busy = 1;
2550         } else {
2551                 /* OUT transfer or no more IN data, complete it -- */
2552                 rpipe_ready = rpipe_avail_inc(rpipe);
2553                 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2554         }
2555         spin_unlock_irqrestore(&xfer->lock, flags);
2556         if (dti_busy)
2557                 wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2558         else
2559                 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2560         if (done)
2561                 wa_xfer_completion(xfer);
2562         if (rpipe_ready)
2563                 wa_xfer_delayed_run(rpipe);
2564         wa_xfer_put(xfer);
2565         return dti_busy;
2566
2567 error_bad_seg:
2568         spin_unlock_irqrestore(&xfer->lock, flags);
2569         wa_xfer_put(xfer);
2570 error_parse_buffer:
2571         return dti_busy;
2572 }
2573
2574 /*
2575  * Callback for the IN data phase
2576  *
2577  * If successful, transition state; otherwise, take note of the
2578  * error, mark this segment as done and try completion.
2579  *
2580  * Note we don't access the transfer until we are sure that it hasn't
2581  * been cancelled (ECONNRESET, ENOENT), since by then seg->xfer could
2582  * already be gone.
2583  */
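/*
 * Outcome summary (informal sketch of the urb->status switch below):
 *
 *   0                   -> accumulate seg->result; submit another buf-in
 *                          URB if isoc frames with data remain, otherwise
 *                          (once no buf-in URBs are active) mark the
 *                          segment WA_SEG_DONE and try completion.
 *   -ECONNRESET/-ENOENT -> the URB was unlinked; nothing to do.
 *   other errors        -> record the error, mark the segment WA_SEG_ERROR
 *                          (or abort the transfer if reads are still in
 *                          flight) and resubmit the DTI URB if needed.
 */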
2584 static void wa_buf_in_cb(struct urb *urb)
2585 {
2586         struct wa_seg *seg = urb->context;
2587         struct wa_xfer *xfer = seg->xfer;
2588         struct wahc *wa;
2589         struct device *dev;
2590         struct wa_rpipe *rpipe;
2591         unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2592         unsigned long flags;
2593         int resubmit_dti = 0, active_buf_in_urbs;
2594         u8 done = 0;
2595
2596         /* free the sg if it was used. */
2597         kfree(urb->sg);
2598         urb->sg = NULL;
2599
2600         spin_lock_irqsave(&xfer->lock, flags);
2601         wa = xfer->wa;
2602         dev = &wa->usb_iface->dev;
2603         --(wa->active_buf_in_urbs);
2604         active_buf_in_urbs = wa->active_buf_in_urbs;
2605
2606         if (usb_pipeisoc(xfer->urb->pipe)) {
2607                 struct usb_iso_packet_descriptor *iso_frame_desc =
2608                         xfer->urb->iso_frame_desc;
2609                 int     seg_index;
2610
2611                 /*
2612                  * Find the next isoc frame with data and count how many
2613                  * frames with data remain.
2614                  */
2615                 seg_index = seg->isoc_frame_index;
2616                 while (seg_index < seg->isoc_frame_count) {
2617                         const int urb_frame_index =
2618                                 seg->isoc_frame_offset + seg_index;
2619
2620                         if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2621                                 /* save the index of the next frame with data */
2622                                 if (!isoc_data_frame_count)
2623                                         seg->isoc_frame_index = seg_index;
2624                                 ++isoc_data_frame_count;
2625                         }
2626                         ++seg_index;
2627                 }
2628         }
2629         spin_unlock_irqrestore(&xfer->lock, flags);
2630
2631         switch (urb->status) {
2632         case 0:
2633                 spin_lock_irqsave(&xfer->lock, flags);
2634
2635                 seg->result += urb->actual_length;
2636                 if (isoc_data_frame_count > 0) {
2637                         int result, urb_frame_count;
2638
2639                         /* submit a read URB for the next frame with data. */
2640                         urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2641                                  xfer, seg);
2642                         /* advance index to start of next read URB. */
2643                         seg->isoc_frame_index += urb_frame_count;
2644                         ++(wa->active_buf_in_urbs);
2645                         result = usb_submit_urb(urb, GFP_ATOMIC);
2646                         if (result < 0) {
2647                                 --(wa->active_buf_in_urbs);
2648                                 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
2649                                         result);
2650                                 wa_reset_all(wa);
2651                         }
2652                         /*
2653                          * If we are in this callback and
2654                          * isoc_data_frame_count > 0, it means that the dti_urb
2655                          * submission was delayed in wa_dti_cb.  Once
2656                          * we submit the last buf_in_urb, we can submit the
2657                          * delayed dti_urb.
2658                          */
2659                         resubmit_dti = (isoc_data_frame_count ==
2660                                         urb_frame_count);
2661                 } else if (active_buf_in_urbs == 0) {
2662                         rpipe = xfer->ep->hcpriv;
2663                         dev_dbg(dev,
2664                                 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2665                                 xfer, wa_xfer_id(xfer), seg->index,
2666                                 seg->result);
2667                         rpipe_ready = rpipe_avail_inc(rpipe);
2668                         done = __wa_xfer_mark_seg_as_done(xfer, seg,
2669                                         WA_SEG_DONE);
2670                 }
2671                 spin_unlock_irqrestore(&xfer->lock, flags);
2672                 if (done)
2673                         wa_xfer_completion(xfer);
2674                 if (rpipe_ready)
2675                         wa_xfer_delayed_run(rpipe);
2676                 break;
2677         case -ECONNRESET:       /* URB unlinked; no need to do anything */
2678         case -ENOENT:           /* as it was done by whoever unlinked us */
2679                 break;
2680         default:                /* Other errors ... */
2681                 /*
2682                  * Error on data buf read.  Only resubmit DTI if it hasn't
2683                  * already been done by previously hitting this error or by a
2684                  * successful completion of the previous buf_in_urb.
2685                  */
2686                 resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2687                 spin_lock_irqsave(&xfer->lock, flags);
2688                 rpipe = xfer->ep->hcpriv;
2689                 if (printk_ratelimit())
2690                         dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2691                                 xfer, wa_xfer_id(xfer), seg->index,
2692                                 urb->status);
2693                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2694                             EDC_ERROR_TIMEFRAME)) {
2695                         dev_err(dev, "DTI: URB max acceptable errors "
2696                                 "exceeded, resetting device\n");
2697                         wa_reset_all(wa);
2698                 }
2699                 seg->result = urb->status;
2700                 rpipe_ready = rpipe_avail_inc(rpipe);
2701                 if (active_buf_in_urbs == 0)
2702                         done = __wa_xfer_mark_seg_as_done(xfer, seg,
2703                                 WA_SEG_ERROR);
2704                 else
2705                         __wa_xfer_abort(xfer);
2706                 spin_unlock_irqrestore(&xfer->lock, flags);
2707                 if (done)
2708                         wa_xfer_completion(xfer);
2709                 if (rpipe_ready)
2710                         wa_xfer_delayed_run(rpipe);
2711         }
2712
2713         if (resubmit_dti) {
2714                 int result;
2715
2716                 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2717
2718                 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2719                 if (result < 0) {
2720                         dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2721                                 result);
2722                         wa_reset_all(wa);
2723                 }
2724         }
2725 }
2726
2727 /*
2728  * Handle an incoming transfer result buffer
2729  *
2730  * Given a transfer result buffer, it completes the transfer (possibly
2731  * scheduling a buffer-in read) and then resubmits the DTI URB for a
2732  * new transfer result read.
2733  *
2734  *
2735  * The xfer_result DTI URB state machine
2736  *
2737  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2738  *
2739  * We start in OFF mode, the first xfer_result notification [through
2740  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2741  * read.
2742  *
2743  * We receive a buffer -- if it is not an xfer_result, we complain and
2744  * repost the DTI-URB. If it is an xfer_result, we do the xfer seg
2745  * request accounting. If it is an IN segment, we move to RBI and post
2746  * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2747  * repost the DTI-URB and move back to the RXR state. If there was no
2748  * IN segment, it will repost the DTI-URB.
2749  *
2750  * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
2751  * errors) in the URBs.
2752  */
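/*
 * Illustrative transition summary (an informal sketch of the description
 * above, not an exhaustive list of corner cases):
 *
 *   OFF --first xfer_result notification---> RXR  (DTI-URB posted)
 *   RXR --xfer_result for an IN segment----> RBI  (BUF-IN-URB posted)
 *   RBI --BUF-IN-URB completes-------------> RXR  (DTI-URB reposted)
 *   RXR --xfer_result with no IN data------> RXR  (DTI-URB reposted)
 *   any --ENOENT/ESHUTDOWN/too many errors-> OFF
 */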
2753 static void wa_dti_cb(struct urb *urb)
2754 {
2755         int result, dti_busy = 0;
2756         struct wahc *wa = urb->context;
2757         struct device *dev = &wa->usb_iface->dev;
2758         u32 xfer_id;
2759         u8 usb_status;
2760
2761         BUG_ON(wa->dti_urb != urb);
2762         switch (wa->dti_urb->status) {
2763         case 0:
2764                 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2765                         struct wa_xfer_result *xfer_result;
2766                         struct wa_xfer *xfer;
2767
2768                         /* We have a xfer result buffer; check it */
2769                         dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2770                                 urb->actual_length, urb->transfer_buffer);
2771                         if (urb->actual_length != sizeof(*xfer_result)) {
2772                                 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2773                                         urb->actual_length,
2774                                         sizeof(*xfer_result));
2775                                 break;
2776                         }
2777                         xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2778                         if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2779                                 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2780                                         xfer_result->hdr.bLength);
2781                                 break;
2782                         }
2783                         if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2784                                 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2785                                         xfer_result->hdr.bNotifyType);
2786                                 break;
2787                         }
2788                         xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2789                         usb_status = xfer_result->bTransferStatus & 0x3f;
2790                         if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2791                                 /* taken care of already */
2792                                 dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2793                                         __func__, xfer_id,
2794                                         xfer_result->bTransferSegment & 0x7f);
2795                                 break;
2796                         }
2797                         xfer = wa_xfer_get_by_id(wa, xfer_id);
2798                         if (xfer == NULL) {
2799                                 /* FIXME: transaction not found. */
2800                                 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2801                                         xfer_id, usb_status);
2802                                 break;
2803                         }
2804                         wa_xfer_result_chew(wa, xfer, xfer_result);
2805                         wa_xfer_put(xfer);
2806                 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2807                         dti_busy = wa_process_iso_packet_status(wa, urb);
2808                 } else {
2809                         dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2810                                 wa->dti_state);
2811                 }
2812                 break;
2813         case -ENOENT:           /* (we killed the URB)...so, no broadcast */
2814         case -ESHUTDOWN:        /* going away! */
2815                 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2816                 goto out;
2817         default:
2818                 /* Unknown error */
2819                 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2820                             EDC_ERROR_TIMEFRAME)) {
2821                         dev_err(dev, "DTI: URB max acceptable errors "
2822                                 "exceeded, resetting device\n");
2823                         wa_reset_all(wa);
2824                         goto out;
2825                 }
2826                 if (printk_ratelimit())
2827                         dev_err(dev, "DTI: URB error %d\n", urb->status);
2828                 break;
2829         }
2830
2831         /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2832         if (!dti_busy) {
2833                 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2834                 if (result < 0) {
2835                         dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2836                                 result);
2837                         wa_reset_all(wa);
2838                 }
2839         }
2840 out:
2841         return;
2842 }
2843
2844 /*
2845  * Initialize the DTI URB for reading transfer result notifications and also
2846  * the buffer-in URBs, for reading data buffers. Then we just submit the DTI URB.
2847  */
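/*
 * Note (informal): it is safe to call this more than once; if wa->dti_urb
 * has already been allocated, the function returns 0 right away.
 * wa_handle_notif_xfer() below relies on this and calls it on every
 * transfer notification.
 */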
2848 int wa_dti_start(struct wahc *wa)
2849 {
2850         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2851         struct device *dev = &wa->usb_iface->dev;
2852         int result = -ENOMEM, index;
2853
2854         if (wa->dti_urb != NULL)        /* DTI URB already started */
2855                 goto out;
2856
2857         wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2858         if (wa->dti_urb == NULL) {
2859                 dev_err(dev, "Can't allocate DTI URB\n");
2860                 goto error_dti_urb_alloc;
2861         }
2862         usb_fill_bulk_urb(
2863                 wa->dti_urb, wa->usb_dev,
2864                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2865                 wa->dti_buf, wa->dti_buf_size,
2866                 wa_dti_cb, wa);
2867
2868         /* init the buf in URBs */
2869         for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2870                 usb_fill_bulk_urb(
2871                         &(wa->buf_in_urbs[index]), wa->usb_dev,
2872                         usb_rcvbulkpipe(wa->usb_dev,
2873                                 0x80 | dti_epd->bEndpointAddress),
2874                         NULL, 0, wa_buf_in_cb, wa);
2875         }
2876         result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2877         if (result < 0) {
2878                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2879                         result);
2880                 goto error_dti_urb_submit;
2881         }
2882 out:
2883         return 0;
2884
2885 error_dti_urb_submit:
2886         usb_put_urb(wa->dti_urb);
2887         wa->dti_urb = NULL;
2888 error_dti_urb_alloc:
2889         return result;
2890 }
2891 EXPORT_SYMBOL_GPL(wa_dti_start);
2892 /*
2893  * Transfer complete notification
2894  *
2895  * Called from the notif.c code. We get a notification on EP2 saying
2896  * that some endpoint has some transfer result data available. We are
2897  * about to read it.
2898  *
2899  * To speed things up, we always have a URB reading the DTI endpoint;
2900  * we don't really set it up and start it until the first xfer complete
2901  * notification arrives, which is what we do here.
2902  *
2903  * Follow up in wa_dti_cb(), as that's where the whole state
2904  * machine starts.
2905  *
2906  * @wa shall be referenced
2907  */
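/*
 * Example call path (informal): the notification dispatch code in notif.c
 * decodes a header with bNotifyType == WA_NOTIF_TRANSFER and hands it to
 * this handler, which kicks wa_dti_start() and, from then on, wa_dti_cb().
 */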
2908 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2909 {
2910         struct device *dev = &wa->usb_iface->dev;
2911         struct wa_notif_xfer *notif_xfer;
2912         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2913
2914         notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2915         BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2916
2917         if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2918                 /* FIXME: hardcoded limitation, adapt */
2919                 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2920                         notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2921                 goto error;
2922         }
2923
2924         /* attempt to start the DTI ep processing. */
2925         if (wa_dti_start(wa) < 0)
2926                 goto error;
2927
2928         return;
2929
2930 error:
2931         wa_reset_all(wa);
2932 }