1 /*
2  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
3  *
4  * Copyright (C) 2004-2013 Synopsys, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The names of the above-listed copyright holders may not be used
16  *    to endorse or promote products derived from this software without
17  *    specific prior written permission.
18  *
19  * ALTERNATIVELY, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") as published by the Free Software
21  * Foundation; either version 2 of the License, or (at your option) any
22  * later version.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 /*
38  * This file contains the Descriptor DMA implementation for Host mode
39  */
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/io.h>
46 #include <linux/slab.h>
47 #include <linux/usb.h>
48
49 #include <linux/usb/hcd.h>
50 #include <linux/usb/ch11.h>
51
52 #include "core.h"
53 #include "hcd.h"
54
55 static u16 dwc2_frame_list_idx(u16 frame)
56 {
57         return frame & (FRLISTEN_64_SIZE - 1);
58 }
59
60 static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
61 {
62         return (idx + inc) &
63                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
64                   MAX_DMA_DESC_NUM_GENERIC) - 1);
65 }
66
67 static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
68 {
69         return (idx - inc) &
70                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
71                   MAX_DMA_DESC_NUM_GENERIC) - 1);
72 }
73
74 static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
75 {
76         return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
77                 qh->dev_speed == USB_SPEED_HIGH) ?
78                 MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
79 }
80
81 static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
82 {
83         return qh->dev_speed == USB_SPEED_HIGH ?
84                (qh->interval + 8 - 1) / 8 : qh->interval;
85 }
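
/*
 * Worked examples for the helpers above (illustrative values): with a
 * 64-entry frame list, dwc2_frame_list_idx(0x47) = 0x07. Descriptor
 * list indices wrap modulo the list size, so dwc2_desclist_idx_inc()
 * and dwc2_desclist_idx_dec() can never step outside the list. For a
 * high-speed periodic endpoint with an interval of 12 uframes,
 * dwc2_frame_incr_val() returns (12 + 7) / 8 = 2 frames; for full
 * speed the interval is already in frames and is returned unchanged.
 */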
86
87 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
88                                 gfp_t flags)
89 {
90         qh->desc_list = dma_alloc_coherent(hsotg->dev,
91                                 sizeof(struct dwc2_hcd_dma_desc) *
92                                 dwc2_max_desc_num(qh), &qh->desc_list_dma,
93                                 flags);
94
95         if (!qh->desc_list)
96                 return -ENOMEM;
97
98         memset(qh->desc_list, 0,
99                sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));
100
101         qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
102         if (!qh->n_bytes) {
103                 dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
104                                   * dwc2_max_desc_num(qh), qh->desc_list,
105                                   qh->desc_list_dma);
106                 qh->desc_list = NULL;
107                 return -ENOMEM;
108         }
109
110         return 0;
111 }
112
113 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
114 {
115         if (qh->desc_list) {
116                 dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
117                                   * dwc2_max_desc_num(qh), qh->desc_list,
118                                   qh->desc_list_dma);
119                 qh->desc_list = NULL;
120         }
121
122         kfree(qh->n_bytes);
123         qh->n_bytes = NULL;
124 }
125
126 static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
127 {
128         if (hsotg->frame_list)
129                 return 0;
130
131         hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
132                                                4 * FRLISTEN_64_SIZE,
133                                                &hsotg->frame_list_dma,
134                                                mem_flags);
135         if (!hsotg->frame_list)
136                 return -ENOMEM;
137
138         memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
139         return 0;
140 }
141
142 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
143 {
144         u32 *frame_list;
145         dma_addr_t frame_list_dma;
146         unsigned long flags;
147
148         spin_lock_irqsave(&hsotg->lock, flags);
149
150         if (!hsotg->frame_list) {
151                 spin_unlock_irqrestore(&hsotg->lock, flags);
152                 return;
153         }
154
155         frame_list = hsotg->frame_list;
156         frame_list_dma = hsotg->frame_list_dma;
157         hsotg->frame_list = NULL;
158
159         spin_unlock_irqrestore(&hsotg->lock, flags);
160
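        /*
         * The pointer was snapshotted and cleared under the lock above;
         * the buffer itself is freed here, outside the spinlock, since
         * dma_free_coherent() may not be called with interrupts
         * disabled.
         */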
161         dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
162                           frame_list_dma);
163 }
164
165 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
166 {
167         u32 hcfg;
168         unsigned long flags;
169
170         spin_lock_irqsave(&hsotg->lock, flags);
171
172         hcfg = readl(hsotg->regs + HCFG);
173         if (hcfg & HCFG_PERSCHEDENA) {
174                 /* already enabled */
175                 spin_unlock_irqrestore(&hsotg->lock, flags);
176                 return;
177         }
178
179         writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);
180
181         hcfg &= ~HCFG_FRLISTEN_MASK;
182         hcfg |= fr_list_en | HCFG_PERSCHEDENA;
183         dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
184         writel(hcfg, hsotg->regs + HCFG);
185
186         spin_unlock_irqrestore(&hsotg->lock, flags);
187 }
188
189 static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
190 {
191         u32 hcfg;
192         unsigned long flags;
193
194         spin_lock_irqsave(&hsotg->lock, flags);
195
196         hcfg = readl(hsotg->regs + HCFG);
197         if (!(hcfg & HCFG_PERSCHEDENA)) {
198                 /* already disabled */
199                 spin_unlock_irqrestore(&hsotg->lock, flags);
200                 return;
201         }
202
203         hcfg &= ~HCFG_PERSCHEDENA;
204         dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
205         writel(hcfg, hsotg->regs + HCFG);
206
207         spin_unlock_irqrestore(&hsotg->lock, flags);
208 }
209
210 /*
211  * Activates/Deactivates FrameList entries for the channel based on endpoint
212  * servicing period
213  */
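/*
 * Worked example (illustrative values): for a full-speed INT endpoint
 * with qh->interval = 8 frames, the channel's bit is set in FrameList
 * entries 0, 8, 16, ..., 56. For a high-speed channel with an interval
 * of 2 uframes, the loop below computes chan->schinfo = 0x55, i.e. one
 * bit set for every second uframe of the frame.
 */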
214 static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
215                                    int enable)
216 {
217         struct dwc2_host_chan *chan;
218         u16 i, j, inc;
219
220         if (!hsotg) {
221                 pr_err("hsotg = %p\n", hsotg);
222                 return;
223         }
224
225         if (!qh->channel) {
226                 dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
227                 return;
228         }
229
230         if (!hsotg->frame_list) {
231                 dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
232                         hsotg->frame_list);
233                 return;
234         }
235
236         chan = qh->channel;
237         inc = dwc2_frame_incr_val(qh);
238         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
239                 i = dwc2_frame_list_idx(qh->sched_frame);
240         else
241                 i = 0;
242
243         j = i;
244         do {
245                 if (enable)
246                         hsotg->frame_list[j] |= 1 << chan->hc_num;
247                 else
248                         hsotg->frame_list[j] &= ~(1 << chan->hc_num);
249                 j = (j + inc) & (FRLISTEN_64_SIZE - 1);
250         } while (j != i);
251
252         if (!enable)
253                 return;
254
255         chan->schinfo = 0;
256         if (chan->speed == USB_SPEED_HIGH && qh->interval) {
257                 j = 1;
258                 /* TODO - check this */
259                 inc = (8 + qh->interval - 1) / qh->interval;
260                 for (i = 0; i < inc; i++) {
261                         chan->schinfo |= j;
262                         j = j << qh->interval;
263                 }
264         } else {
265                 chan->schinfo = 0xff;
266         }
267 }
268
269 static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
270                                       struct dwc2_qh *qh)
271 {
272         struct dwc2_host_chan *chan = qh->channel;
273
274         if (dwc2_qh_is_non_per(qh))
275                 hsotg->non_periodic_channels--;
276         else
277                 dwc2_update_frame_list(hsotg, qh, 0);
278
279         /*
280          * This check prevents a double cleanup attempt in case of device
281          * disconnect. See channel cleanup in dwc2_hcd_disconnect().
282          */
283         if (chan->qh) {
284                 if (!list_empty(&chan->hc_list_entry))
285                         list_del(&chan->hc_list_entry);
286                 dwc2_hc_cleanup(hsotg, chan);
287                 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
288                 chan->qh = NULL;
289         }
290
291         qh->channel = NULL;
292         qh->ntd = 0;
293
294         if (qh->desc_list)
295                 memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
296                        dwc2_max_desc_num(qh));
297 }
298
299 /**
300  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
301  * related members
302  *
303  * @hsotg: The HCD state structure for the DWC OTG controller
304  * @qh:    The QH to init
305  *
306  * Return: 0 if successful, negative error code otherwise
307  *
308  * Allocates memory for the descriptor list. For the first periodic QH,
309  * allocates memory for the FrameList and enables periodic scheduling.
310  */
311 int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
312                           gfp_t mem_flags)
313 {
314         int retval;
315
316         if (qh->do_split) {
317                 dev_err(hsotg->dev,
318                         "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
319                 retval = -EINVAL;
320                 goto err0;
321         }
322
323         retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
324         if (retval)
325                 goto err0;
326
327         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
328             qh->ep_type == USB_ENDPOINT_XFER_INT) {
329                 if (!hsotg->frame_list) {
330                         retval = dwc2_frame_list_alloc(hsotg, mem_flags);
331                         if (retval)
332                                 goto err1;
333                         /* Enable periodic schedule on first periodic QH */
334                         dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
335                 }
336         }
337
338         qh->ntd = 0;
339         return 0;
340
341 err1:
342         dwc2_desc_list_free(hsotg, qh);
343 err0:
344         return retval;
345 }
346
347 /**
348  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
349  * members
350  *
351  * @hsotg: The HCD state structure for the DWC OTG controller
352  * @qh:    The QH to free
353  *
354  * Frees descriptor list memory associated with the QH. If the QH is periodic
355  * and the last one, frees the FrameList and disables periodic scheduling.
356  */
357 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
358 {
359         dwc2_desc_list_free(hsotg, qh);
360
361         /*
362          * The channel may still be assigned. This has been seen on Isoc URB
363          * dequeue: the channel is halted but no subsequent ChHalted
364          * interrupt arrives to release it, so when this function is later
365          * called from the endpoint disable routine the channel remains
366          * assigned.
367          */
368         if (qh->channel)
369                 dwc2_release_channel_ddma(hsotg, qh);
370
371         if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
372              qh->ep_type == USB_ENDPOINT_XFER_INT) &&
373             !hsotg->periodic_channels && hsotg->frame_list) {
374                 dwc2_per_sched_disable(hsotg);
375                 dwc2_frame_list_free(hsotg);
376         }
377 }
378
379 static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
380 {
381         if (qh->dev_speed == USB_SPEED_HIGH)
382                 /* Descriptor set (8 descriptors) index which is 8-aligned */
383                 return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
384         else
385                 return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
386 }
387
388 /*
389  * Determine the starting frame for an Isochronous transfer.
390  * A few frames are skipped to prevent a race condition with the HC.
391  */
392 static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
393                                     struct dwc2_qh *qh, u16 *skip_frames)
394 {
395         u16 frame;
396
397         hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
398
399         /* sched_frame is always frame number (not uFrame) both in FS and HS! */
400
401         /*
402          * skip_frames is used to limit the number of activated descriptors,
403          * to avoid the situation where the HC services the last activated
404          * descriptor first.
405          * Example for FS:
406          * The current frame is 1 and the scheduled frame is 3. Since the HC
407          * always fetches the descriptor corresponding to curr_frame+1, the
408          * descriptor for frame 2 will be fetched. If the number of
409          * descriptors is max=64 (or greater), the list will be fully
410          * programmed with Active descriptors and it is possible (though
411          * rare) that the latest descriptor (considering rollback) for
412          * frame 2 will be serviced first. The HS case is more probable
413          * because, in fact, up to 11 uframes (16 in the code) may be skipped.
414          */
415         if (qh->dev_speed == USB_SPEED_HIGH) {
416                 /*
417                  * Also consider the uframe counter, to start the transfer as
418                  * soon as possible. If half of the frame has elapsed, skip 2
419                  * frames, otherwise skip just 1. The starting descriptor index
420                  * must be 8-aligned, so if the current frame is nearly
421                  * complete, the next one is skipped as well.
422                  */
423                 if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
424                         *skip_frames = 2 * 8;
425                         frame = dwc2_frame_num_inc(hsotg->frame_number,
426                                                    *skip_frames);
427                 } else {
428                         *skip_frames = 1 * 8;
429                         frame = dwc2_frame_num_inc(hsotg->frame_number,
430                                                    *skip_frames);
431                 }
432
433                 frame = dwc2_full_frame_num(frame);
434         } else {
435                 /*
436                  * Two frames are skipped for FS - the current and the next.
437                  * But for descriptor programming, 1 frame (descriptor) is
438                  * enough, see example above.
439                  */
440                 *skip_frames = 1;
441                 frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
442         }
443
444         return frame;
445 }
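
/*
 * Worked example (illustrative values): in high speed, if the frame
 * counter currently reads uframe 6 of frame N, half the frame has
 * already elapsed, so 16 uframes are skipped and the first descriptor
 * targets frame N + 2; at uframe 2 only 8 uframes are skipped and
 * frame N + 1 is targeted. In full speed the returned frame is simply
 * the current frame number plus 2.
 */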
446
447 /*
448  * Calculate initial descriptor index for isochronous transfer based on
449  * scheduled frame
450  */
451 static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
452                                         struct dwc2_qh *qh)
453 {
454         u16 frame, fr_idx, fr_idx_tmp, skip_frames;
455
456         /*
457          * With the current ISOC processing algorithm the channel is released
458          * when there are no more QTDs in the list (qh->ntd == 0). Thus this
459          * function is called only when qh->ntd == 0 and qh->channel == NULL.
460          *
461          * So the qh->channel != NULL branch is not used; it is kept in the
462          * source file for another possible approach: do not disable and
463          * release the channel when the ISOC session completes, just move the
464          * QH to the inactive schedule until a new QTD arrives. On a new QTD,
465          * the QH is moved back to the 'ready' schedule, and the starting frame
466          * and therefore the starting desc_index are recalculated. In that case
467          * the channel is released only on ep_disable.
468          */
469
470         /*
471          * Calculate the starting descriptor index. For an INTERRUPT endpoint
472          * it is always 0.
473          */
474         if (qh->channel) {
475                 frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
476                 /*
477                  * Calculate initial descriptor index based on FrameList current
478                  * bitmap and servicing period
479                  */
480                 fr_idx_tmp = dwc2_frame_list_idx(frame);
481                 fr_idx = (FRLISTEN_64_SIZE +
482                           dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
483                          % dwc2_frame_incr_val(qh);
484                 fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
485         } else {
486                 qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
487                                                            &skip_frames);
488                 fr_idx = dwc2_frame_list_idx(qh->sched_frame);
489         }
490
491         qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
492
493         return skip_frames;
494 }
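
/*
 * Worked example for the qh->channel branch above (illustrative
 * values): with a servicing period of 8 frames, a sched_frame index of
 * 3 and a newly calculated starting frame index of 14,
 * fr_idx = ((64 + 3 - 14) % 8 + 14) % 64 = 19, i.e. the first
 * frame-list entry at or after index 14 that preserves the original
 * phase (19 % 8 == 3).
 */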
495
496 #define ISOC_URB_GIVEBACK_ASAP
497
498 #define MAX_ISOC_XFER_SIZE_FS   1023
499 #define MAX_ISOC_XFER_SIZE_HS   3072
500 #define DESCNUM_THRESHOLD       4
501
502 static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
503                                          struct dwc2_qtd *qtd,
504                                          struct dwc2_qh *qh, u32 max_xfer_size,
505                                          u16 idx)
506 {
507         struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
508         struct dwc2_hcd_iso_packet_desc *frame_desc;
509
510         memset(dma_desc, 0, sizeof(*dma_desc));
511         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
512
513         if (frame_desc->length > max_xfer_size)
514                 qh->n_bytes[idx] = max_xfer_size;
515         else
516                 qh->n_bytes[idx] = frame_desc->length;
517
518         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
519         dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
520                            HOST_DMA_ISOC_NBYTES_MASK;
521
522 #ifdef ISOC_URB_GIVEBACK_ASAP
523         /* Set IOC for each descriptor corresponding to last frame of URB */
524         if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
525                 dma_desc->status |= HOST_DMA_IOC;
526 #endif
527
528         qh->ntd++;
529         qtd->isoc_frame_index_last++;
530 }
531
532 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
533                                     struct dwc2_qh *qh, u16 skip_frames)
534 {
535         struct dwc2_qtd *qtd;
536         u32 max_xfer_size;
537         u16 idx, inc, n_desc, ntd_max = 0;
538
539         idx = qh->td_last;
540         inc = qh->interval;
541         n_desc = 0;
542
543         if (qh->interval) {
544                 ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
545                                 qh->interval;
546                 if (skip_frames && !qh->channel)
547                         ntd_max -= skip_frames / qh->interval;
548         }
549
550         max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
551                         MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
552
553         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
554                 while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
555                                                 qtd->urb->packet_count) {
556                         if (n_desc > 1)
557                                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
558                         dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
559                                                      max_xfer_size, idx);
560                         idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
561                         n_desc++;
562                 }
563                 qtd->in_process = 1;
564         }
565
566         qh->td_last = idx;
567
568 #ifdef ISOC_URB_GIVEBACK_ASAP
569         /* Set IOC for last descriptor if descriptor list is full */
570         if (qh->ntd == ntd_max) {
571                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
572                 qh->desc_list[idx].status |= HOST_DMA_IOC;
573         }
574 #else
575         /*
576          * Set the IOC bit on only one descriptor. Always try to stay ahead
577          * of HW processing, i.e. on IOC generation the driver activates the
578          * next descriptors while the core continues to process descriptors
579          * following the one with IOC set.
580          */
581
582         if (n_desc > DESCNUM_THRESHOLD)
583                 /*
584                  * Move the IOC "up". This is required even if there is only
585                  * one QTD in the list, because QTDs might continue to be
586                  * queued, while only one was queued at activation time.
587                  * Actually, more than one QTD might be in the list if this
588                  * function is called from XferCompletion - QTDs were queued
589                  * during HW processing of the previous descriptor chunk.
590                  */
591                 idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
592                                             qh->dev_speed);
593         else
594                 /*
595                  * Set the IOC on the latest descriptor if either the number
596                  * of descriptors is not greater than the threshold or no new
597                  * descriptors were activated
598                  */
599                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
600
601         qh->desc_list[idx].status |= HOST_DMA_IOC;
602 #endif
603
604         if (n_desc) {
605                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
606                 if (n_desc > 1)
607                         qh->desc_list[0].status |= HOST_DMA_A;
608         }
609 }
610
611 static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
612                                     struct dwc2_host_chan *chan,
613                                     struct dwc2_qtd *qtd, struct dwc2_qh *qh,
614                                     int n_desc)
615 {
616         struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
617         int len = chan->xfer_len;
618
619         if (len > MAX_DMA_DESC_SIZE)
620                 len = MAX_DMA_DESC_SIZE - chan->max_packet + 1;
621
622         if (chan->ep_is_in) {
623                 int num_packets;
624
625                 if (len > 0 && chan->max_packet)
626                         num_packets = (len + chan->max_packet - 1)
627                                         / chan->max_packet;
628                 else
629                         /* Need 1 packet for transfer length of 0 */
630                         num_packets = 1;
631
632                 /* Always program an integral # of packets for IN transfers */
633                 len = num_packets * chan->max_packet;
634         }
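        /*
         * Worked example (illustrative values): for an IN transfer of
         * 3000 bytes with max_packet = 512, num_packets = 6 and len is
         * rounded up to 3072. Since that exceeds the remaining
         * xfer_len, the branch below resets chan->xfer_len to 0 and
         * the descriptor is expected to complete with a short packet.
         */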
635
636         dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
637         qh->n_bytes[n_desc] = len;
638
639         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
640             qtd->control_phase == DWC2_CONTROL_SETUP)
641                 dma_desc->status |= HOST_DMA_SUP;
642
643         dma_desc->buf = (u32)chan->xfer_dma;
644
645         /*
646          * Last (or only) descriptor of IN transfer with actual size less
647          * than MaxPacket
648          */
649         if (len > chan->xfer_len) {
650                 chan->xfer_len = 0;
651         } else {
652                 chan->xfer_dma += len;
653                 chan->xfer_len -= len;
654         }
655 }
656
657 static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
658                                         struct dwc2_qh *qh)
659 {
660         struct dwc2_qtd *qtd;
661         struct dwc2_host_chan *chan = qh->channel;
662         int n_desc = 0;
663
664         dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
665                  (unsigned long)chan->xfer_dma, chan->xfer_len);
666
667         /*
668          * Start with chan->xfer_dma initialized in assign_and_init_hc(). If
669          * the SG transfer consists of multiple URBs, this pointer is
670          * re-assigned to the buffer of the currently processed QTD. For a
671          * non-SG request there is always just one QTD active.
672          */
673
674         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
675                 dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
676
677                 if (n_desc) {
678                         /* SG request - more than 1 QTD */
679                         chan->xfer_dma = qtd->urb->dma +
680                                         qtd->urb->actual_length;
681                         chan->xfer_len = qtd->urb->length -
682                                         qtd->urb->actual_length;
683                         dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
684                                  (unsigned long)chan->xfer_dma, chan->xfer_len);
685                 }
686
687                 qtd->n_desc = 0;
688                 do {
689                         if (n_desc > 1) {
690                                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
691                                 dev_vdbg(hsotg->dev,
692                                          "set A bit in desc %d (%p)\n",
693                                          n_desc - 1,
694                                          &qh->desc_list[n_desc - 1]);
695                         }
696                         dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
697                         dev_vdbg(hsotg->dev,
698                                  "desc %d (%p) buf=%08x status=%08x\n",
699                                  n_desc, &qh->desc_list[n_desc],
700                                  qh->desc_list[n_desc].buf,
701                                  qh->desc_list[n_desc].status);
702                         qtd->n_desc++;
703                         n_desc++;
704                 } while (chan->xfer_len > 0 &&
705                          n_desc != MAX_DMA_DESC_NUM_GENERIC);
706
707                 dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
708                 qtd->in_process = 1;
709                 if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
710                         break;
711                 if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
712                         break;
713         }
714
715         if (n_desc) {
716                 qh->desc_list[n_desc - 1].status |=
717                                 HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
718                 dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
719                          n_desc - 1, &qh->desc_list[n_desc - 1]);
720                 if (n_desc > 1) {
721                         qh->desc_list[0].status |= HOST_DMA_A;
722                         dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
723                                  &qh->desc_list[0]);
724                 }
725                 chan->ntd = n_desc;
726         }
727 }
728
729 /**
730  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
731  *
732  * @hsotg: The HCD state structure for the DWC OTG controller
733  * @qh:    The QH to init
734  *
735  * Return: 0 if successful, negative error code otherwise
736  *
737  * For Control and Bulk endpoints, initializes descriptor list and starts the
738  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
739  * list then updates FrameList, marking appropriate entries as active.
740  *
741  * For Isochronous endpoints the starting descriptor index is calculated based
742  * on the scheduled frame, but only on the first transfer descriptor within a
743  * session. Then the transfer is started via enabling the channel.
744  *
745  * For Isochronous endpoints the channel is not halted on the XferComplete
746  * interrupt, so it stays assigned to the endpoint (QH) until the session ends.
747  */
748 void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
749 {
750         /* Channel is already assigned */
751         struct dwc2_host_chan *chan = qh->channel;
752         u16 skip_frames = 0;
753
754         switch (chan->ep_type) {
755         case USB_ENDPOINT_XFER_CONTROL:
756         case USB_ENDPOINT_XFER_BULK:
757                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
758                 dwc2_hc_start_transfer_ddma(hsotg, chan);
759                 break;
760         case USB_ENDPOINT_XFER_INT:
761                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
762                 dwc2_update_frame_list(hsotg, qh, 1);
763                 dwc2_hc_start_transfer_ddma(hsotg, chan);
764                 break;
765         case USB_ENDPOINT_XFER_ISOC:
766                 if (!qh->ntd)
767                         skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
768                 dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
769
770                 if (!chan->xfer_started) {
771                         dwc2_update_frame_list(hsotg, qh, 1);
772
773                         /*
774                          * Always set ntd to the maximum rather than the actual
775                          * size; otherwise ntd would have to change while the
776                          * channel is enabled, which is not recommended.
777                          */
778                         chan->ntd = dwc2_max_desc_num(qh);
779
780                         /* Enable channel only once for ISOC */
781                         dwc2_hc_start_transfer_ddma(hsotg, chan);
782                 }
783
784                 break;
785         default:
786                 break;
787         }
788 }
789
790 #define DWC2_CMPL_DONE          1
791 #define DWC2_CMPL_STOP          2
792
793 static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
794                                         struct dwc2_host_chan *chan,
795                                         struct dwc2_qtd *qtd,
796                                         struct dwc2_qh *qh, u16 idx)
797 {
798         struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
799         struct dwc2_hcd_iso_packet_desc *frame_desc;
800         u16 remain = 0;
801         int rc = 0;
802
803         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
804         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
805         if (chan->ep_is_in)
806                 remain = dma_desc->status >> HOST_DMA_ISOC_NBYTES_SHIFT &
807                         HOST_DMA_ISOC_NBYTES_MASK >> HOST_DMA_ISOC_NBYTES_SHIFT;
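        /*
         * Note: >> binds tighter than & in C, so the expression above
         * reads the NBYTES field as (status >> SHIFT) & (MASK >> SHIFT),
         * i.e. the number of bytes the core did not transfer. The same
         * pattern is used in dwc2_update_non_isoc_urb_state_ddma().
         */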
808
809         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
810                 /*
811                  * XactError, or unable to complete all the transactions
812                  * in the scheduled micro-frame/frame, both indicated by
813                  * HOST_DMA_STS_PKTERR
814                  */
815                 qtd->urb->error_count++;
816                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
817                 frame_desc->status = -EPROTO;
818         } else {
819                 /* Success */
820                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
821                 frame_desc->status = 0;
822         }
823
824         if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
825                 /*
826                  * urb->status is not used for isoc transfers here. The
827                  * individual frame_desc statuses are used instead.
828                  */
829                 dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
830                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
831
832                 /*
833                  * This check is necessary because urb_dequeue can be called
834                  * from the urb complete callback (a sound driver, for example). All
835                  * pending URBs are dequeued there, so no need for further
836                  * processing.
837                  */
838                 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
839                         return -1;
840                 rc = DWC2_CMPL_DONE;
841         }
842
843         qh->ntd--;
844
845         /* Stop if IOC requested descriptor reached */
846         if (dma_desc->status & HOST_DMA_IOC)
847                 rc = DWC2_CMPL_STOP;
848
849         return rc;
850 }
851
852 static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
853                                          struct dwc2_host_chan *chan,
854                                          enum dwc2_halt_status halt_status)
855 {
856         struct dwc2_hcd_iso_packet_desc *frame_desc;
857         struct dwc2_qtd *qtd, *qtd_tmp;
858         struct dwc2_qh *qh;
859         u16 idx;
860         int rc;
861
862         qh = chan->qh;
863         idx = qh->td_first;
864
865         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
866                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
867                         qtd->in_process = 0;
868                 return;
869         }
870
871         if (halt_status == DWC2_HC_XFER_AHB_ERR ||
872             halt_status == DWC2_HC_XFER_BABBLE_ERR) {
873                 /*
874                  * The channel is halted in these error cases, which are
875                  * considered serious issues.
876                  * Complete all URBs, marking all frames as failed, irrespective
877                  * of whether some of the descriptors (frames) succeeded or not.
878                  * Pass the error code to the completion routine as well, to
879                  * update urb->status; some class drivers might use it to stop
880                  * queuing transfer requests.
881                  */
882                 int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
883                           -EIO : -EOVERFLOW;
884
885                 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
886                                          qtd_list_entry) {
887                         for (idx = 0; idx < qtd->urb->packet_count; idx++) {
888                                 frame_desc = &qtd->urb->iso_descs[idx];
889                                 frame_desc->status = err;
890                         }
891
892                         dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
893                                            err);
894                         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
895                 }
896
897                 return;
898         }
899
900         list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
901                 if (!qtd->in_process)
902                         break;
903                 do {
904                         rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
905                                                           idx);
906                         if (rc < 0)
907                                 return;
908                         idx = dwc2_desclist_idx_inc(idx, qh->interval,
909                                                     chan->speed);
910                         if (rc == DWC2_CMPL_STOP)
911                                 goto stop_scan;
912                         if (rc == DWC2_CMPL_DONE)
913                                 break;
914                 } while (idx != qh->td_first);
915         }
916
917 stop_scan:
918         qh->td_first = idx;
919 }
920
921 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
922                                         struct dwc2_host_chan *chan,
923                                         struct dwc2_qtd *qtd,
924                                         struct dwc2_hcd_dma_desc *dma_desc,
925                                         enum dwc2_halt_status halt_status,
926                                         u32 n_bytes, int *xfer_done)
927 {
928         struct dwc2_hcd_urb *urb = qtd->urb;
929         u16 remain = 0;
930
931         if (chan->ep_is_in)
932                 remain = dma_desc->status >> HOST_DMA_NBYTES_SHIFT &
933                          HOST_DMA_NBYTES_MASK >> HOST_DMA_NBYTES_SHIFT;
934
935         dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
936
937         if (halt_status == DWC2_HC_XFER_AHB_ERR) {
938                 dev_err(hsotg->dev, "EIO\n");
939                 urb->status = -EIO;
940                 return 1;
941         }
942
943         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
944                 switch (halt_status) {
945                 case DWC2_HC_XFER_STALL:
946                         dev_vdbg(hsotg->dev, "Stall\n");
947                         urb->status = -EPIPE;
948                         break;
949                 case DWC2_HC_XFER_BABBLE_ERR:
950                         dev_err(hsotg->dev, "Babble\n");
951                         urb->status = -EOVERFLOW;
952                         break;
953                 case DWC2_HC_XFER_XACT_ERR:
954                         dev_err(hsotg->dev, "XactErr\n");
955                         urb->status = -EPROTO;
956                         break;
957                 default:
958                         dev_err(hsotg->dev,
959                                 "%s: Unhandled descriptor error status (%d)\n",
960                                 __func__, halt_status);
961                         break;
962                 }
963                 return 1;
964         }
965
966         if (dma_desc->status & HOST_DMA_A) {
967                 dev_vdbg(hsotg->dev,
968                          "Active descriptor encountered on channel %d\n",
969                          chan->hc_num);
970                 return 0;
971         }
972
973         if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
974                 if (qtd->control_phase == DWC2_CONTROL_DATA) {
975                         urb->actual_length += n_bytes - remain;
976                         if (remain || urb->actual_length >= urb->length) {
977                                 /*
978                                  * For the Control Data stage do not set
979                                  * urb->status to 0, to prevent the URB callback.
980                                  * Set it when the Status phase is done. See below.
981                                  */
982                                 *xfer_done = 1;
983                         }
984                 } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
985                         urb->status = 0;
986                         *xfer_done = 1;
987                 }
988                 /* No handling for SETUP stage */
989         } else {
990                 /* BULK and INTR */
991                 urb->actual_length += n_bytes - remain;
992                 dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
993                          urb->actual_length);
994                 if (remain || urb->actual_length >= urb->length) {
995                         urb->status = 0;
996                         *xfer_done = 1;
997                 }
998         }
999
1000         return 0;
1001 }
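
/*
 * Worked example for the function above (illustrative values): a bulk
 * IN descriptor programmed for n_bytes = 512 that actually received
 * 100 bytes completes with remain = 412, so urb->actual_length grows
 * by 100 and the short packet (remain != 0) marks the transfer done.
 */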
1002
1003 static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1004                                       struct dwc2_host_chan *chan,
1005                                       int chnum, struct dwc2_qtd *qtd,
1006                                       int desc_num,
1007                                       enum dwc2_halt_status halt_status,
1008                                       int *xfer_done)
1009 {
1010         struct dwc2_qh *qh = chan->qh;
1011         struct dwc2_hcd_urb *urb = qtd->urb;
1012         struct dwc2_hcd_dma_desc *dma_desc;
1013         u32 n_bytes;
1014         int failed;
1015
1016         dev_vdbg(hsotg->dev, "%s()\n", __func__);
1017
1018         dma_desc = &qh->desc_list[desc_num];
1019         n_bytes = qh->n_bytes[desc_num];
1020         dev_vdbg(hsotg->dev,
1021                  "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1022                  qtd, urb, desc_num, dma_desc, n_bytes);
1023         failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1024                                                      halt_status, n_bytes,
1025                                                      xfer_done);
1026         if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1027                 dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
1028                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1029                 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
1030                          failed, *xfer_done, urb->status);
1031                 return failed;
1032         }
1033
1034         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1035                 switch (qtd->control_phase) {
1036                 case DWC2_CONTROL_SETUP:
1037                         if (urb->length > 0)
1038                                 qtd->control_phase = DWC2_CONTROL_DATA;
1039                         else
1040                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1041                         dev_vdbg(hsotg->dev,
1042                                  "  Control setup transaction done\n");
1043                         break;
1044                 case DWC2_CONTROL_DATA:
1045                         if (*xfer_done) {
1046                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1047                                 dev_vdbg(hsotg->dev,
1048                                          "  Control data transfer done\n");
1049                         } else if (desc_num + 1 == qtd->n_desc) {
1050                                 /*
1051                                  * Last descriptor for Control data stage which
1052                                  * is not completed yet
1053                                  */
1054                                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1055                                                           qtd);
1056                         }
1057                         break;
1058                 default:
1059                         break;
1060                 }
1061         }
1062
1063         return 0;
1064 }
1065
1066 static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1067                                              struct dwc2_host_chan *chan,
1068                                              int chnum,
1069                                              enum dwc2_halt_status halt_status)
1070 {
1071         struct list_head *qtd_item, *qtd_tmp;
1072         struct dwc2_qh *qh = chan->qh;
1073         struct dwc2_qtd *qtd = NULL;
1074         int xfer_done;
1075         int desc_num = 0;
1076
1077         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1078                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1079                         qtd->in_process = 0;
1080                 return;
1081         }
1082
1083         list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1084                 int i;
1085
1086                 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1087                 xfer_done = 0;
1088
1089                 for (i = 0; i < qtd->n_desc; i++) {
1090                         if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1091                                                        desc_num, halt_status,
1092                                                        &xfer_done))
1093                                 break;
1094                         desc_num++;
1095                 }
1096         }
1097
1098         if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1099                 /*
1100                  * Resetting the data toggle for bulk and interrupt endpoints
1101                  * in case of stall. See handle_hc_stall_intr().
1102                  */
1103                 if (halt_status == DWC2_HC_XFER_STALL)
1104                         qh->data_toggle = DWC2_HC_PID_DATA0;
1105                 else if (qtd)
1106                         dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1107         }
1108
1109         if (halt_status == DWC2_HC_XFER_COMPLETE) {
1110                 if (chan->hcint & HCINTMSK_NYET) {
1111                         /*
1112                          * Got a NYET on the last transaction of the transfer.
1113                          * It means that the endpoint should be in the PING
1114                          * state at the beginning of the next transfer.
1115                          */
1116                         qh->ping_state = 1;
1117                 }
1118         }
1119 }
1120
1121 /**
1122  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
1123  * status and calls completion routine for the URB if it's done. Called from
1124  * interrupt handlers.
1125  *
1126  * @hsotg:       The HCD state structure for the DWC OTG controller
1127  * @chan:        Host channel the transfer is completed on
1128  * @chnum:       Index of Host channel registers
1129  * @halt_status: Reason the channel is being halted or just XferComplete
1130  *               for isochronous transfers
1131  *
1132  * Releases the channel to be used by other transfers.
1133  * For an Isochronous endpoint the channel is not halted until the end of
1134  * the session, i.e. until the QTD list is empty.
1135  * If a periodic channel is released, the FrameList is updated accordingly.
1136  * Calls transaction selection routines to activate pending transfers.
1137  */
1138 void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1139                                  struct dwc2_host_chan *chan, int chnum,
1140                                  enum dwc2_halt_status halt_status)
1141 {
1142         struct dwc2_qh *qh = chan->qh;
1143         int continue_isoc_xfer = 0;
1144         enum dwc2_transaction_type tr_type;
1145
1146         if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1147                 dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1148
1149                 /* Release the channel if halted or session completed */
1150                 if (halt_status != DWC2_HC_XFER_COMPLETE ||
1151                     list_empty(&qh->qtd_list)) {
1152                         /* Halt the channel if session completed */
1153                         if (halt_status == DWC2_HC_XFER_COMPLETE)
1154                                 dwc2_hc_halt(hsotg, chan, halt_status);
1155                         dwc2_release_channel_ddma(hsotg, qh);
1156                         dwc2_hcd_qh_unlink(hsotg, qh);
1157                 } else {
1158                         /* Keep in assigned schedule to continue transfer */
1159                         list_move(&qh->qh_list_entry,
1160                                   &hsotg->periodic_sched_assigned);
1161                         continue_isoc_xfer = 1;
1162                 }
1163                 /*
1164                  * TODO: Consider the case where the period exceeds the FrameList
1165                  * size. The Frame Rollover interrupt should be used.
1166                  */
1167         } else {
1168                 /*
1169                  * Scan descriptor list to complete the URB(s), then release
1170                  * the channel
1171                  */
1172                 dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1173                                                  halt_status);
1174                 dwc2_release_channel_ddma(hsotg, qh);
1175                 dwc2_hcd_qh_unlink(hsotg, qh);
1176
1177                 if (!list_empty(&qh->qtd_list)) {
1178                         /*
1179                          * Add back to inactive non-periodic schedule on normal
1180                          * completion
1181                          */
1182                         dwc2_hcd_qh_add(hsotg, qh);
1183                 }
1184         }
1185
1186         tr_type = dwc2_hcd_select_transactions(hsotg);
1187         if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1188                 if (continue_isoc_xfer) {
1189                         if (tr_type == DWC2_TRANSACTION_NONE)
1190                                 tr_type = DWC2_TRANSACTION_PERIODIC;
1191                         else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1192                                 tr_type = DWC2_TRANSACTION_ALL;
1193                 }
1194                 dwc2_hcd_queue_transactions(hsotg, tr_type);
1195         }
1196 }