2 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
4 * 2013 (c) Aeroflex Gaisler AB
6 * This driver supports GRUSBDC USB Device Controller cores available in the
7 * GRLIB VHDL IP core library.
9 * Full documentation of the GRUSBDC core can be found here:
10 * http://www.gaisler.com/products/grlib/grip.pdf
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * - Andreas Larsson <andreas@gaisler.com>
23 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
24 * individually configurable to any of the four USB transfer types. This driver
25 * only supports cores in DMA mode.
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/list.h>
34 #include <linux/interrupt.h>
35 #include <linux/device.h>
36 #include <linux/usb/ch9.h>
37 #include <linux/usb/gadget.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dmapool.h>
40 #include <linux/debugfs.h>
41 #include <linux/seq_file.h>
42 #include <linux/of_platform.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_address.h>
46 #include <asm/byteorder.h>
50 #define DRIVER_NAME "gr_udc"
51 #define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
53 static const char driver_name[] = DRIVER_NAME;
54 static const char driver_desc[] = DRIVER_DESC;
56 #define gr_read32(x) (ioread32be((x)))
57 #define gr_write32(x, v) (iowrite32be((v), (x)))
59 /* USB speed and corresponding string calculated from status register value */
60 #define GR_SPEED(status) \
61 ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
62 #define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
64 /* Size of hardware buffer calculated from epctrl register value */
65 #define GR_BUFFER_SIZE(epctrl) \
66 ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
67 GR_EPCTRL_BUFSZ_SCALER)
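/*
 * Illustrative note (added for clarity, not from the GRIP manual): GR_SPEED()
 * maps the GR_STATUS_SP bit to USB_SPEED_FULL when set and USB_SPEED_HIGH when
 * clear, so GR_SPEED_STR() yields "full-speed" or "high-speed" respectively.
 * GR_BUFFER_SIZE() just extracts the BUFSZ field of epctrl and scales it, so a
 * field value of N corresponds to N * GR_EPCTRL_BUFSZ_SCALER bytes of hardware
 * buffer for that endpoint.
 */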
69 /* ---------------------------------------------------------------------- */
70 /* Debug printout functionality */
72 static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
74 static const char *gr_ep0state_string(enum gr_ep0state state)
76 static const char *const names[] = {
77 [GR_EP0_DISCONNECT] = "disconnect",
78 [GR_EP0_SETUP] = "setup",
79 [GR_EP0_IDATA] = "idata",
80 [GR_EP0_ODATA] = "odata",
81 [GR_EP0_ISTATUS] = "istatus",
82 [GR_EP0_OSTATUS] = "ostatus",
83 [GR_EP0_STALL] = "stall",
84 [GR_EP0_SUSPEND] = "suspend",
87 if (state < 0 || state >= ARRAY_SIZE(names))
95 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
96 struct gr_request *req)
98 int buflen = ep->is_in ? req->req.length : req->req.actual;
100 int plen = min(rowlen, buflen);
102 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
103 (buflen > plen ? " (truncated)" : ""));
104 print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
105 rowlen, 4, req->req.buf, plen, false);
108 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
109 u16 value, u16 index, u16 length)
111 dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
112 type, request, value, index, length);
114 #else /* !VERBOSE_DEBUG */
116 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
117 struct gr_request *req) {}
119 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
120 u16 value, u16 index, u16 length) {}
122 #endif /* VERBOSE_DEBUG */
124 /* ---------------------------------------------------------------------- */
125 /* Debugfs functionality */
127 #ifdef CONFIG_USB_GADGET_DEBUG_FS
129 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
131 u32 epctrl = gr_read32(&ep->regs->epctrl);
132 u32 epstat = gr_read32(&ep->regs->epstat);
133 int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
134 struct gr_request *req;
136 seq_printf(seq, "%s:\n", ep->ep.name);
137 seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
138 seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
139 seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
140 seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
141 seq_printf(seq, " dma_start = %d\n", ep->dma_start);
142 seq_printf(seq, " stopped = %d\n", ep->stopped);
143 seq_printf(seq, " wedged = %d\n", ep->wedged);
144 seq_printf(seq, " callback = %d\n", ep->callback);
145 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
146 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
147 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
148 if (mode == 1 || mode == 3)
149 seq_printf(seq, " nt = %d\n",
150 (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
152 seq_printf(seq, " Buffer 0: %s %s%d\n",
153 epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
154 epstat & GR_EPSTAT_BS ? " " : "selected ",
155 (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
156 seq_printf(seq, " Buffer 1: %s %s%d\n",
157 epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
158 epstat & GR_EPSTAT_BS ? "selected " : " ",
159 (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
161 if (list_empty(&ep->queue)) {
162 seq_puts(seq, " Queue: empty\n\n");
166 seq_puts(seq, " Queue:\n");
167 list_for_each_entry(req, &ep->queue, queue) {
168 struct gr_dma_desc *desc;
169 struct gr_dma_desc *next;
171 seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
172 &req->req.buf, req->req.actual, req->req.length);
174 next = req->first_desc;
177 next = desc->next_desc;
178 seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
179 desc == req->curr_desc ? 'c' : ' ',
180 desc, desc->paddr, desc->ctrl, desc->data);
181 } while (desc != req->last_desc);
187 static int gr_seq_show(struct seq_file *seq, void *v)
189 struct gr_udc *dev = seq->private;
190 u32 control = gr_read32(&dev->regs->control);
191 u32 status = gr_read32(&dev->regs->status);
194 seq_printf(seq, "usb state = %s\n",
195 usb_state_string(dev->gadget.state));
196 seq_printf(seq, "address = %d\n",
197 (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
198 seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
199 seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
200 seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
201 seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
202 seq_printf(seq, "test_mode = %d\n", dev->test_mode);
205 list_for_each_entry(ep, &dev->ep_list, ep_list)
206 gr_seq_ep_show(seq, ep);
211 static int gr_dfs_open(struct inode *inode, struct file *file)
213 return single_open(file, gr_seq_show, inode->i_private);
216 static const struct file_operations gr_dfs_fops = {
217 .owner = THIS_MODULE,
221 .release = single_release,
224 static void gr_dfs_create(struct gr_udc *dev)
226 const char *name = "gr_udc_state";
228 dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
229 dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
233 static void gr_dfs_delete(struct gr_udc *dev)
235 /* Handles NULL and ERR pointers internally */
236 debugfs_remove(dev->dfs_state);
237 debugfs_remove(dev->dfs_root);
240 #else /* !CONFIG_USB_GADGET_DEBUG_FS */
242 static void gr_dfs_create(struct gr_udc *dev) {}
243 static void gr_dfs_delete(struct gr_udc *dev) {}
245 #endif /* CONFIG_USB_GADGET_DEBUG_FS */
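/*
 * Usage sketch (assumption: debugfs mounted at /sys/kernel/debug): with
 * CONFIG_USB_GADGET_DEBUG_FS enabled, the state dumped by gr_seq_show() can be
 * read at runtime with something like
 *
 *   cat /sys/kernel/debug/<device name>/gr_udc_state
 *
 * where <device name> is whatever dev_name(dev->dev) evaluates to for the
 * platform device; the exact path depends on the platform and mount point.
 */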
247 /* ---------------------------------------------------------------------- */
248 /* DMA and request handling */
250 /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
251 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
254 struct gr_dma_desc *dma_desc;
256 dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
258 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
262 memset(dma_desc, 0, sizeof(*dma_desc));
263 dma_desc->paddr = paddr;
268 static inline void gr_free_dma_desc(struct gr_udc *dev,
269 struct gr_dma_desc *desc)
271 dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
274 /* Frees the chain of struct gr_dma_desc for the given request */
275 static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
277 struct gr_dma_desc *desc;
278 struct gr_dma_desc *next;
280 next = req->first_desc;
286 next = desc->next_desc;
287 gr_free_dma_desc(dev, desc);
288 } while (desc != req->last_desc);
290 req->first_desc = NULL;
291 req->curr_desc = NULL;
292 req->last_desc = NULL;
295 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
298 * Frees allocated resources and calls the appropriate completion function/setup
299 * packet handler for a finished request.
301 * Must be called with dev->lock held and irqs disabled.
303 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
305 __releases(&dev->lock)
306 __acquires(&dev->lock)
310 list_del_init(&req->queue);
312 if (likely(req->req.status == -EINPROGRESS))
313 req->req.status = status;
315 status = req->req.status;
318 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
319 gr_free_dma_desc_chain(dev, req);
321 if (ep->is_in) /* For OUT, actual gets updated bit by bit */
322 req->req.actual = req->req.length;
326 gr_dbgprint_request("SENT", ep, req);
328 gr_dbgprint_request("RECV", ep, req);
331 /* Prevent changes to ep->queue during callback */
333 if (req == dev->ep0reqo && !status) {
335 gr_ep0_setup(dev, req);
338 "Unexpected non setup packet on ep0in\n");
339 } else if (req->req.complete) {
340 spin_unlock(&dev->lock);
342 req->req.complete(&ep->ep, &req->req);
344 spin_lock(&dev->lock);
349 static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
351 struct gr_request *req;
353 req = kzalloc(sizeof(*req), gfp_flags);
357 INIT_LIST_HEAD(&req->queue);
363 * Starts DMA for endpoint ep if there are requests in the queue.
365 * Must be called with dev->lock held and with !ep->stopped.
367 static void gr_start_dma(struct gr_ep *ep)
369 struct gr_request *req;
372 if (list_empty(&ep->queue)) {
377 req = list_first_entry(&ep->queue, struct gr_request, queue);
379 /* A descriptor should already have been allocated */
380 BUG_ON(!req->curr_desc);
382 wmb(); /* Make sure all is settled before handing it over to DMA */
384 /* Set the descriptor pointer in the hardware */
385 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
387 /* Announce available descriptors */
388 dmactrl = gr_read32(&ep->regs->dmactrl);
389 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
395 * Finishes the first request in the ep's queue and, if available, starts the
396 * next request in queue.
398 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
400 static void gr_dma_advance(struct gr_ep *ep, int status)
402 struct gr_request *req;
404 req = list_first_entry(&ep->queue, struct gr_request, queue);
405 gr_finish_request(ep, req, status);
406 gr_start_dma(ep); /* Regardless of ep->dma_start */
410 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
411 * transfer to be canceled and clears GR_DMACTRL_DA.
413 * Must be called with dev->lock held.
415 static void gr_abort_dma(struct gr_ep *ep)
419 dmactrl = gr_read32(&ep->regs->dmactrl);
420 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
424 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
427 * Size is not used for OUT endpoints. The hardware cannot be instructed to
428 * handle a buffer smaller than MAXPL in the OUT direction.
430 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
431 dma_addr_t data, unsigned size, gfp_t gfp_flags)
433 struct gr_dma_desc *desc;
435 desc = gr_alloc_dma_desc(ep, gfp_flags);
442 (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
444 desc->ctrl = GR_DESC_OUT_CTRL_IE;
446 if (!req->first_desc) {
447 req->first_desc = desc;
448 req->curr_desc = desc;
450 req->last_desc->next_desc = desc;
451 req->last_desc->next = desc->paddr;
452 req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
454 req->last_desc = desc;
460 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
461 * together cover req->req.length bytes of the buffer at DMA address
462 * req->req.dma for the OUT direction.
464 * The first descriptor in the chain is enabled, the rest disabled. The
465 * interrupt handler will later enable them one by one when needed so we can
466 * find out when the transfer is finished. For OUT endpoints, all descriptors
467 * therefore generate interrupts.
469 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
472 u16 bytes_left; /* Bytes left to provide descriptors for */
473 u16 bytes_used; /* Bytes accommodated for */
476 req->first_desc = NULL; /* Signals that no allocation is done yet */
477 bytes_left = req->req.length;
479 while (bytes_left > 0) {
480 dma_addr_t start = req->req.dma + bytes_used;
481 u16 size = min(bytes_left, ep->bytes_per_buffer);
483 /* Should not happen however - gr_queue stops such lengths */
484 if (size < ep->bytes_per_buffer)
485 dev_warn(ep->dev->dev,
486 "Buffer overrun risk: %u < %u bytes/buffer\n",
487 size, ep->bytes_per_buffer);
489 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
497 req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
502 gr_free_dma_desc_chain(ep->dev, req);
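/*
 * Worked example for the OUT case above (illustrative, assuming
 * ep->bytes_per_buffer == 512): a 1024 byte OUT request gets two descriptors
 * covering 512 bytes each of req->req.dma. Only the first is created with
 * GR_DESC_OUT_CTRL_EN set; gr_handle_out_ep() enables the next one after the
 * first buffer has been filled, which is how per-buffer completion is tracked.
 */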
508 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
509 * together cover req->req.length bytes of the buffer at DMA address
510 * req->req.dma for the IN direction.
512 * When more data is provided than the maximum payload size, the hardware splits
513 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
514 * is always set to a multiple of the maximum payload (restricted to the valid
515 * number of maximum payloads during high bandwidth isochronous or interrupt
518 * All descriptors are enabled from the beginning and we only generate an
519 * interrupt for the last one indicating that the entire request has been pushed
522 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
525 u16 bytes_left; /* Bytes left in req to provide descriptors for */
526 u16 bytes_used; /* Bytes in req accommodated for */
529 req->first_desc = NULL; /* Signals that no allocation is done yet */
530 bytes_left = req->req.length;
532 do { /* Allow for zero length packets */
533 dma_addr_t start = req->req.dma + bytes_used;
534 u16 size = min(bytes_left, ep->bytes_per_buffer);
536 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
542 } while (bytes_left > 0);
545 * Send an extra zero length packet to indicate that no more data is
546 * available when req->req.zero is set and the data length is an exact
547 * multiple of ep->ep.maxpacket.
549 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
550 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
556 * For IN packets we only want to know when the last packet has been
557 * transmitted (not just put into internal buffers).
559 req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
564 gr_free_dma_desc_chain(ep->dev, req);
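/*
 * Worked example for the IN case above (illustrative, assuming
 * ep->ep.maxpacket == 512 and ep->bytes_per_buffer == 1024): a 2048 byte IN
 * request with req->req.zero set yields two 1024 byte descriptors plus one
 * zero length descriptor, since 2048 is a multiple of maxpacket. All of them
 * are enabled up front, but only the last carries GR_DESC_IN_CTRL_PI, so a
 * single interrupt fires once the whole request has been transmitted.
 */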
569 /* Must be called with dev->lock held */
570 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
572 struct gr_udc *dev = ep->dev;
575 if (unlikely(!ep->ep.desc && ep->num != 0)) {
576 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
580 if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
582 "Invalid request for %s: buf=%p list_empty=%d\n",
583 ep->ep.name, req->req.buf, list_empty(&req->queue));
588 * The DMA controller cannot handle OUT buffers smaller than
589 * maxpacket. That could lead to buffer overruns if unexpectedly long
590 * packets are received.
592 if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
594 "OUT request length %d is not multiple of maxpacket\n",
599 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
600 dev_err(dev->dev, "-ESHUTDOWN");
604 /* Can't touch registers when suspended */
605 if (dev->ep0state == GR_EP0_SUSPEND) {
606 dev_err(dev->dev, "-EBUSY");
610 /* Set up DMA mapping in case the caller didn't */
611 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
613 dev_err(dev->dev, "usb_gadget_map_request");
618 ret = gr_setup_in_desc_list(ep, req, gfp_flags);
620 ret = gr_setup_out_desc_list(ep, req, gfp_flags);
624 req->req.status = -EINPROGRESS;
626 list_add_tail(&req->queue, &ep->queue);
628 /* Start DMA if not started, otherwise interrupt handler handles it */
629 if (!ep->dma_start && likely(!ep->stopped))
636 * Queue a request from within the driver.
638 * Must be called with dev->lock held.
640 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
644 gr_dbgprint_request("RESP", ep, req);
646 return gr_queue(ep, req, gfp_flags);
649 /* ---------------------------------------------------------------------- */
650 /* General helper functions */
653 * Dequeue ALL requests.
655 * Must be called with dev->lock held and irqs disabled.
657 static void gr_ep_nuke(struct gr_ep *ep)
659 struct gr_request *req;
665 while (!list_empty(&ep->queue)) {
666 req = list_first_entry(&ep->queue, struct gr_request, queue);
667 gr_finish_request(ep, req, -ESHUTDOWN);
672 * Reset the hardware state of this endpoint.
674 * Must be called with dev->lock held.
676 static void gr_ep_reset(struct gr_ep *ep)
678 gr_write32(&ep->regs->epctrl, 0);
679 gr_write32(&ep->regs->dmactrl, 0);
681 ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
688 * Generate STALL on ep0in/out.
690 * Must be called with dev->lock held.
692 static void gr_control_stall(struct gr_udc *dev)
696 epctrl = gr_read32(&dev->epo[0].regs->epctrl);
697 gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
698 epctrl = gr_read32(&dev->epi[0].regs->epctrl);
699 gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
701 dev->ep0state = GR_EP0_STALL;
705 * Halts, halts and wedges, or clears halt for an endpoint.
707 * Must be called with dev->lock held.
709 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
714 if (ep->num && !ep->ep.desc)
717 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
720 /* Never actually halt ep0, and therefore never clear halt for ep0 */
722 if (halt && !fromhost) {
723 /* ep0 halt from gadget - generate protocol stall */
724 gr_control_stall(ep->dev);
725 dev_dbg(ep->dev->dev, "EP: stall ep0\n");
731 dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
732 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
734 epctrl = gr_read32(&ep->regs->epctrl);
737 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
742 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
746 /* Things might have been queued up in the meantime */
754 /* Must be called with dev->lock held */
755 static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
757 if (dev->ep0state != value)
758 dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
759 gr_ep0state_string(value));
760 dev->ep0state = value;
764 * Should only be called when endpoints cannot generate interrupts.
766 * Must be called with dev->lock held.
768 static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
770 gr_write32(&dev->regs->control, 0);
771 wmb(); /* Make sure that we do not deny one of our interrupts */
772 dev->irq_enabled = 0;
776 * Stop all device activity and disable data line pullup.
778 * Must be called with dev->lock held and irqs disabled.
780 static void gr_stop_activity(struct gr_udc *dev)
784 list_for_each_entry(ep, &dev->ep_list, ep_list)
787 gr_disable_interrupts_and_pullup(dev);
789 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
790 usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
793 /* ---------------------------------------------------------------------- */
794 /* ep0 setup packet handling */
796 static void gr_ep0_testmode_complete(struct usb_ep *_ep,
797 struct usb_request *_req)
803 ep = container_of(_ep, struct gr_ep, ep);
806 spin_lock(&dev->lock);
808 control = gr_read32(&dev->regs->control);
809 control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
810 gr_write32(&dev->regs->control, control);
812 spin_unlock(&dev->lock);
815 static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
817 /* Nothing needs to be done here */
821 * Queue a response on ep0in.
823 * Must be called with dev->lock held.
825 static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
826 void (*complete)(struct usb_ep *ep,
827 struct usb_request *req))
829 u8 *reqbuf = dev->ep0reqi->req.buf;
833 for (i = 0; i < length; i++)
835 dev->ep0reqi->req.length = length;
836 dev->ep0reqi->req.complete = complete;
838 status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
841 "Could not queue ep0in setup response: %d\n", status);
847 * Queue a 2 byte response on ep0in.
849 * Must be called with dev->lock held.
851 static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
853 __le16 le_response = cpu_to_le16(response);
855 return gr_ep0_respond(dev, (u8 *)&le_response, 2,
856 gr_ep0_dummy_complete);
860 * Queue a ZLP response on ep0in.
862 * Must be called with dev->lock held.
864 static inline int gr_ep0_respond_empty(struct gr_udc *dev)
866 return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
870 * This is run when a SET_ADDRESS request is received. First writes
871 * the new address to the control register which is updated internally
872 * when the next IN packet is ACKED.
874 * Must be called with dev->lock held.
876 static void gr_set_address(struct gr_udc *dev, u8 address)
880 control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
881 control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
882 control |= GR_CONTROL_SU;
883 gr_write32(&dev->regs->control, control);
887 * Returns negative for STALL, 0 for successful handling and positive for
890 * Must be called with dev->lock held.
892 static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
893 u16 value, u16 index)
899 case USB_REQ_SET_ADDRESS:
900 dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
901 gr_set_address(dev, value & 0xff);
903 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
905 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
906 return gr_ep0_respond_empty(dev);
908 case USB_REQ_GET_STATUS:
909 /* Bit 0: self powered, bit 1: remote wakeup */
910 response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
911 return gr_ep0_respond_u16(dev, response);
913 case USB_REQ_SET_FEATURE:
915 case USB_DEVICE_REMOTE_WAKEUP:
916 /* Allow remote wakeup */
917 dev->remote_wakeup = 1;
918 return gr_ep0_respond_empty(dev);
920 case USB_DEVICE_TEST_MODE:
921 /* The hardware does not support TEST_FORCE_EN */
923 if (test >= TEST_J && test <= TEST_PACKET) {
924 dev->test_mode = test;
925 return gr_ep0_respond(dev, NULL, 0,
926 gr_ep0_testmode_complete);
931 case USB_REQ_CLEAR_FEATURE:
933 case USB_DEVICE_REMOTE_WAKEUP:
934 /* Disallow remote wakeup */
935 dev->remote_wakeup = 0;
936 return gr_ep0_respond_empty(dev);
941 return 1; /* Delegate the rest */
945 * Returns negative for STALL, 0 for successful handling and positive for
948 * Must be called with dev->lock held.
950 static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
951 u16 value, u16 index)
953 if (dev->gadget.state != USB_STATE_CONFIGURED)
957 * Should return STALL for invalid interfaces, but udc driver does not
958 * know anything about that. However, many gadget drivers do not handle
959 * GET_STATUS so we need to take care of that.
963 case USB_REQ_GET_STATUS:
964 return gr_ep0_respond_u16(dev, 0x0000);
966 case USB_REQ_SET_FEATURE:
967 case USB_REQ_CLEAR_FEATURE:
969 * No possible valid standard requests. Still let gadget drivers
975 return 1; /* Delegate the rest */
979 * Returns negative for STALL, 0 for successful handling and positive for
982 * Must be called with dev->lock held.
984 static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
985 u16 value, u16 index)
990 u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
991 u8 is_in = index & USB_ENDPOINT_DIR_MASK;
993 if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
996 if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
999 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
1002 case USB_REQ_GET_STATUS:
1003 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
1004 return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
1006 case USB_REQ_SET_FEATURE:
1008 case USB_ENDPOINT_HALT:
1009 status = gr_ep_halt_wedge(ep, 1, 0, 1);
1011 status = gr_ep0_respond_empty(dev);
1016 case USB_REQ_CLEAR_FEATURE:
1018 case USB_ENDPOINT_HALT:
1021 status = gr_ep_halt_wedge(ep, 0, 0, 1);
1023 status = gr_ep0_respond_empty(dev);
1029 return 1; /* Delegate the rest */
1032 /* Must be called with dev->lock held */
1033 static void gr_ep0out_requeue(struct gr_udc *dev)
1035 int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
1038 dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
1043 * The main function dealing with setup requests on ep0.
1045 * Must be called with dev->lock held and irqs disabled
1047 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
1048 __releases(&dev->lock)
1049 __acquires(&dev->lock)
1052 struct usb_ctrlrequest ctrl;
1064 /* Restore from ep0 halt */
1065 if (dev->ep0state == GR_EP0_STALL) {
1066 gr_set_ep0state(dev, GR_EP0_SETUP);
1067 if (!req->req.actual)
1071 if (dev->ep0state == GR_EP0_ISTATUS) {
1072 gr_set_ep0state(dev, GR_EP0_SETUP);
1073 if (req->req.actual > 0)
1075 "Unexpected setup packet at state %s\n",
1076 gr_ep0state_string(GR_EP0_ISTATUS));
1078 goto out; /* Got expected ZLP */
1079 } else if (dev->ep0state != GR_EP0_SETUP) {
1081 "Unexpected ep0out request at state %s - stalling\n",
1082 gr_ep0state_string(dev->ep0state));
1083 gr_control_stall(dev);
1084 gr_set_ep0state(dev, GR_EP0_SETUP);
1086 } else if (!req->req.actual) {
1087 dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
1088 gr_ep0state_string(dev->ep0state));
1092 /* Handle SETUP packet */
1093 for (i = 0; i < req->req.actual; i++)
1094 u.raw[i] = ((u8 *)req->req.buf)[i];
1096 type = u.ctrl.bRequestType;
1097 request = u.ctrl.bRequest;
1098 value = le16_to_cpu(u.ctrl.wValue);
1099 index = le16_to_cpu(u.ctrl.wIndex);
1100 length = le16_to_cpu(u.ctrl.wLength);
1102 gr_dbgprint_devreq(dev, type, request, value, index, length);
1104 /* Check for data stage */
1106 if (type & USB_DIR_IN)
1107 gr_set_ep0state(dev, GR_EP0_IDATA);
1109 gr_set_ep0state(dev, GR_EP0_ODATA);
1112 status = 1; /* Positive status flags delegation */
1113 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1114 switch (type & USB_RECIP_MASK) {
1115 case USB_RECIP_DEVICE:
1116 status = gr_device_request(dev, type, request,
1119 case USB_RECIP_ENDPOINT:
1120 status = gr_endpoint_request(dev, type, request,
1123 case USB_RECIP_INTERFACE:
1124 status = gr_interface_request(dev, type, request,
1131 spin_unlock(&dev->lock);
1133 dev_vdbg(dev->dev, "DELEGATE\n");
1134 status = dev->driver->setup(&dev->gadget, &u.ctrl);
1136 spin_lock(&dev->lock);
1139 /* Generate STALL on both ep0out and ep0in if requested */
1140 if (unlikely(status < 0)) {
1141 dev_vdbg(dev->dev, "STALL\n");
1142 gr_control_stall(dev);
1145 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
1146 request == USB_REQ_SET_CONFIGURATION) {
1148 dev_dbg(dev->dev, "STATUS: deconfigured\n");
1149 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
1150 } else if (status >= 0) {
1151 /* Not configured unless the gadget OKs it */
1152 dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
1153 usb_gadget_set_state(&dev->gadget,
1154 USB_STATE_CONFIGURED);
1158 /* Get ready for next stage */
1159 if (dev->ep0state == GR_EP0_ODATA)
1160 gr_set_ep0state(dev, GR_EP0_OSTATUS);
1161 else if (dev->ep0state == GR_EP0_IDATA)
1162 gr_set_ep0state(dev, GR_EP0_ISTATUS);
1164 gr_set_ep0state(dev, GR_EP0_SETUP);
1167 gr_ep0out_requeue(dev);
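/*
 * Added summary of the ep0 state values traversed by the code above and by
 * gr_handle_out_ep() further down (for reference only):
 *
 *   SETUP -> IDATA -> ISTATUS -> SETUP   control read: IN data stage, then the
 *                                        host's OUT ZLP status returns to SETUP
 *   SETUP -> ODATA -> OSTATUS -> SETUP   control write: OUT data stage, then an
 *                                        IN ZLP status is queued and we return
 *                                        to SETUP
 *   SETUP -> SETUP                       no data stage
 *
 * A negative return from the request handlers or from the gadget's setup()
 * callback stalls both ep0 directions via gr_control_stall().
 */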
1170 /* ---------------------------------------------------------------------- */
1171 /* VBUS and USB reset handling */
1173 /* Must be called with dev->lock held and irqs disabled */
1174 static void gr_vbus_connected(struct gr_udc *dev, u32 status)
1178 dev->gadget.speed = GR_SPEED(status);
1179 usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
1181 /* Turn on full interrupts and pullup */
1182 control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
1183 GR_CONTROL_SP | GR_CONTROL_EP);
1184 gr_write32(&dev->regs->control, control);
1187 /* Must be called with dev->lock held */
1188 static void gr_enable_vbus_detect(struct gr_udc *dev)
1192 dev->irq_enabled = 1;
1193 wmb(); /* Make sure we do not ignore an interrupt */
1194 gr_write32(&dev->regs->control, GR_CONTROL_VI);
1196 /* Take care of the case where we are already plugged in at this point */
1197 status = gr_read32(&dev->regs->status);
1198 if (status & GR_STATUS_VB)
1199 gr_vbus_connected(dev, status);
1202 /* Must be called with dev->lock held and irqs disabled */
1203 static void gr_vbus_disconnected(struct gr_udc *dev)
1205 gr_stop_activity(dev);
1207 /* Report disconnect */
1208 if (dev->driver && dev->driver->disconnect) {
1209 spin_unlock(&dev->lock);
1211 dev->driver->disconnect(&dev->gadget);
1213 spin_lock(&dev->lock);
1216 gr_enable_vbus_detect(dev);
1219 /* Must be called with dev->lock held and irqs disabled */
1220 static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
1222 gr_set_address(dev, 0);
1223 gr_set_ep0state(dev, GR_EP0_SETUP);
1224 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
1225 dev->gadget.speed = GR_SPEED(status);
1227 gr_ep_nuke(&dev->epo[0]);
1228 gr_ep_nuke(&dev->epi[0]);
1229 dev->epo[0].stopped = 0;
1230 dev->epi[0].stopped = 0;
1231 gr_ep0out_requeue(dev);
1234 /* ---------------------------------------------------------------------- */
1238 * Handles interrupts from IN endpoints. Returns whether something was handled.
1240 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1242 static int gr_handle_in_ep(struct gr_ep *ep)
1244 struct gr_request *req;
1246 req = list_first_entry(&ep->queue, struct gr_request, queue);
1247 if (!req->last_desc)
1250 if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
1251 return 0; /* Not put in hardware buffers yet */
1253 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
1254 return 0; /* Not transmitted yet, still in hardware buffers */
1256 /* Write complete */
1257 gr_dma_advance(ep, 0);
1263 * Handles interrupts from OUT endpoints. Returns whether something was handled.
1265 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1267 static int gr_handle_out_ep(struct gr_ep *ep)
1272 struct gr_request *req;
1273 struct gr_udc *dev = ep->dev;
1275 req = list_first_entry(&ep->queue, struct gr_request, queue);
1276 if (!req->curr_desc)
1279 ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
1280 if (ctrl & GR_DESC_OUT_CTRL_EN)
1281 return 0; /* Not received yet */
1284 len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
1285 req->req.actual += len;
1286 if (ctrl & GR_DESC_OUT_CTRL_SE)
1289 if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
1290 /* Short packet or the expected size - we are done */
1292 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
1294 * Send a status stage ZLP to ack the DATA stage in the
1295 * OUT direction. This needs to be done before
1296 * gr_dma_advance as that can lead to a call to
1297 * ep0_setup that can change dev->ep0state.
1299 gr_ep0_respond_empty(dev);
1300 gr_set_ep0state(dev, GR_EP0_SETUP);
1303 gr_dma_advance(ep, 0);
1305 /* Not done yet. Enable the next descriptor to receive more. */
1306 req->curr_desc = req->curr_desc->next_desc;
1307 req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
1309 ep_dmactrl = gr_read32(&ep->regs->dmactrl);
1310 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
1317 * Handle state changes. Returns whether something was handled.
1319 * Must be called with dev->lock held and irqs disabled.
1321 static int gr_handle_state_changes(struct gr_udc *dev)
1323 u32 status = gr_read32(&dev->regs->status);
1325 int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
1326 dev->gadget.state == USB_STATE_ATTACHED);
1328 /* VBUS valid detected */
1329 if (!powstate && (status & GR_STATUS_VB)) {
1330 dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
1331 gr_vbus_connected(dev, status);
1336 if (powstate && !(status & GR_STATUS_VB)) {
1337 dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
1338 gr_vbus_disconnected(dev);
1342 /* USB reset detected */
1343 if (status & GR_STATUS_UR) {
1344 dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
1345 GR_SPEED_STR(status));
1346 gr_write32(&dev->regs->status, GR_STATUS_UR);
1347 gr_udc_usbreset(dev, status);
1352 if (dev->gadget.speed != GR_SPEED(status)) {
1353 dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
1354 GR_SPEED_STR(status));
1355 dev->gadget.speed = GR_SPEED(status);
1359 /* Going into suspend */
1360 if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
1361 dev_dbg(dev->dev, "STATUS: USB suspend\n");
1362 gr_set_ep0state(dev, GR_EP0_SUSPEND);
1363 dev->suspended_from = dev->gadget.state;
1364 usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);
1366 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
1367 dev->driver && dev->driver->suspend) {
1368 spin_unlock(&dev->lock);
1370 dev->driver->suspend(&dev->gadget);
1372 spin_lock(&dev->lock);
1377 /* Coming out of suspend */
1378 if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
1379 dev_dbg(dev->dev, "STATUS: USB resume\n");
1380 if (dev->suspended_from == USB_STATE_POWERED)
1381 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
1383 gr_set_ep0state(dev, GR_EP0_SETUP);
1384 usb_gadget_set_state(&dev->gadget, dev->suspended_from);
1386 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
1387 dev->driver && dev->driver->resume) {
1388 spin_unlock(&dev->lock);
1390 dev->driver->resume(&dev->gadget);
1392 spin_lock(&dev->lock);
1400 /* Threaded irq handler (runs in non-interrupt context) */
1401 static irqreturn_t gr_irq_handler(int irq, void *_dev)
1403 struct gr_udc *dev = _dev;
1407 unsigned long flags;
1409 spin_lock_irqsave(&dev->lock, flags);
1411 if (!dev->irq_enabled)
1415 * Check IN ep interrupts. We check these before the OUT eps because
1416 * some gadgets reuse a request that may still be outstanding and
1417 * needs to be completed first (mainly setup requests).
1419 for (i = 0; i < dev->nepi; i++) {
1421 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1422 handled = gr_handle_in_ep(ep) || handled;
1425 /* Check OUT ep interrupts */
1426 for (i = 0; i < dev->nepo; i++) {
1428 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1429 handled = gr_handle_out_ep(ep) || handled;
1432 /* Check status interrupts */
1433 handled = gr_handle_state_changes(dev) || handled;
1436 * Check AMBA DMA errors. Only check if we didn't find anything else to
1437 * handle because this shouldn't happen if we did everything right.
1440 list_for_each_entry(ep, &dev->ep_list, ep_list) {
1441 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
1443 "AMBA Error occurred for %s\n",
1451 spin_unlock_irqrestore(&dev->lock, flags);
1453 return handled ? IRQ_HANDLED : IRQ_NONE;
1456 /* Hard irq handler (interrupt context) */
1457 static irqreturn_t gr_irq(int irq, void *_dev)
1459 struct gr_udc *dev = _dev;
1461 if (!dev->irq_enabled)
1464 return IRQ_WAKE_THREAD;
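/*
 * Added note: gr_irq() above is the hard irq handler registered together with
 * the threaded handler gr_irq_handler() through devm_request_threaded_irq() in
 * gr_request_irq() below. It only checks dev->irq_enabled and defers all real
 * work - endpoint completion, state changes and AMBA error checks - to the
 * threaded handler, which runs outside hard irq context with dev->lock held.
 */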
1467 /* ---------------------------------------------------------------------- */
1470 /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
1471 static int gr_ep_enable(struct usb_ep *_ep,
1472 const struct usb_endpoint_descriptor *desc)
1479 u16 buffer_size = 0;
1482 ep = container_of(_ep, struct gr_ep, ep);
1483 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1488 /* 'ep0' IN and OUT are reserved */
1489 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1492 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1495 /* Make sure we are clear for enabling */
1496 epctrl = gr_read32(&ep->regs->epctrl);
1497 if (epctrl & GR_EPCTRL_EV)
1500 /* Check that directions match */
1501 if (!ep->is_in != !usb_endpoint_dir_in(desc))
1505 if ((!ep->is_in && ep->num >= dev->nepo) ||
1506 (ep->is_in && ep->num >= dev->nepi))
1509 if (usb_endpoint_xfer_control(desc)) {
1511 } else if (usb_endpoint_xfer_isoc(desc)) {
1513 } else if (usb_endpoint_xfer_bulk(desc)) {
1515 } else if (usb_endpoint_xfer_int(desc)) {
1518 dev_err(dev->dev, "Unknown transfer type for %s\n",
1524 * Bits 10-0 set the max payload. Bits 12-11 set the number of
1525 * additional transactions.
1527 max = 0x7ff & usb_endpoint_maxp(desc);
1528 nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
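/*
 * Example (illustrative): a high-bandwidth isochronous endpoint with
 * wMaxPacketSize = 0x1400 decodes to max = 0x400 (1024 bytes) and nt = 2,
 * i.e. up to three transactions per microframe, so the hardware buffer must
 * hold at least (nt + 1) * max = 3072 bytes.
 */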
1529 buffer_size = GR_BUFFER_SIZE(epctrl);
1530 if (nt && (mode == 0 || mode == 2)) {
1532 "%s mode: multiple trans./microframe not valid\n",
1533 (mode == 2 ? "Bulk" : "Control"));
1535 } else if (nt == 0x3) {
1536 dev_err(dev->dev, "Invalid value for trans./microframe\n");
1538 } else if ((nt + 1) * max > buffer_size) {
1539 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
1540 buffer_size, (nt + 1), max);
1542 } else if (max == 0) {
1543 dev_err(dev->dev, "Max payload cannot be set to 0\n");
1547 spin_lock(&ep->dev->lock);
1550 spin_unlock(&ep->dev->lock);
1557 ep->ep.maxpacket = max;
1563 * Maximum possible size of all payloads in one microframe
1564 * regardless of direction when using high-bandwidth mode.
1566 ep->bytes_per_buffer = (nt + 1) * max;
1567 } else if (ep->is_in) {
1569 * The biggest multiple of maximum packet size that fits into
1570 * the buffer. The hardware will split up into many packets in
1573 ep->bytes_per_buffer = (buffer_size / max) * max;
1576 * Only single packets will be placed in the buffers in the OUT
1579 ep->bytes_per_buffer = max;
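/*
 * Illustrative values for the three cases above: a high-bandwidth endpoint
 * with nt = 2 and max = 1024 gets bytes_per_buffer = 3072; a bulk IN endpoint
 * with max = 512 and a 1024 byte hardware buffer gets 1024 (two packets per
 * buffer); a bulk OUT endpoint with max = 512 gets 512, since only single
 * packets are placed in the OUT buffers.
 */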
1582 epctrl = (max << GR_EPCTRL_MAXPL_POS)
1583 | (nt << GR_EPCTRL_NT_POS)
1584 | (mode << GR_EPCTRL_TT_POS)
1587 epctrl |= GR_EPCTRL_PI;
1588 gr_write32(&ep->regs->epctrl, epctrl);
1590 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1592 spin_unlock(&ep->dev->lock);
1594 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1595 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
1599 /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
1600 static int gr_ep_disable(struct usb_ep *_ep)
1604 unsigned long flags;
1606 ep = container_of(_ep, struct gr_ep, ep);
1607 if (!_ep || !ep->ep.desc)
1612 /* 'ep0' IN and OUT are reserved */
1613 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1616 if (dev->ep0state == GR_EP0_SUSPEND)
1619 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1621 spin_lock_irqsave(&dev->lock, flags);
1627 spin_unlock_irqrestore(&dev->lock, flags);
1633 * Frees a request, but not any DMA buffers associated with it
1634 * (gr_finish_request should already have taken care of that).
1636 static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
1638 struct gr_request *req;
1642 req = container_of(_req, struct gr_request, req);
1644 /* Leads to memory leak */
1645 WARN(!list_empty(&req->queue),
1646 "request not dequeued properly before freeing\n");
1651 /* Queue a request from the gadget */
1652 static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
1656 struct gr_request *req;
1660 if (unlikely(!_ep || !_req))
1663 ep = container_of(_ep, struct gr_ep, ep);
1664 req = container_of(_req, struct gr_request, req);
1667 spin_lock(&ep->dev->lock);
1670 * The ep0 pointer in the gadget struct is used both for ep0in and
1671 * ep0out. In a data stage in the out direction ep0out needs to be used
1672 * instead of the default ep0in. Completion functions might use
1673 * driver_data, so that needs to be copied as well.
1675 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1677 ep->ep.driver_data = dev->epi[0].ep.driver_data;
1681 gr_dbgprint_request("EXTERN", ep, req);
1683 ret = gr_queue(ep, req, gfp_flags);
1685 spin_unlock(&ep->dev->lock);
1690 /* Dequeue JUST ONE request */
1691 static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1693 struct gr_request *req;
1697 unsigned long flags;
1699 ep = container_of(_ep, struct gr_ep, ep);
1700 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1706 /* We can't touch (DMA) registers when suspended */
1707 if (dev->ep0state == GR_EP0_SUSPEND)
1710 spin_lock_irqsave(&dev->lock, flags);
1712 /* Make sure it's actually queued on this endpoint */
1713 list_for_each_entry(req, &ep->queue, queue) {
1714 if (&req->req == _req)
1717 if (&req->req != _req) {
1722 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1723 /* This request is currently being processed */
1726 gr_finish_request(ep, req, -ECONNRESET);
1728 gr_dma_advance(ep, -ECONNRESET);
1729 } else if (!list_empty(&req->queue)) {
1730 /* Not being processed - gr_finish_request dequeues it */
1731 gr_finish_request(ep, req, -ECONNRESET);
1737 spin_unlock_irqrestore(&dev->lock, flags);
1742 /* Helper for gr_set_halt and gr_set_wedge */
1743 static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
1750 ep = container_of(_ep, struct gr_ep, ep);
1752 spin_lock(&ep->dev->lock);
1754 /* Halting an IN endpoint should fail if queue is not empty */
1755 if (halt && ep->is_in && !list_empty(&ep->queue)) {
1760 ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1763 spin_unlock(&ep->dev->lock);
1769 static int gr_set_halt(struct usb_ep *_ep, int halt)
1771 return gr_set_halt_wedge(_ep, halt, 0);
1774 /* Halt and wedge endpoint */
1775 static int gr_set_wedge(struct usb_ep *_ep)
1777 return gr_set_halt_wedge(_ep, 1, 1);
1781 * Return the total number of bytes currently stored in the internal buffers of
1784 static int gr_fifo_status(struct usb_ep *_ep)
1792 ep = container_of(_ep, struct gr_ep, ep);
1794 epstat = gr_read32(&ep->regs->epstat);
1796 if (epstat & GR_EPSTAT_B0)
1797 bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
1798 if (epstat & GR_EPSTAT_B1)
1799 bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
1805 /* Empty data from internal buffers of an endpoint. */
1806 static void gr_fifo_flush(struct usb_ep *_ep)
1813 ep = container_of(_ep, struct gr_ep, ep);
1814 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1816 spin_lock(&ep->dev->lock);
1818 epctrl = gr_read32(&ep->regs->epctrl);
1819 epctrl |= GR_EPCTRL_CB;
1820 gr_write32(&ep->regs->epctrl, epctrl);
1822 spin_unlock(&ep->dev->lock);
1825 static struct usb_ep_ops gr_ep_ops = {
1826 .enable = gr_ep_enable,
1827 .disable = gr_ep_disable,
1829 .alloc_request = gr_alloc_request,
1830 .free_request = gr_free_request,
1832 .queue = gr_queue_ext,
1833 .dequeue = gr_dequeue,
1835 .set_halt = gr_set_halt,
1836 .set_wedge = gr_set_wedge,
1837 .fifo_status = gr_fifo_status,
1838 .fifo_flush = gr_fifo_flush,
1841 /* ---------------------------------------------------------------------- */
1842 /* USB Gadget ops */
1844 static int gr_get_frame(struct usb_gadget *_gadget)
1850 dev = container_of(_gadget, struct gr_udc, gadget);
1851 return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
1854 static int gr_wakeup(struct usb_gadget *_gadget)
1860 dev = container_of(_gadget, struct gr_udc, gadget);
1862 /* Remote wakeup feature not enabled by host */
1863 if (!dev->remote_wakeup)
1866 spin_lock(&dev->lock);
1868 gr_write32(&dev->regs->control,
1869 gr_read32(&dev->regs->control) | GR_CONTROL_RW);
1871 spin_unlock(&dev->lock);
1876 static int gr_pullup(struct usb_gadget *_gadget, int is_on)
1883 dev = container_of(_gadget, struct gr_udc, gadget);
1885 spin_lock(&dev->lock);
1887 control = gr_read32(&dev->regs->control);
1889 control |= GR_CONTROL_EP;
1891 control &= ~GR_CONTROL_EP;
1892 gr_write32(&dev->regs->control, control);
1894 spin_unlock(&dev->lock);
1899 static int gr_udc_start(struct usb_gadget *gadget,
1900 struct usb_gadget_driver *driver)
1902 struct gr_udc *dev = to_gr_udc(gadget);
1904 spin_lock(&dev->lock);
1906 /* Hook up the driver */
1907 driver->driver.bus = NULL;
1908 dev->driver = driver;
1910 /* Get ready for host detection */
1911 gr_enable_vbus_detect(dev);
1913 spin_unlock(&dev->lock);
1915 dev_info(dev->dev, "Started with gadget driver '%s'\n",
1916 driver->driver.name);
1921 static int gr_udc_stop(struct usb_gadget *gadget,
1922 struct usb_gadget_driver *driver)
1924 struct gr_udc *dev = to_gr_udc(gadget);
1925 unsigned long flags;
1927 spin_lock_irqsave(&dev->lock, flags);
1930 gr_stop_activity(dev);
1932 spin_unlock_irqrestore(&dev->lock, flags);
1934 dev_info(dev->dev, "Stopped\n");
1939 static const struct usb_gadget_ops gr_ops = {
1940 .get_frame = gr_get_frame,
1941 .wakeup = gr_wakeup,
1942 .pullup = gr_pullup,
1943 .udc_start = gr_udc_start,
1944 .udc_stop = gr_udc_stop,
1945 /* Other operations not supported */
1948 /* ---------------------------------------------------------------------- */
1949 /* Module probe, removal and of-matching */
1951 static const char * const onames[] = {
1952 "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
1953 "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
1954 "ep12out", "ep13out", "ep14out", "ep15out"
1957 static const char * const inames[] = {
1958 "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
1959 "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
1960 "ep12in", "ep13in", "ep14in", "ep15in"
1963 /* Must be called with dev->lock held */
1964 static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
1967 struct gr_request *req;
1968 struct usb_request *_req;
1972 ep = &dev->epi[num];
1973 ep->ep.name = inames[num];
1974 ep->regs = &dev->regs->epi[num];
1976 ep = &dev->epo[num];
1977 ep->ep.name = onames[num];
1978 ep->regs = &dev->regs->epo[num];
1985 ep->ep.ops = &gr_ep_ops;
1986 INIT_LIST_HEAD(&ep->queue);
1989 _req = gr_alloc_request(&ep->ep, GFP_KERNEL);
1990 buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_KERNEL);
1991 if (!_req || !buf) {
1992 /* possible _req freed by gr_probe via gr_remove */
1996 req = container_of(_req, struct gr_request, req);
1998 req->req.length = MAX_CTRL_PL_SIZE;
2001 dev->ep0reqi = req; /* req.complete is set each time it is used */
2003 dev->ep0reqo = req; /* Completion treated separately */
2005 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2006 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2008 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2009 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2011 list_add_tail(&ep->ep_list, &dev->ep_list);
2016 /* Must be called with dev->lock held */
2017 static int gr_udc_init(struct gr_udc *dev)
2019 struct device_node *np = dev->dev->of_node;
2026 gr_set_address(dev, 0);
2028 INIT_LIST_HEAD(&dev->gadget.ep_list);
2029 dev->gadget.speed = USB_SPEED_UNKNOWN;
2030 dev->gadget.ep0 = &dev->epi[0].ep;
2032 INIT_LIST_HEAD(&dev->ep_list);
2033 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
2035 for (i = 0; i < dev->nepo; i++) {
2036 if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
2038 ret = gr_ep_init(dev, i, 0, bufsize);
2043 for (i = 0; i < dev->nepi; i++) {
2044 if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
2046 ret = gr_ep_init(dev, i, 1, bufsize);
2051 /* Must be disabled by default */
2052 dev->remote_wakeup = 0;
2054 /* Enable ep0out and ep0in */
2055 epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
2056 dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
2057 gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
2058 gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
2059 gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
2060 gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
2065 static int gr_remove(struct platform_device *pdev)
2067 struct gr_udc *dev = platform_get_drvdata(pdev);
2070 usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
2076 dma_pool_destroy(dev->desc_pool);
2077 platform_set_drvdata(pdev, NULL);
2079 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2080 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
2084 static int gr_request_irq(struct gr_udc *dev, int irq)
2086 return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
2087 IRQF_SHARED, driver_name, dev);
2090 static int gr_probe(struct platform_device *pdev)
2093 struct resource *res;
2094 struct gr_regs __iomem *regs;
2098 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2101 dev->dev = &pdev->dev;
2103 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2104 regs = devm_ioremap_resource(dev->dev, res);
2106 return PTR_ERR(regs);
2108 dev->irq = platform_get_irq(pdev, 0);
2109 if (dev->irq <= 0) {
2110 dev_err(dev->dev, "No irq found\n");
2114 /* Some core configurations have separate irqs for IN and OUT events */
2115 dev->irqi = platform_get_irq(pdev, 1);
2116 if (dev->irqi > 0) {
2117 dev->irqo = platform_get_irq(pdev, 2);
2118 if (dev->irqo <= 0) {
2119 dev_err(dev->dev, "Found irqi but not irqo\n");
2126 dev->gadget.name = driver_name;
2127 dev->gadget.max_speed = USB_SPEED_HIGH;
2128 dev->gadget.ops = &gr_ops;
2129 dev->gadget.quirk_ep_out_aligned_size = true;
2131 spin_lock_init(&dev->lock);
2134 platform_set_drvdata(pdev, dev);
2136 /* Determine number of endpoints and data interface mode */
2137 status = gr_read32(&dev->regs->status);
2138 dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
2139 dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
2141 if (!(status & GR_STATUS_DM)) {
2142 dev_err(dev->dev, "Slave mode cores are not supported\n");
2146 /* --- Effects of the following calls might need explicit cleanup --- */
2148 /* Create DMA pool for descriptors */
2149 dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
2150 sizeof(struct gr_dma_desc), 4, 0);
2151 if (!dev->desc_pool) {
2152 dev_err(dev->dev, "Could not allocate DMA pool");
2156 spin_lock(&dev->lock);
2158 /* Inside lock so that no gadget can use this udc until probe is done */
2159 retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
2161 dev_err(dev->dev, "Could not add gadget udc");
2166 retval = gr_udc_init(dev);
2172 /* Clear all interrupt enables that might be left on since last boot */
2173 gr_disable_interrupts_and_pullup(dev);
2175 retval = gr_request_irq(dev, dev->irq);
2177 dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
2182 retval = gr_request_irq(dev, dev->irqi);
2184 dev_err(dev->dev, "Failed to request irqi %d\n",
2188 retval = gr_request_irq(dev, dev->irqo);
2190 dev_err(dev->dev, "Failed to request irqo %d\n",
2197 dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
2198 dev->irq, dev->irqi, dev->irqo);
2200 dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
2203 spin_unlock(&dev->lock);
2211 static struct of_device_id gr_match[] = {
2212 {.name = "GAISLER_USBDC"},
2216 MODULE_DEVICE_TABLE(of, gr_match);
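/*
 * Added note on matching and configuration: the core is matched by node name
 * ("GAISLER_USBDC"), and gr_udc_init() optionally reads per-endpoint hardware
 * buffer sizes from the "epobufsizes"/"epibufsizes" properties of that node,
 * e.g. (hypothetical values, consult the GRLIB documentation for the actual
 * binding):
 *
 *   epobufsizes = <1024 1024 512>;
 *   epibufsizes = <1024 1024 512>;
 *
 * Endpoints without an entry fall back to a driver default, since a failed
 * of_property_read_u32_index() only skips the override.
 */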
2218 static struct platform_driver gr_driver = {
2220 .name = DRIVER_NAME,
2221 .owner = THIS_MODULE,
2222 .of_match_table = gr_match,
2225 .remove = gr_remove,
2227 module_platform_driver(gr_driver);
2229 MODULE_AUTHOR("Aeroflex Gaisler AB.");
2230 MODULE_DESCRIPTION(DRIVER_DESC);
2231 MODULE_LICENSE("GPL");