/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kconfig.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/platform_device.h>
30 #include <linux/sched.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 #include <linux/timer.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/workqueue.h>
38 #include <bcm63xx_cpu.h>
39 #include <bcm63xx_iudma.h>
40 #include <bcm63xx_dev_usb_usbd.h>
41 #include <bcm63xx_io.h>
42 #include <bcm63xx_regs.h>
44 #define DRV_MODULE_NAME "bcm63xx_udc"
46 static const char bcm63xx_ep0name[] = "ep0";
50 const struct usb_ep_caps caps;
51 } bcm63xx_ep_info[] = {
52 #define EP_INFO(_name, _caps) \
58 EP_INFO(bcm63xx_ep0name,
59 USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
61 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
62 EP_INFO("ep2out-bulk",
63 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
65 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
67 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
72 static bool use_fullspeed;
73 module_param(use_fullspeed, bool, S_IRUGO);
74 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
77 * RX IRQ coalescing options:
79 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
80 * driver is able to pass the "testusb" suite and recover from conditions like:
82 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
83 * 2) Host sends 512 bytes of data
84 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
85 * 4) Device shuts down the endpoint and cancels the RX transaction
87 * true - one IRQ per transfer, for transfers <= 2048B. Generates
88 * considerably fewer IRQs, but error recovery is less robust. Does not
89 * reliably pass "testusb".
91 * TX always uses coalescing, because we can cancel partially complete TX
92 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
95 static bool irq_coalesce;
96 module_param(irq_coalesce, bool, S_IRUGO);
97 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
/* Endpoint / channel topology: 5 gadget endpoints served by 6 IUDMA
 * channels (ep0 needs a dedicated RX and TX channel). */
#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

/* Hardware endpoint type encodings used in CSR/typemap registers. */
#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

/* Offsets of the DMA channel config / state RAM areas within iudma_regs. */
#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400
125 enum bcm63xx_ep0_state {
128 EP0_IN_DATA_PHASE_SETUP,
129 EP0_IN_DATA_PHASE_COMPLETE,
130 EP0_OUT_DATA_PHASE_SETUP,
131 EP0_OUT_DATA_PHASE_COMPLETE,
132 EP0_OUT_STATUS_PHASE,
133 EP0_IN_FAKE_STATUS_PHASE,
137 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
140 "IN_DATA_PHASE_SETUP",
141 "IN_DATA_PHASE_COMPLETE",
142 "OUT_DATA_PHASE_SETUP",
143 "OUT_DATA_PHASE_COMPLETE",
145 "IN_FAKE_STATUS_PHASE",
150 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
151 * @ep_num: USB endpoint number.
152 * @n_bds: Number of buffer descriptors in the ring.
153 * @ep_type: Endpoint type (control, bulk, interrupt).
154 * @dir: Direction (in, out).
155 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
156 * @max_pkt_hs: Maximum packet size in high speed mode.
157 * @max_pkt_fs: Maximum packet size in full speed mode.
159 struct iudma_ch_cfg {
169 static const struct iudma_ch_cfg iudma_defaults[] = {
171 /* This controller was designed to support a CDC/RNDIS application.
172 It may be possible to reconfigure some of the endpoints, but
173 the hardware limitations (FIFO sizing and number of DMA channels)
174 may significantly impact flexibility and/or stability. Change
175 these values at your own risk.
177 ep_num ep_type n_fifo_slots max_pkt_fs
178 idx | n_bds | dir | max_pkt_hs |
180 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
181 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
182 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
183 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
184 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
185 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
191 * struct iudma_ch - Represents the current state of a single IUDMA channel.
192 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
193 * @ep_num: USB endpoint number. -1 for ep0 RX.
194 * @enabled: Whether bcm63xx_ep_enable() has been called.
195 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
196 * @is_tx: true for TX, false for RX.
197 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
198 * @udc: Reference to the device controller.
199 * @read_bd: Next buffer descriptor to reap from the hardware.
200 * @write_bd: Next BD available for a new packet.
201 * @end_bd: Points to the final BD in the ring.
202 * @n_bds_used: Number of BD entries currently occupied.
203 * @bd_ring: Base pointer to the BD ring.
204 * @bd_ring_dma: Physical (DMA) address of bd_ring.
205 * @n_bds: Total number of BDs in the ring.
207 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
208 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
211 * Each bulk/intr endpoint has a single IUDMA channel and a single
220 struct bcm63xx_ep *bep;
221 struct bcm63xx_udc *udc;
223 struct bcm_enet_desc *read_bd;
224 struct bcm_enet_desc *write_bd;
225 struct bcm_enet_desc *end_bd;
228 struct bcm_enet_desc *bd_ring;
229 dma_addr_t bd_ring_dma;
234 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
235 * @ep_num: USB endpoint number.
236 * @iudma: Pointer to IUDMA channel state.
237 * @ep: USB gadget layer representation of the EP.
238 * @udc: Reference to the device controller.
239 * @queue: Linked list of outstanding requests for this EP.
240 * @halted: 1 if the EP is stalled; 0 otherwise.
244 struct iudma_ch *iudma;
246 struct bcm63xx_udc *udc;
247 struct list_head queue;
252 * struct bcm63xx_req - Internal (driver) state of a single request.
253 * @queue: Links back to the EP's request list.
254 * @req: USB gadget layer representation of the request.
255 * @offset: Current byte offset into the data buffer (next byte to queue).
256 * @bd_bytes: Number of data bytes in outstanding BD entries.
257 * @iudma: IUDMA channel used for the request.
260 struct list_head queue; /* ep's requests */
261 struct usb_request req;
263 unsigned int bd_bytes;
264 struct iudma_ch *iudma;
268 * struct bcm63xx_udc - Driver/hardware private context.
269 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
270 * @dev: Generic Linux device structure.
271 * @pd: Platform data (board/port info).
272 * @usbd_clk: Clock descriptor for the USB device block.
273 * @usbh_clk: Clock descriptor for the USB host block.
274 * @gadget: USB slave device.
275 * @driver: Driver for USB slave devices.
276 * @usbd_regs: Base address of the USBD/USB20D block.
277 * @iudma_regs: Base address of the USBD's associated IUDMA block.
278 * @bep: Array of endpoints, including ep0.
279 * @iudma: Array of all IUDMA channels used by this controller.
280 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
281 * @iface: USB interface number, from SET_INTERFACE wIndex.
282 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
283 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
284 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
285 * @ep0state: Current state of the ep0 state machine.
286 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
287 * @wedgemap: Bitmap of wedged endpoints.
288 * @ep0_req_reset: USB reset is pending.
289 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
290 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
291 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
292 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
293 * @ep0_reply: Pending reply from gadget driver.
294 * @ep0_request: Outstanding ep0 request.
295 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
296 * @debugfs_usbd: debugfs file "usbd" for controller state.
297 * @debugfs_iudma: debugfs file "usbd" for IUDMA state.
303 struct bcm63xx_usbd_platform_data *pd;
304 struct clk *usbd_clk;
305 struct clk *usbh_clk;
307 struct usb_gadget gadget;
308 struct usb_gadget_driver *driver;
310 void __iomem *usbd_regs;
311 void __iomem *iudma_regs;
313 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
314 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
320 struct bcm63xx_req ep0_ctrl_req;
324 struct work_struct ep0_wq;
326 unsigned long wedgemap;
328 unsigned ep0_req_reset:1;
329 unsigned ep0_req_set_cfg:1;
330 unsigned ep0_req_set_iface:1;
331 unsigned ep0_req_shutdown:1;
333 unsigned ep0_req_completed:1;
334 struct usb_request *ep0_reply;
335 struct usb_request *ep0_request;
337 struct dentry *debugfs_root;
338 struct dentry *debugfs_usbd;
339 struct dentry *debugfs_iudma;
342 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
344 /***********************************************************************
345 * Convenience functions
346 ***********************************************************************/
348 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
350 return container_of(g, struct bcm63xx_udc, gadget);
353 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
355 return container_of(ep, struct bcm63xx_ep, ep);
358 static inline struct bcm63xx_req *our_req(struct usb_request *req)
360 return container_of(req, struct bcm63xx_req, req);
363 static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
365 return bcm_readl(udc->usbd_regs + off);
368 static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
370 bcm_writel(val, udc->usbd_regs + off);
373 static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
375 return bcm_readl(udc->iudma_regs + off);
378 static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
380 bcm_writel(val, udc->iudma_regs + off);
383 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
385 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
386 (ENETDMA_CHAN_WIDTH * chan));
389 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
392 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
393 (ENETDMA_CHAN_WIDTH * chan));
396 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
398 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
399 (ENETDMA_CHAN_WIDTH * chan));
402 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
405 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
406 (ENETDMA_CHAN_WIDTH * chan));
409 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
412 clk_enable(udc->usbh_clk);
413 clk_enable(udc->usbd_clk);
416 clk_disable(udc->usbd_clk);
417 clk_disable(udc->usbh_clk);
421 /***********************************************************************
422 * Low-level IUDMA / FIFO operations
423 ***********************************************************************/
426 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
427 * @udc: Reference to the device controller.
428 * @idx: Desired init_sel value.
430 * The "init_sel" signal is used as a selection index for both endpoints
431 * and IUDMA channels. Since these do not map 1:1, the use of this signal
432 * depends on the context.
434 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
436 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
438 val &= ~USBD_CONTROL_INIT_SEL_MASK;
439 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
440 usbd_writel(udc, val, USBD_CONTROL_REG);
444 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
445 * @udc: Reference to the device controller.
446 * @bep: Endpoint on which to operate.
447 * @is_stalled: true to enable stall, false to disable.
449 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
450 * halt/stall conditions.
452 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
457 val = USBD_STALL_UPDATE_MASK |
458 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
459 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
460 usbd_writel(udc, val, USBD_STALL_REG);
464 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
465 * @udc: Reference to the device controller.
467 * These parameters depend on the USB link speed. Settings are
468 * per-IUDMA-channel-pair.
470 static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
472 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
473 u32 i, val, rx_fifo_slot, tx_fifo_slot;
475 /* set up FIFO boundaries and packet sizes; this is done in pairs */
476 rx_fifo_slot = tx_fifo_slot = 0;
477 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
478 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
479 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
481 bcm63xx_ep_dma_select(udc, i >> 1);
483 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
484 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
485 USBD_RXFIFO_CONFIG_END_SHIFT);
486 rx_fifo_slot += rx_cfg->n_fifo_slots;
487 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
489 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
490 USBD_RXFIFO_EPSIZE_REG);
492 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
493 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
494 USBD_TXFIFO_CONFIG_END_SHIFT);
495 tx_fifo_slot += tx_cfg->n_fifo_slots;
496 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
498 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
499 USBD_TXFIFO_EPSIZE_REG);
501 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
506 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
507 * @udc: Reference to the device controller.
508 * @ep_num: Endpoint number.
510 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
514 bcm63xx_ep_dma_select(udc, ep_num);
516 val = usbd_readl(udc, USBD_CONTROL_REG);
517 val |= USBD_CONTROL_FIFO_RESET_MASK;
518 usbd_writel(udc, val, USBD_CONTROL_REG);
519 usbd_readl(udc, USBD_CONTROL_REG);
523 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
524 * @udc: Reference to the device controller.
526 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
530 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
531 bcm63xx_fifo_reset_ep(udc, i);
535 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
536 * @udc: Reference to the device controller.
538 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
542 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
543 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
548 bcm63xx_ep_dma_select(udc, cfg->ep_num);
549 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
550 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
551 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
556 * bcm63xx_ep_setup - Configure per-endpoint settings.
557 * @udc: Reference to the device controller.
559 * This needs to be rerun if the speed/cfg/intf/altintf changes.
561 static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
565 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
567 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
568 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
569 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
570 cfg->max_pkt_hs : cfg->max_pkt_fs;
571 int idx = cfg->ep_num;
573 udc->iudma[i].max_pkt = max_pkt;
577 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
579 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
580 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
581 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
582 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
583 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
584 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
585 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
586 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
591 * iudma_write - Queue a single IUDMA transaction.
592 * @udc: Reference to the device controller.
593 * @iudma: IUDMA channel to use.
594 * @breq: Request containing the transaction data.
596 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
597 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
598 * So iudma_write() may be called several times to fulfill a single
601 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
603 static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
604 struct bcm63xx_req *breq)
606 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
607 unsigned int bytes_left = breq->req.length - breq->offset;
608 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
609 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
611 iudma->n_bds_used = 0;
615 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
619 struct bcm_enet_desc *d = iudma->write_bd;
621 unsigned int n_bytes;
623 if (d == iudma->end_bd) {
624 dmaflags |= DMADESC_WRAP_MASK;
625 iudma->write_bd = iudma->bd_ring;
631 n_bytes = min_t(int, bytes_left, max_bd_bytes);
633 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
635 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
636 DMADESC_USB_ZERO_MASK;
638 dmaflags |= DMADESC_OWNER_MASK;
640 dmaflags |= DMADESC_SOP_MASK;
645 * extra_zero_pkt forces one more iteration through the loop
646 * after all data is queued up, to send the zero packet
648 if (extra_zero_pkt && !bytes_left)
651 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
652 (n_bytes == bytes_left && !extra_zero_pkt)) {
654 dmaflags |= DMADESC_EOP_MASK;
657 d->address = breq->req.dma + breq->offset;
659 d->len_stat = dmaflags;
661 breq->offset += n_bytes;
662 breq->bd_bytes += n_bytes;
663 bytes_left -= n_bytes;
666 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
667 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
671 * iudma_read - Check for IUDMA buffer completion.
672 * @udc: Reference to the device controller.
673 * @iudma: IUDMA channel to use.
675 * This checks to see if ALL of the outstanding BDs on the DMA channel
676 * have been filled. If so, it returns the actual transfer length;
677 * otherwise it returns -EBUSY.
679 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
681 int i, actual_len = 0;
682 struct bcm_enet_desc *d = iudma->read_bd;
684 if (!iudma->n_bds_used)
687 for (i = 0; i < iudma->n_bds_used; i++) {
690 dmaflags = d->len_stat;
692 if (dmaflags & DMADESC_OWNER_MASK)
695 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
696 DMADESC_LENGTH_SHIFT;
697 if (d == iudma->end_bd)
704 iudma->n_bds_used = 0;
709 * iudma_reset_channel - Stop DMA on a single channel.
710 * @udc: Reference to the device controller.
711 * @iudma: IUDMA channel to reset.
713 static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
715 int timeout = IUDMA_RESET_TIMEOUT_US;
716 struct bcm_enet_desc *d;
717 int ch_idx = iudma->ch_idx;
720 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
722 /* stop DMA, then wait for the hardware to wrap up */
723 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
725 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
726 ENETDMAC_CHANCFG_EN_MASK) {
729 /* repeatedly flush the FIFO data until the BD completes */
730 if (iudma->is_tx && iudma->ep_num >= 0)
731 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
734 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
738 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
739 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
741 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
742 ENETDMAC_CHANCFG_REG, ch_idx);
745 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
747 /* don't leave "live" HW-owned entries for the next guy to step on */
748 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
752 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
753 iudma->n_bds_used = 0;
755 /* set up IRQs, UBUS burst size, and BD base for this channel */
756 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
757 ENETDMAC_IRMASK_REG, ch_idx);
758 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
760 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
761 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
765 * iudma_init_channel - One-time IUDMA channel initialization.
766 * @udc: Reference to the device controller.
767 * @ch_idx: Channel to initialize.
769 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
771 struct iudma_ch *iudma = &udc->iudma[ch_idx];
772 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
773 unsigned int n_bds = cfg->n_bds;
774 struct bcm63xx_ep *bep = NULL;
776 iudma->ep_num = cfg->ep_num;
777 iudma->ch_idx = ch_idx;
778 iudma->is_tx = !!(ch_idx & 0x01);
779 if (iudma->ep_num >= 0) {
780 bep = &udc->bep[iudma->ep_num];
782 INIT_LIST_HEAD(&bep->queue);
788 /* ep0 is always active; others are controlled by the gadget driver */
789 if (iudma->ep_num <= 0)
790 iudma->enabled = true;
792 iudma->n_bds = n_bds;
793 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
794 n_bds * sizeof(struct bcm_enet_desc),
795 &iudma->bd_ring_dma, GFP_KERNEL);
798 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
804 * iudma_init - One-time initialization of all IUDMA channels.
805 * @udc: Reference to the device controller.
807 * Enable DMA, flush channels, and enable global IUDMA IRQs.
809 static int iudma_init(struct bcm63xx_udc *udc)
813 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
815 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
816 rc = iudma_init_channel(udc, i);
819 iudma_reset_channel(udc, &udc->iudma[i]);
822 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
827 * iudma_uninit - Uninitialize IUDMA channels.
828 * @udc: Reference to the device controller.
830 * Kill global IUDMA IRQs, flush channels, and kill DMA.
832 static void iudma_uninit(struct bcm63xx_udc *udc)
836 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
838 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
839 iudma_reset_channel(udc, &udc->iudma[i]);
841 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
844 /***********************************************************************
845 * Other low-level USBD operations
846 ***********************************************************************/
849 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
850 * @udc: Reference to the device controller.
851 * @enable_irqs: true to enable, false to disable.
853 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
857 usbd_writel(udc, 0, USBD_STATUS_REG);
859 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
860 BIT(USBD_EVENT_IRQ_SETUP) |
861 BIT(USBD_EVENT_IRQ_SETCFG) |
862 BIT(USBD_EVENT_IRQ_SETINTF) |
863 BIT(USBD_EVENT_IRQ_USB_LINK);
864 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
865 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
869 * bcm63xx_select_phy_mode - Select between USB device and host mode.
870 * @udc: Reference to the device controller.
871 * @is_device: true for device, false for host.
873 * This should probably be reworked to use the drivers/usb/otg
876 * By default, the AFE/pullups are disabled in device mode, until
877 * bcm63xx_select_pullup() is called.
879 static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
881 u32 val, portmask = BIT(udc->pd->port_no);
883 if (BCMCPU_IS_6328()) {
884 /* configure pinmux to sense VBUS signal */
885 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
886 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
887 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
888 GPIO_PINMUX_OTHR_6328_USB_HOST;
889 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
892 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
894 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
895 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
897 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
898 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
900 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
902 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
904 val |= USBH_PRIV_SWAP_USBD_MASK;
906 val &= ~USBH_PRIV_SWAP_USBD_MASK;
907 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
911 * bcm63xx_select_pullup - Enable/disable the pullup on D+
912 * @udc: Reference to the device controller.
913 * @is_on: true to enable the pullup, false to disable.
915 * If the pullup is active, the host will sense a FS/HS device connected to
916 * the port. If the pullup is inactive, the host will think the USB
917 * device has been disconnected.
919 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
921 u32 val, portmask = BIT(udc->pd->port_no);
923 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
925 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
927 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
928 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
932 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
933 * @udc: Reference to the device controller.
935 * This just masks the IUDMA IRQs and releases the clocks. It is assumed
936 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
938 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
940 set_clocks(udc, true);
942 set_clocks(udc, false);
944 clk_put(udc->usbd_clk);
945 clk_put(udc->usbh_clk);
949 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
950 * @udc: Reference to the device controller.
952 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
957 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
959 if (!udc->ep0_ctrl_buf)
962 INIT_LIST_HEAD(&udc->gadget.ep_list);
963 for (i = 0; i < BCM63XX_NUM_EP; i++) {
964 struct bcm63xx_ep *bep = &udc->bep[i];
966 bep->ep.name = bcm63xx_ep_info[i].name;
967 bep->ep.caps = bcm63xx_ep_info[i].caps;
969 bep->ep.ops = &bcm63xx_udc_ep_ops;
970 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
972 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
975 INIT_LIST_HEAD(&bep->queue);
978 udc->gadget.ep0 = &udc->bep[0].ep;
979 list_del(&udc->bep[0].ep.ep_list);
981 udc->gadget.speed = USB_SPEED_UNKNOWN;
982 udc->ep0state = EP0_SHUTDOWN;
984 udc->usbh_clk = clk_get(udc->dev, "usbh");
985 if (IS_ERR(udc->usbh_clk))
988 udc->usbd_clk = clk_get(udc->dev, "usbd");
989 if (IS_ERR(udc->usbd_clk)) {
990 clk_put(udc->usbh_clk);
994 set_clocks(udc, true);
996 val = USBD_CONTROL_AUTO_CSRS_MASK |
997 USBD_CONTROL_DONE_CSRS_MASK |
998 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
999 usbd_writel(udc, val, USBD_CONTROL_REG);
1001 val = USBD_STRAPS_APP_SELF_PWR_MASK |
1002 USBD_STRAPS_APP_RAM_IF_MASK |
1003 USBD_STRAPS_APP_CSRPRGSUP_MASK |
1004 USBD_STRAPS_APP_8BITPHY_MASK |
1005 USBD_STRAPS_APP_RMTWKUP_MASK;
1007 if (udc->gadget.max_speed == USB_SPEED_HIGH)
1008 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1010 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1011 usbd_writel(udc, val, USBD_STRAPS_REG);
1013 bcm63xx_set_ctrl_irqs(udc, false);
1015 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1017 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1018 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1019 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1021 rc = iudma_init(udc);
1022 set_clocks(udc, false);
1024 bcm63xx_uninit_udc_hw(udc);
1029 /***********************************************************************
1030 * Standard EP gadget operations
1031 ***********************************************************************/
1034 * bcm63xx_ep_enable - Enable one endpoint.
1035 * @ep: Endpoint to enable.
1036 * @desc: Contains max packet, direction, etc.
1038 * Most of the endpoint parameters are fixed in this controller, so there
1039 * isn't much for this function to do.
1041 static int bcm63xx_ep_enable(struct usb_ep *ep,
1042 const struct usb_endpoint_descriptor *desc)
1044 struct bcm63xx_ep *bep = our_ep(ep);
1045 struct bcm63xx_udc *udc = bep->udc;
1046 struct iudma_ch *iudma = bep->iudma;
1047 unsigned long flags;
1049 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1055 spin_lock_irqsave(&udc->lock, flags);
1056 if (iudma->enabled) {
1057 spin_unlock_irqrestore(&udc->lock, flags);
1061 iudma->enabled = true;
1062 BUG_ON(!list_empty(&bep->queue));
1064 iudma_reset_channel(udc, iudma);
1067 bcm63xx_set_stall(udc, bep, false);
1068 clear_bit(bep->ep_num, &udc->wedgemap);
1071 ep->maxpacket = usb_endpoint_maxp(desc);
1073 spin_unlock_irqrestore(&udc->lock, flags);
1078 * bcm63xx_ep_disable - Disable one endpoint.
1079 * @ep: Endpoint to disable.
1081 static int bcm63xx_ep_disable(struct usb_ep *ep)
1083 struct bcm63xx_ep *bep = our_ep(ep);
1084 struct bcm63xx_udc *udc = bep->udc;
1085 struct iudma_ch *iudma = bep->iudma;
1086 struct bcm63xx_req *breq, *n;
1087 unsigned long flags;
1089 if (!ep || !ep->desc)
1092 spin_lock_irqsave(&udc->lock, flags);
1093 if (!iudma->enabled) {
1094 spin_unlock_irqrestore(&udc->lock, flags);
1097 iudma->enabled = false;
1099 iudma_reset_channel(udc, iudma);
1101 if (!list_empty(&bep->queue)) {
1102 list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1103 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1105 list_del(&breq->queue);
1106 breq->req.status = -ESHUTDOWN;
1108 spin_unlock_irqrestore(&udc->lock, flags);
1109 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1110 spin_lock_irqsave(&udc->lock, flags);
1115 spin_unlock_irqrestore(&udc->lock, flags);
1120 * bcm63xx_udc_alloc_request - Allocate a new request.
1121 * @ep: Endpoint associated with the request.
1122 * @mem_flags: Flags to pass to kzalloc().
1124 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1127 struct bcm63xx_req *breq;
1129 breq = kzalloc(sizeof(*breq), mem_flags);
/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);

	kfree(breq);
}
/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately. Otherwise, add
 * it to the endpoint's pending list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine. (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	/* reject obviously malformed submissions up front */
	if (unlikely(!req || !req->complete || !req->buf || !ep))
	/* ep0 replies bypass IUDMA queueing; the ep0 worker consumes them */
	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		udc->ep0_reply = req;
		/* hand the reply off to the ep0 state machine worker */
		schedule_work(&udc->ep0_wq);
	spin_lock_irqsave(&udc->lock, flags);
	/* the endpoint's IUDMA channel must be enabled before queueing */
	if (!bep->iudma->enabled) {
	/* DMA-map the buffer in the direction of this channel */
	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	list_add_tail(&breq->queue, &bep->queue);
	/* nothing else was pending, so kick the hardware immediately */
	if (list_is_singular(&bep->queue))
		iudma_write(udc, bep->iudma, breq);
	spin_unlock_irqrestore(&udc->lock, flags);
/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it. If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/* nothing queued on this endpoint -> nothing to cancel */
	if (list_empty(&bep->queue)) {
	/* look at whichever request is currently at the head */
	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
	/* in-flight request: stop the channel before unlinking it */
	iudma_reset_channel(udc, bep->iudma);
	list_del(&breq->queue);
	if (!list_empty(&bep->queue)) {
		struct bcm63xx_req *next;
		/* restart DMA with the successor request */
		next = list_first_entry(&bep->queue,
			struct bcm63xx_req, queue);
		iudma_write(udc, bep->iudma, next);
	/* not in flight: safe to just unlink it from the list */
	list_del(&breq->queue);
	spin_unlock_irqrestore(&udc->lock, flags);
	/* complete the cancelled request outside the spinlock */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);
/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/* program the hardware stall bit, then mirror it in SW state */
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);
/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/* remember the wedge so SETUP handling can re-assert the stall */
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);
/* usb_ep_ops vtable shared by every bcm63xx endpoint */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable = bcm63xx_ep_enable,
	.disable = bcm63xx_ep_disable,
	.alloc_request = bcm63xx_udc_alloc_request,
	.free_request = bcm63xx_udc_free_request,
	.queue = bcm63xx_udc_queue,
	.dequeue = bcm63xx_udc_dequeue,
	.set_halt = bcm63xx_udc_set_halt,
	.set_wedge = bcm63xx_udc_set_wedge,
1310 /***********************************************************************
1312 ***********************************************************************/
/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
	/* the gadget ->setup() callback must not be invoked under the lock */
	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration. If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
	struct usb_ctrlrequest ctrl;
	/* build a fake SET_CONFIGURATION with the cfg latched from HW */
	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	/* too late to STALL; the HW already ACKed on our behalf */
	dev_warn_ratelimited(udc->dev,
		"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 *
 * Same idea as bcm63xx_ep0_spoof_set_cfg(): the hardware consumed the
 * real packet, so replay it to the gadget driver from latched state.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
	struct usb_ctrlrequest ctrl;
	/* rebuild the request from the iface/alt_iface values in SW state */
	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	/* too late to STALL; the HW already ACKed on our behalf */
	dev_warn_ratelimited(udc->dev,
		"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
		udc->iface, udc->alt_iface);
/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	/* only one ep0 request may be in flight at a time */
	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;
	/* map for DMA in the channel's direction, then start the transfer */
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
	req->status = status;
	if (req->complete) {
		/* completion callbacks must run without the lock held */
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *	reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
	struct usb_request *req = udc->ep0_reply;
	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	/* if the reply was also the in-flight ep0 request, clear that too */
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	/* tell the gadget driver the request died */
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *	the number of bytes transferred.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
	struct usb_request *req = udc->ep0_request;
	/* reset the bookkeeping so a new ep0 transfer can be staged */
	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;
/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker. This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req = &udc->ep0_ctrl_req.req;
	/* internal request: driver-owned buffer, no completion callback */
	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;
	bcm63xx_ep0_map_write(udc, ch_idx, req);
/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
 * for the next packet. Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
	/* the RX buffer is reinterpreted as an 8-byte control request */
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
	rc = bcm63xx_ep0_read_complete(udc);
	dev_err(udc->dev, "missing SETUP packet\n");
	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	/* the gadget driver rejected the request: STALL ep0 */
	bcm63xx_set_stall(udc, &udc->bep[0], true);
	/* direction bit decides which data-phase state comes next */
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
		return EP0_OUT_DATA_PHASE_SETUP;
/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host. This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
	/* events are checked in priority order; each flag is one-shot */
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		/* replay SET_CONFIGURATION to the gadget driver */
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		/* replay SET_INTERFACE to the gadget driver */
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		/* a real SETUP packet arrived; parse it */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);
		/* bcm63xx_udc_pullup() is waiting for this */
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	/* reset and shutdown both force the state machine back to idle */
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
	switch (udc->ep0state) {
	/* set up descriptor to receive SETUP packet */
	bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
		BCM63XX_MAX_CTRL_PKT);
	ep0state = EP0_IDLE;
	return bcm63xx_ep0_do_idle(udc);
case EP0_IN_DATA_PHASE_SETUP:
	/*
	 * Normal case: TX request is in ep0_reply (queued by the
	 * callback), or will be queued shortly. When it's here,
	 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
	 *
	 * Shutdown case: Stop waiting for the reply. Just
	 * REQUEUE->IDLE. The gadget driver is NOT expected to
	 * queue anything else now.
	 */
	if (udc->ep0_reply) {
		bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
		ep0state = EP0_IN_DATA_PHASE_COMPLETE;
	} else if (shutdown) {
		ep0state = EP0_REQUEUE;
case EP0_IN_DATA_PHASE_COMPLETE: {
	/*
	 * Normal case: TX packet (ep0_reply) is in flight; wait for
	 * it to finish, then go back to REQUEUE->IDLE.
	 *
	 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
	 * completion to the gadget driver, then REQUEUE->IDLE.
	 */
	if (udc->ep0_req_completed) {
		udc->ep0_reply = NULL;
		bcm63xx_ep0_read_complete(udc);
		/*
		 * the "ack" sometimes gets eaten (see
		 * bcm63xx_ep0_do_idle)
		 */
		ep0state = EP0_REQUEUE;
	} else if (shutdown) {
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
		bcm63xx_ep0_nuke_reply(udc, 1);
		ep0state = EP0_REQUEUE;
case EP0_OUT_DATA_PHASE_SETUP:
	/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
	if (udc->ep0_reply) {
		bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
		ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
	} else if (shutdown) {
		ep0state = EP0_REQUEUE;
case EP0_OUT_DATA_PHASE_COMPLETE: {
	/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
	if (udc->ep0_req_completed) {
		udc->ep0_reply = NULL;
		bcm63xx_ep0_read_complete(udc);
		/* send 0-byte ack to host */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
		ep0state = EP0_OUT_STATUS_PHASE;
	} else if (shutdown) {
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		bcm63xx_ep0_nuke_reply(udc, 0);
		ep0state = EP0_REQUEUE;
case EP0_OUT_STATUS_PHASE:
	/*
	 * Normal case: 0-byte OUT ack packet is in flight; wait
	 * for it to finish, then go back to REQUEUE->IDLE.
	 *
	 * Shutdown case: just cancel the transmission. Don't bother
	 * calling the completion, because it originated from this
	 * function anyway. Then go back to REQUEUE->IDLE.
	 */
	if (udc->ep0_req_completed) {
		bcm63xx_ep0_read_complete(udc);
		ep0state = EP0_REQUEUE;
	} else if (shutdown) {
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
		udc->ep0_request = NULL;
		ep0state = EP0_REQUEUE;
case EP0_IN_FAKE_STATUS_PHASE: {
	/*
	 * Normal case: we spoofed a SETUP packet and are now
	 * waiting for the gadget driver to send a 0-byte reply.
	 * This doesn't actually get sent to the HW because the
	 * HW has already sent its own reply. Once we get the
	 * response, return to IDLE.
	 *
	 * Shutdown case: return to IDLE immediately.
	 *
	 * Note that the ep0 RX descriptor has remained queued
	 * (and possibly unfilled) during this entire transaction.
	 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
	 * or SET_INTERFACE transactions.
	 */
	struct usb_request *r = udc->ep0_reply;
	ep0state = EP0_IDLE;
	/* complete the fake status reply without touching the HW */
	bcm63xx_ep0_complete(udc, r, 0);
	udc->ep0_reply = NULL;
	ep0state = EP0_IDLE;
	/* no state change means no work was done this round */
	if (udc->ep0state == ep0state)
	udc->ep0state = ep0state;
/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do. Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	/* iterate until one_round() reports -EAGAIN (nothing to do) */
	while (bcm63xx_ep0_one_round(udc) == 0)
	spin_unlock_irq(&udc->lock);
1750 /***********************************************************************
1751 * Standard UDC gadget operations
1752 ***********************************************************************/
/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	/* SOF counter lives in a field of the USBD status register */
	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;
	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller back up from the shutdown state */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);
		/* clear all stalls/wedges left over from the last session */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);
		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
		/* ask the ep0 worker to wind everything down */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);
		schedule_work(&udc->ep0_wq);
		/* the worker sets EP0_SHUTDOWN when it is finished */
		if (udc->ep0state == EP0_SHUTDOWN)
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
	spin_unlock_irqrestore(&udc->lock, flags);
/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
	struct usb_gadget_driver *driver)
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	/* this controller needs a high-speed-capable gadget driver */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	spin_lock_irqsave(&udc->lock, flags);
	/* power up clocks and initialize FIFOs/endpoints/PHY */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);
	/* bind the gadget driver to this controller */
	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;
	spin_unlock_irqrestore(&udc->lock, flags);
/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	spin_lock_irqsave(&udc->lock, flags);
	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 */
	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);
	spin_unlock_irqrestore(&udc->lock, flags);
/* usb_gadget_ops vtable for this controller */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
1889 /***********************************************************************
1891 ***********************************************************************/
/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	/* latch cfg/iface/alt_iface fields out of the status register */
	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
		USBD_STATUS_ALTINTF_SHIFT;
	/* re-program endpoints for the new configuration */
	bcm63xx_ep_setup(udc);
/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;
	/* decode the negotiated speed from the status register */
	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
	/* this should never happen */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
		"received SETUP packet with invalid link speed\n");
	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we do want to restore all wedged endpoints.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
	/* walk every endpoint currently marked as wedged */
	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		/* clearing (reset path) also forgets the wedge */
		clear_bit(i, &udc->wedgemap);
/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
	struct bcm63xx_udc *udc = dev_id;
	bool disconnected = false, bus_reset = false;
	/* only consider events that are both raised and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
		usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
	/* ack everything we are about to handle */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* only report "link down" if we were previously up */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		/* reinitialize datapath state for the new session */
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);
		/* RESET clears all wedges (see bcm63xx_update_wedge) */
		bcm63xx_update_wedge(udc, false);
		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		/* SETUP refreshes the wedge-induced stalls */
		bcm63xx_update_wedge(udc, true);
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	spin_unlock(&udc->lock);
	/* gadget-driver callbacks run outside the spinlock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread. For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	bool is_done = false;
	spin_lock(&udc->lock);
	/* ack the buffer-done interrupt on this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	/* harvest completed descriptors; rc = bytes from the last BD */
	rc = iudma_read(udc, iudma);
	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);
		/* a single request could require multiple submissions */
		if (req->actual >= req->length || breq->bd_bytes > rc) {
			udc->ep0_req_completed = 1;
			/* let the ep0 worker advance the state machine */
			schedule_work(&udc->ep0_wq);
			/* "actual" on a ZLP is 1 byte */
			req->actual = min(req->actual, req->length);
			/* queue up the next BD (same request) */
			iudma_write(udc, iudma, breq);
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		/* short packet or full length means the request is done */
		if (req->actual >= req->length || breq->bd_bytes > rc) {
			list_del(&breq->queue);
			req->actual = min(req->actual, req->length);
			if (!list_empty(&bep->queue)) {
				struct bcm63xx_req *next;
				/* start the next pending request */
				next = list_first_entry(&bep->queue,
					struct bcm63xx_req, queue);
				iudma_write(udc, iudma, next);
			/* more BDs needed to finish this request */
			iudma_write(udc, iudma, breq);
	spin_unlock(&udc->lock);
	/* completion callback runs unlocked, after unmapping */
	usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
	req->complete(&bep->ep, req);
2125 /***********************************************************************
2127 ***********************************************************************/
/**
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
	struct bcm63xx_udc *udc = s->private;
	/* ep0 state machine snapshot */
	seq_printf(s, "ep0 state: %s\n",
		bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
		udc->ep0_req_reset ? "reset " : "",
		udc->ep0_req_set_cfg ? "set_cfg " : "",
		udc->ep0_req_set_iface ? "set_iface " : "",
		udc->ep0_req_shutdown ? "shutdown " : "",
		udc->ep0_request ? "pending " : "",
		udc->ep0_req_completed ? "completed " : "",
		udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		udc->cfg, udc->iface, udc->alt_iface);
	/* raw USBD register dump */
	seq_printf(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		usbd_readl(udc, USBD_CONTROL_REG),
		usbd_readl(udc, USBD_STRAPS_REG),
		usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events:  %08x; stall:  %08x\n",
		usbd_readl(udc, USBD_EVENTS_REG),
		usbd_readl(udc, USBD_STALL_REG));
/**
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
	struct bcm63xx_udc *udc = s->private;
	/* one section per IUDMA channel */
	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;
		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
			seq_printf(s, "control");
			seq_printf(s, "bulk");
			seq_printf(s, "interrupt");
		/* odd channels are TX, even channels are RX */
		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
		/* descriptor ring state from the DMA SRAM registers */
		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			sram2 >> 16, sram2 & 0xffff,
			sram3 >> 16, sram3 & 0xffff,
			usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
		/* count requests queued on the backing endpoint */
		list_for_each(pos, &iudma->bep->queue)
		seq_printf(s, "; %d queued\n", i);
		seq_printf(s, "\n");
		/* dump every buffer descriptor in the ring */
		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];
			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				d->len_stat >> 16, d->len_stat & 0xffff,
			/* mark current read/write positions in the ring */
			if (d == iudma->read_bd)
				seq_printf(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_printf(s, "   <<WR");
			seq_printf(s, "\n");
		seq_printf(s, "\n");
/* debugfs open hook: bind the seq_file show routine for "usbd" */
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
/* debugfs open hook: bind the seq_file show routine for "iudma" */
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
/* file_operations for the "usbd" debugfs entry (seq_file based) */
static const struct file_operations usbd_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_usbd_dbg_open,
	.llseek		= seq_lseek,
	.release	= single_release,
/* file_operations for the "iudma" debugfs entry (seq_file based) */
static const struct file_operations iudma_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm63xx_iudma_dbg_open,
	.llseek		= seq_lseek,
	.release	= single_release,
/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * Failure here is non-fatal: the driver works without debugfs.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
	struct dentry *root, *usbd, *iudma;
	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
	/* read-only files for the USBD and IUDMA state dumps */
	usbd = debugfs_create_file("usbd", 0400, root, udc,
	iudma = debugfs_create_file("iudma", 0400, root, udc,
	/* keep the dentries so cleanup can remove them later */
	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	/* error path: tear down whatever was created */
	debugfs_remove(usbd);
	debugfs_remove(root);
	dev_err(udc->dev, "debugfs is not available\n");
/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	/* NULL the pointers so a second cleanup call is harmless */
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
2326 /***********************************************************************
2328 ***********************************************************************/
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;
	/* device-managed allocation: freed automatically on detach */
	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	platform_set_drvdata(pdev, udc);
	/* platform data carries the chip-specific port number */
	dev_err(dev, "missing platform data\n");
	/* MEM resource #0: USBD control registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);
	/* MEM resource #1: IUDMA registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);
	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);
	/* full-speed-only mode via platform data or module parameter */
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
		udc->gadget.max_speed = USB_SPEED_FULL;
	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	dev_err(dev, "missing IRQ resource #0\n");
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		dev_err(dev, "missing IRQ resource #%d\n", i + 1);
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	/* error path: undo debugfs and hardware init */
	bcm63xx_udc_cleanup_debugfs(udc);
	bcm63xx_uninit_udc_hw(udc);
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* the UDC core must have unbound any gadget driver by now */
	BUG_ON(udc->driver);
	bcm63xx_uninit_udc_hw(udc);
/* platform driver glue; matched by name against the BSP device */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
		.name	= DRV_MODULE_NAME,
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);