/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>
/*
 * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
 * a contiguous 8kB physical address.
 */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))
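/*
 * Worked out: struct dma64desc below is four __le32 words (16 bytes), so
 * D64MAXDD is 8192 / 16 = 512 descriptors per ring at most.
 */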
39 /* transmit channel control */
40 #define D64_XC_XE 0x00000001 /* transmit enable */
41 #define D64_XC_SE 0x00000002 /* transmit suspend request */
42 #define D64_XC_LE 0x00000004 /* loopback enable */
43 #define D64_XC_FL 0x00000010 /* flush request */
44 #define D64_XC_PD 0x00000800 /* parity check disable */
45 #define D64_XC_AE 0x00030000 /* address extension bits */
46 #define D64_XC_AE_SHIFT 16
/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */
/* receive channel control */
#define D64_RC_RE		0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK		0x000000fe
#define D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH		0x00000200
/* overflow continue */
#define D64_RC_OC		0x00000400
/* parity check disable */
#define D64_RC_PD		0x00000800
/* address extension bits */
#define D64_RC_AE		0x00030000
#define D64_RC_AE_SHIFT		16
/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)
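/*
 * Note: dma_attach() below clears DMA_CTRL_ROC and DMA_CTRL_PEN by default
 * (see the "Default flags" comment there), and _dma_ctrlflags() drops
 * DMA_CTRL_PEN again if it finds the engine cannot actually support parity.
 */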
/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */
#define D64_FA_OFF_MASK		0xffff	/* offset */
#define D64_FA_SEL_MASK		0xf0000	/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */
/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must be <= 16KB */
#define D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define D64_CTRL2_AE		0x00030000
#define D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK	0x0ff00000
/* receive frame status */
#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
/*
 * packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile-time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */
#define BCMEXTRAHDROOM 172
#ifdef DEBUG
#define	DMA_ERROR(fmt, ...)					\
do {								\
	if (*di->msg_level & 1)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#define	DMA_TRACE(fmt, ...)					\
do {								\
	if (*di->msg_level & 2)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#else
#define	DMA_ERROR(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#define	DMA_TRACE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#endif				/* DEBUG */

#define	DMA_NONE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#define MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
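/*
 * Example: the hardware status registers report the current descriptor as a
 * byte offset from the ring base, so a ring index is recovered with
 * B2I(status0 & D64_RS0_CD_MASK, struct dma64desc) (see dma64_getnextrxp())
 * and a byte offset is written back with I2B(rxout, struct dma64desc)
 * (see dma_rxfill()).
 */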
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
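/*
 * Address extension example: dma64_dd_upd() and _dma_ddtable_init() below
 * move bits [31:30] of a physical address into the AE field, so e.g. a
 * buffer at pa = 0xc0001000 is programmed as addrlow = 0x00001000 with AE = 3.
 */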
/*
 * DMA descriptor.
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits & bufcount */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};
/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct pci_dev *pbus;	/* bus handle */

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	struct dma64regs __iomem *d64txregs;
	/* 64-bit dma rx engine registers */
	struct dma64regs __iomem *d64rxregs;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;	/* # tx descriptors tunable */
	u16 txin;	/* index of next descriptor to reclaim */
	u16 txout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper
				 * stack, e.g. some rx pkt buffers will be
				 * bridged to tx side without byte copying.
				 * The extra headroom needs to be large enough
				 * to fit txheader needs. Some dongle driver may
				 * not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	u32 ddoffsetlow;
	/* add to get dma address of descriptor ring, high 32 bits */
	u32 ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	u32 dataoffsetlow;
	/* add to get dma address of data buffer, high 32 bits */
	u32 dataoffsethigh;
	/* descriptor base need to be aligned or not */
	bool aligndesc_4k;
};
/*
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
 */
static uint dma_msg_level;
/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}
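/*
 * dma64_dd_upd() below uses dma64_dd_parity() to give every posted descriptor
 * even parity: when the four descriptor words XOR to odd parity and
 * DMA_CTRL_PEN is enabled, the D64_CTRL2_PARITY bit is set to compensate.
 */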
/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1); /* faster than %, but n must be power of 2 */
}

static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}
static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}
static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}
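/*
 * All of the above relies on ntxd/nrxd being powers of two, so wrapping is a
 * simple mask: with, say, ntxd == 64, nexttxd(di, 63) == 0 and
 * ntxdactive(di, h, t) counts (t - h) modulo 64.
 */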
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR("NULL dma handle\n");
		return 0;
	}

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control, control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
{
	u32 w;

	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);

	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channel are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs))
			DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
				  di->name);
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs))
			DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
				  di->name);
		return true;
	}

	return false;
}
static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}
/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, pap);
}
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;

	while (size >>= 1)
		bitpos++;

	return bitpos;
}
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the first allocation does cross a page boundary,
 * it is freed and the allocation is redone at a location aligned to the
 * descriptor ring size, which guarantees that the ring cannot cross a page
 * boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
	if (va == NULL)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) !=
	    (desc_strtaddr & boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
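/*
 * For example (values are illustrative): a 256-descriptor ring is 4096 bytes;
 * if the first allocation happens to straddle a page it is freed and redone
 * with alignbits = dma_align_sizetobits(4096) = 12, i.e. 4 KB aligned, so the
 * ring can no longer cross a page boundary.
 */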
static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   void __iomem *dmaregstx, void __iomem *dmaregsrx,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
{
	struct dma_info *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer */
	di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
	di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
	DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
		  name, "DMA64",
		  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		  rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero
	 *     based memory, need offset
	 *     Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
	 *     swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* add offset for pcie with DMA64 bus */
	di->ddoffsethigh = SI_PCIE_DMA_H32;
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = false;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* do the descriptors need to be aligned and, if yes, on 4K/8K boundaries? */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	DMA_NONE("DMA descriptor align_needed %d, align %d\n",
		 di->aligndesc_4k, di->dmadesc_align);
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		  di->ddoffsetlow, di->ddoffsethigh,
		  di->dataoffsetlow, di->dataoffsethigh,
		  di->addrext);

	return (struct dma_pub *) di;

 fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}
static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			    cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}
/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
		}
	} else {
		/* DMA64 32bits address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}
static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	DMA_TRACE("%s:\n", di->name);

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
/*
 * !! rx entry routine
 * returns the number of packets in the next frame, or 0 if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 *   supported with a pkt chain
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 *   buffer data. After it reaches the max size of a buffer, the data continues
 *   in the next DMA descriptor buffer WITHOUT a DMA header
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
 next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				struct dma64desc);
			DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
				  di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR("%s: bad frame length (%d)\n",
				  di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}
static bool dma64_rxidle(struct dma_info *di)
{
	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
/*
 * post receive buffers
 *  return false if refill failed completely and the ring is empty; this will
 *  stall the rx dma and the user might want to call rxfill again asap. This is
 *  unlikely on a memory-rich NIC, but common on memory-constrained dongles
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	DMA_TRACE("%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
		if (p == NULL) {
			DMA_ERROR("%s: out of rxbufs\n", di->name);
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR("%s: ring is empty !\n", di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}
void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}
void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}
/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}
/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}

void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}

bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	       ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
		D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}
bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 *   a dropped frame could be fatal and cause many subsequent hard-to-debug
 *   problems
 */
int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
	struct dma_info *di = (struct dma_info *)pub;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	DMA_TRACE("%s:\n", di->name);

	txout = di->txout;

	/*
	 * obtain and initialize transmit descriptor entry.
	 */
	data = p->data;
	len = p->len;

	/* no use to transmit a zero length packet */
	if (len == 0)
		return 0;

	/* return nonzero if out of tx descriptors */
	if (nexttxd(di, txout) == di->txin)
		goto outoftxd;

	/* get physical address of buffer start */
	pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

	/* With a DMA segment list, Descriptor table is filled
	 * using the segment list instead of looping over
	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
	 * is when end of segment list is reached.
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	txout = nexttxd(di, txout);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, struct dma64desc));

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR("%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		struct dma64regs __iomem *dregs = di->d64txregs;

		end = (u16) (B2I(((R_REG(&dregs->status0) &
				   D64_XS0_CD_MASK) -
				  di->xmtptrbase) & D64_XS0_CD_MASK,
				 struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS0_CD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE("bogus curr: start %d end %d txout %d\n",
		 start, end, di->txout);
	return NULL;
}
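/*
 * Typical use (outside this file): the tx-completion path is expected to call
 * dma_getnexttxp(pub, DMA_RANGE_TRANSMITTED) repeatedly and process/free each
 * returned skb, while dma_txreclaim() above simply frees everything handed
 * back for the caller-specified range, e.g. when a fifo is flushed or reset.
 */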
/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = (struct dma_info *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = (struct sk_buff *)di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}