/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

#include <bcmendian.h>

#if defined(__mips__)
#include <asm/addrspace.h>
#endif
#ifdef BCMDBG
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printk args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printk args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)
#define	d64txregs	dregs.d64_u.txregs_64
#define	d64rxregs	dregs.d64_u.rxregs_64
#define	txd64		dregs.d64_u.txd_64
#define	rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)

#define R_SM(r)		(*(r))
#define W_SM(r, v)	(*(r) = (v))
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */
	union {
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;
	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */
	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* descriptor base needs to be aligned or not */
} dma_info_t;
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
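
/*
 * Worked examples (illustrative additions, not from the original source):
 * with di->ntxd == 64, NTXDACTIVE(60, 4) == TXD(4 - 60) == (-56 & 63) == 8,
 * so the power-of-2 mask handles ring wrap-around without a modulo.
 * Likewise, with sizeof(dma64dd_t) == 16, I2B(5, dma64dd_t) == 80 and
 * B2I(80, dma64dd_t) == 5, converting between descriptor indexes and the
 * byte offsets used by the hardware ptr/status registers.
 */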
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
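
/*
 * Example (illustrative, not from the original source): for a 32-bit
 * physical address 0xC0001000, ae = (0xC0001000 & PCI32ADDR_HIGH) >>
 * PCI32ADDR_HIGH_SHIFT == 3; the two high bits are cleared from the address
 * and carried in the descriptor's AE field instead (see dma64_dd_upd()).
 */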
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
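
/*
 * Callers reach the routines above through the function table installed by
 * dma_attach(); an illustrative call (exact wrapper macro, if any, is an
 * assumption and may differ in hnddma.h):
 *
 *	di->hnddma.di_fn->txfast(&di->hnddma, p, true);
 */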
struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
			      void *dmaregstx, void *dmaregsrx, uint ntxd,
			      uint nrxd, uint rxbufsize, int rxextheadroom,
			      uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printk(KERN_ERR "dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;
	/* old chips w/o sb are no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);
	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64", osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;
	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;
	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned, and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relaxes the alignment requirement */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
			goto fail;
		}
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}
	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct hnddma_pub *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
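
/*
 * Typical usage (hypothetical caller sketch, not from this file; the buffer
 * and count values are placeholders):
 *
 *	struct hnddma_pub *dmah;
 *
 *	dmah = dma_attach(osh, "wl0", sih, dmaregstx, dmaregsrx,
 *			  ntxd, nrxd, rxbufsize, -1, nrxpost, rxoffset,
 *			  &msg_level);
 *	if (dmah == NULL)
 *		goto err;
 *
 * ntxd/nrxd must be powers of 2 and at most D64MAXDD; passing -1 for
 * rxextheadroom selects the BCMEXTRAHDROOM default.
 */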
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
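{
	/* XOR-fold the word down to a single bit (reconstructed body, an
	 * assumption; any implementation returning the odd-parity bit of
	 * "data" satisfies the declared intent above).
	 */
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}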
#define DMA64_DD_PARITY(dd) \
	parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
static void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
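
/*
 * Note: with DMA_CTRL_PEN enabled, D64_CTRL2_PARITY is set whenever the four
 * descriptor words XOR to odd parity, so the stored descriptor always ends
 * up with even overall parity for the hardware to verify.
 */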
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	return dma64_alloc(di, direction);
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (di->txd64)
		DMA_FREE_CONSISTENT(di->osh,
				    ((s8 *)di->txd64 -
				     di->txdalign), di->txdalloc,
				    (di->txdpaorig), &di->tx_dmah);
	if (di->rxd64)
		DMA_FREE_CONSISTENT(di->osh,
				    ((s8 *)di->rxd64 -
				     di->rxdalign), di->rxdalloc,
				    (di->rxdpaorig), &di->rx_dmah);

	/* free packet pointer vectors */
	if (di->txp)
		kfree((void *)di->txp);
	if (di->rxp)
		kfree((void *)di->rxp);

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree((void *)di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(di->osh, &di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->osh, di->d64txregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->osh, di->d64rxregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	}
	return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = PHYSADDRLO(pa);
		else
			di->rcvptrbase = PHYSADDRLO(pa);
	}

	if ((di->ddoffsetlow == 0)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(di->osh, &di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(di->osh, &di->d64txregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		} else {
			W_REG(di->osh, &di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(di->osh, &di->d64rxregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;
		ASSERT(di->addrext);
		ASSERT(PHYSADDRHI(pa) == 0);

		/* shift the high bit(s) from pa to ae */
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
		    PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(di->osh, &di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(di->osh, &di->d64txregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(di->osh, &di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(di->osh, &di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(di->osh, &di->d64rxregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(di->osh, &di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset((void *)di->rxd64, '\0',
	       (di->nrxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be initialized
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	control =
	    (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(di->osh, &di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 *   if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
 *      with a chain of packets
 *   otherwise, it's treated as a giant packet and will be tossed.
 *   The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
 *   After it reaches the max size of a buffer, the data continues in the next DMA descriptor
 *   buffer WITHOUT a DMA header
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = le16_to_cpu(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
#define OSL_UNCACHED(va)	((void *)KSEG1ADDR((va)))
	if (!len) {
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur =
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			pkt_buf_free_skb(di->osh, head, false);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
/* post receive buffers
 *  return true if refill failed completely and the ring is empty
 *  this will stall the rx dma and the user might want to call rxfill again asap
 *  This is unlikely on a memory-rich NIC, but common on a memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = pkt_buf_get_skb(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, p->data,
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(di->osh, &di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, dma64dd_t));

	return ring_empty;
}
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	end =
	    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}
/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return NULL;

	end =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
		  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
		dma64dd_t);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		pkt_buf_free_skb(di->osh, p, false);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}

static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	curr =
	    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	return NTXDACTIVE(curr, di->txout);
}

static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);

	return NTXDACTIVE(di->txin, ptr);
}

static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(di->osh, &di->d64txregs->control);
		W_REG(di->osh, &di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
			/* We *can* disable it, so it is supported;
			 * restore the control register
			 */
			W_REG(di->osh, &di->d64txregs->control,
			      control);
		} else {
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
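
/*
 * Usage sketch (illustrative, not taken from this file): a driver wanting
 * descriptor parity requests it before enabling the engine and re-checks
 * the returned flags, because the probe above silently clears DMA_CTRL_PEN
 * on hardware without parity support:
 *
 *	uint f = di->hnddma.di_fn->ctrlflags(&di->hnddma,
 *					     DMA_CTRL_PEN, DMA_CTRL_PEN);
 *	if (!(f & DMA_CTRL_PEN))
 *		... parity unavailable ...
 */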
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->hnddma.txavail);
	else {
		ASSERT(0);
	}
	return 0;
}
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	ASSERT(size);
	ASSERT(!(size & (size - 1)));
	while (size >>= 1) {
		bitpos++;
	}
	return bitpos;
}
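
/* Example (illustrative): dma_align_sizetobits(4096) == 12, so a 4096-byte
 * ring that straddled a page boundary is re-allocated on a 1 << 12 boundary
 * and can no longer cross a 4 KB page.
 */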
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
				  dmah);
	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
		va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
					  descpa, dmah);
	}
	return va;
}
/* 64-bit DMA functions */
static void dma64_txinit(dma_info_t *di)
{
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be initialized
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(di->osh, &di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be initialized
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}
static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}
static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}
static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			pkt_buf_free_skb(di->osh, p, true);
	}
}
static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig, &di->tx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
	} else {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig, &di->rx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
	}

	return true;
}
static bool dma64_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool dma64_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
static bool dma64_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(di->osh, &di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	u32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
/* !! tx entry routine
 * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
 */
static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
				    bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = p->data;
		len = p->len;
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif				/* BCM_DMAPAD */
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			memset(&di->txp_dmah[txout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
			     &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	pkt_buf_free_skb(di->osh, p0, true);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (u16) (B2I
			   (((R_REG(di->osh, &dregs->status0) &
			      D64_XS0_CD_MASK) -
			     di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(di->osh, &dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d\n",
		  start, end, di->txout));
	return NULL;
}
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs)
{
	u32 w;
	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    ((((R_REG(di->osh, &di->d64txregs->status1) &
			D64_XS1_AD_MASK)
		       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the map */
		if (DMASGLIST_ENAB) {
			memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
			       sizeof(hnddma_seg_map_t));
			memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	struct osl_info *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((sih->bustype == SI_BUS) ||
			    ((sih->bustype == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;
	}
	ASSERT(0);		/* DMA hardware not supported by this driver */
	return DMADDRWIDTH_64;
}