/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more than
 * may seem needed. These are provided mainly for maximum flexibility
 * and especially to facilitate a simpler HIF implementation. There
 * are per-CopyEngine recv, send, and watermark contexts. These are
 * supplied by the caller when a recv, send, or watermark handler
 * is established and they are echoed back to the caller when the
 * respective callbacks are invoked. There is also a per-transfer
 * context supplied by the caller when a buffer (or sendlist) is
 * sent and when a buffer is enqueued for recv. These per-transfer
 * contexts are echoed back to the caller when the buffer is
 * sent/received.
 */
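
/*
 * For illustration only -- a minimal sketch (not part of the original
 * file) of how a hypothetical caller might use the context scheme
 * described above; real callers live in the HIF/PCI layer:
 *
 *	static void my_send_done(struct ce_state *state, void *ctx,
 *				 u32 buffer, unsigned int nbytes,
 *				 unsigned int transfer_id)
 *	{
 *		struct sk_buff *skb = ctx;	(the echoed per-transfer context)
 *		dev_kfree_skb(skb);
 *	}
 *
 *	ath10k_ce_send_cb_register(ce_state, my_send_done, 0);
 *	ath10k_ce_send(ce_state, skb, paddr, skb->len, transfer_id, 0);
 */
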
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *indicator_addr;

	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
		return;
	}

	/* workaround for QCA988x_1.0 HW CE */
	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;

	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
	} else {
		unsigned long irq_flags;

		local_irq_save(irq_flags);
		iowrite32(1, indicator_addr);

		/*
		 * A PCIe write waits for the ACK in IPQ8K, so there is
		 * no need to read back the value.
		 */
		(void)ioread32(indicator_addr);
		(void)ioread32(indicator_addr); /* conservative */

		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);

		iowrite32(0, indicator_addr);
		local_irq_restore(irq_flags);
	}
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
						       u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ce_state *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
				 unsigned int transfer_id,
				 unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: buffer too large (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ath10k_pci_wake(ar);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr   = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags  = __cpu_to_le16(desc_flags);

	/* Fill the shadow copy first, then publish it to the HW ring */
	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

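/*
 * A note on the ring-index math above (added for clarity, not in the
 * original file): ring sizes are powers of two, so nentries_mask is
 * nentries - 1 and CE_RING_DELTA(mask, write_index, sw_index - 1)
 * counts free slots while keeping one slot empty to tell a full ring
 * from an empty one. For example, with nentries = 8 (mask = 7),
 * write_index = 6 and sw_index = 2:
 *
 *	delta = (sw_index - 1 - write_index) & mask = (1 - 6) & 7 = 3
 *
 * i.e. three more sends fit before the ring is considered full.
 */
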
int ath10k_ce_send(struct ce_state *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
				unsigned int nbytes, u32 flags)
{
	unsigned int num_items = sendlist->num_items;
	struct ce_sendlist_item *item;

	item = &sendlist->item[num_items];
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	sendlist->num_items++;
}

int ath10k_ce_sendlist_send(struct ce_state *ce_state,
			    void *per_transfer_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id)
{
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ce_sendlist_item *item;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sendlist->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	int i, delta, ret = -ENOMEM;

	spin_lock_bh(&ar_pci->ce_lock);

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	if (delta >= num_items) {
		/*
		 * Handle all but the last item uniformly.
		 */
		for (i = 0; i < num_items - 1; i++) {
			item = &sendlist->item[i];
			ret = ath10k_ce_send_nolock(ce_state,
						    CE_SENDLIST_ITEM_CTXT,
						    (u32) item->data,
						    item->u.nbytes, transfer_id,
						    item->flags |
						    CE_SEND_FLAG_GATHER);
			if (ret)
				ath10k_warn("CE send failed for item: %d\n", i);
		}
		/*
		 * Provide valid context pointer for final item.
		 */
		item = &sendlist->item[i];
		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
					    (u32) item->data, item->u.nbytes,
					    transfer_id, item->flags);
		if (ret)
			ath10k_warn("CE send failed for last item: %d\n", i);
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

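/*
 * For illustration only -- a hypothetical caller building a two-fragment
 * gather transfer (names made up). The hardware "gathers" both fragments
 * into a single destination buffer and raises a single interrupt:
 *
 *	struct ce_sendlist sl = {};
 *
 *	ath10k_ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0);
 *	ath10k_ce_sendlist_buf_add(&sl, body_paddr, body_len, 0);
 *	ret = ath10k_ce_sendlist_send(ce_state, my_ctx, &sl, transfer_id);
 */
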
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ce_ring_state *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ath10k_pci_wake(ar);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

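/*
 * For illustration only -- a hypothetical receive path (names made up)
 * would DMA-map a buffer and post it as one of the "anonymous receive
 * buffers" mentioned in the header comment:
 *
 *	paddr = dma_map_single(dev, skb->data, buf_sz, DMA_FROM_DEVICE);
 *	ret = ath10k_ce_recv_buf_enqueue(ce_state, skb, paddr);
 *
 * A return of -EIO means the ring is full; the caller would unmap the
 * buffer and retry later.
 */
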
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ce_ring_state *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ce_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ce_ring_state *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	int ret = -EIO;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		ath10k_pci_wake(ar);
		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		ath10k_pci_sleep(ar);
	}
	read_index = src_ring->hw_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct ce_desc *sbase = src_ring->shadow_base;
		struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(sdesc->addr);
		*nbytesp = __le16_to_cpu(sdesc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	}

	return ret;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ce_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	void *transfer_context;
	u32 buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;

	ath10k_pci_wake(ar);
	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	if (ce_state->recv_cb) {
		/*
		 * Pop completed recv buffers and call the registered
		 * recv callback for each
		 */
		while (ath10k_ce_completed_recv_next_nolock(ce_state,
							    &transfer_context,
							    &buf, &nbytes,
							    &id, &flags) == 0) {
			/* Drop the lock around the callback so it may
			 * safely re-enter this layer. */
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->recv_cb(ce_state, transfer_context, buf,
					  nbytes, id, flags);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	if (ce_state->send_cb) {
		/*
		 * Pop completed send buffers and call the registered
		 * send callback for each
		 */
		while (ath10k_ce_completed_send_next_nolock(ce_state,
							    &transfer_context,
							    &buf, &nbytes,
							    &id) == 0) {
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->send_cb(ce_state, transfer_context,
					  buf, nbytes, id);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;
	u32 intr_summary;

	ath10k_pci_wake(ar);
	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;

	ath10k_pci_wake(ar);

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;

	ath10k_pci_wake(ar);
	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
		struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
	}
	ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb)(struct ce_state *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb)(struct ce_state *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id,
						unsigned int flags))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ce_state *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_ring_state *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ce_ring_state *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ce_ring_state);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	ath10k_pci_sleep(ar);

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ce_state *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_ring_state *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ce_ring_state *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ce_ring_state);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	ath10k_pci_sleep(ar);

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Zero the descriptor ring up front so garbage data cannot
	 * crash the system during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
					     unsigned int ce_id,
					     const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = NULL;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	if (!ar_pci->ce_id_to_state[ce_id]) {
		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
		if (ce_state == NULL) {
			spin_unlock_bh(&ar_pci->ce_lock);
			return NULL;
		}

		ar_pci->ce_id_to_state[ce_id] = ce_state;
		ce_state->ar = ar;
		ce_state->id = ce_id;
		ce_state->ctrl_addr = ctrl_addr;
		ce_state->state = CE_RUNNING;
		/* Save attribute flags */
		ce_state->attr_flags = attr->flags;
		ce_state->src_sz_max = attr->src_sz_max;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr)
{
	struct ce_state *ce_state;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		return NULL;
	}

	if (attr->src_nentries) {
		if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
			ath10k_err("Failed to initialize CE src ring for ID: %d\n",
				   ce_id);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	if (attr->dest_nentries) {
		if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
				   ce_id);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	/* Enable CE error interrupts */
	ath10k_pci_wake(ar);
	ath10k_ce_error_intr_enable(ar, ctrl_addr);
	ath10k_pci_sleep(ar);

	return ce_state;
}

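/*
 * For illustration only -- a hypothetical attribute set for a
 * host-to-target CE with 16 source descriptors of up to 256 bytes and
 * no destination ring. The flags/src_nentries/src_sz_max/dest_nentries
 * fields are the ones this file reads from struct ce_attr; CE_ATTR_FLAGS
 * is assumed to be provided by the PCI layer:
 *
 *	static const struct ce_attr my_ce_attr = {
 *		.flags = CE_ATTR_FLAGS,
 *		.src_nentries = 16,
 *		.src_sz_max = 256,
 *		.dest_nentries = 0,
 *	};
 *
 *	ce_state = ath10k_ce_init(ar, ce_id, &my_ce_attr);
 */
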
void ath10k_ce_deinit(struct ce_state *ce_state)
{
	unsigned int ce_id = ce_state->id;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ce_state->state = CE_UNUSED;
	ar_pci->ce_id_to_state[ce_id] = NULL;

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->src_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->src_ring->base_addr_owner_space,
				    ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->dest_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->dest_ring->base_addr_owner_space,
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);