/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
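
/*
 * Illustrative sketch (not part of the driver): how a caller such as the
 * HIF/PCI layer is expected to drive this API, assuming a hypothetical
 * DMA-mapped buffer "paddr" of "len" bytes and caller-owned contexts.
 *
 *      // keep the destination side supplied with anonymous recv buffers
 *      ath10k_ce_recv_buf_enqueue(rx_pipe, my_rx_ctx, paddr);
 *
 *      // initiate a transfer on the source side; transfer_id is echoed
 *      // back as meta-data through the send-completion path
 *      ath10k_ce_send(tx_pipe, my_tx_ctx, paddr, len, transfer_id, 0);
 *
 *      // on interrupt, completed descriptors are reaped and any registered
 *      // per-CE send/recv callbacks run with the per-transfer contexts
 *      ath10k_ce_per_engine_service_any(ar);
 */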
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
                                                       u32 ce_ctrl_addr,
                                                       unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
                                                      u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
                                                      u32 ce_ctrl_addr,
                                                      unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    u32 addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar,
                                           ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
                           CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     u32 addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
                                                u32 ce_ctrl_addr,
                                                unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_HIGH_MASK) |
                           SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
                                                  u32 ce_ctrl_addr,
                                                  unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_LOW_MASK) |
                           SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_HIGH_MASK) |
                           DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_LOW_MASK) |
                           DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
                                                        u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
                                                         u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
                                               u32 ce_ctrl_addr)
{
        u32 misc_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + MISC_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
                           misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int mask)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
                                 void *per_transfer_context,
                                 u32 buffer,
                                 unsigned int nbytes,
                                 unsigned int transfer_id,
                                 unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_desc *desc, *sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        u32 ctrl_addr = ce_state->ctrl_addr;
        u32 desc_flags = 0;
        int ret = 0;

        if (nbytes > ce_state->src_sz_max)
                ath10k_warn("%s: sending more than max allowed (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);

        ath10k_pci_wake(ar);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) <= 0)) {
                ret = -ENOSR;
                goto exit;
        }

        desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
                                   write_index);
        sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

        desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

        if (flags & CE_SEND_FLAG_GATHER)
                desc_flags |= CE_DESC_FLAGS_GATHER;
        if (flags & CE_SEND_FLAG_BYTE_SWAP)
                desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

        sdesc->addr = __cpu_to_le32(buffer);
        sdesc->nbytes = __cpu_to_le16(nbytes);
        sdesc->flags = __cpu_to_le16(desc_flags);

        /* Fill the shadow copy first, then publish it to the HW ring */
        *desc = *sdesc;

        src_ring->per_transfer_context[write_index] = per_transfer_context;

        /* Update Source Ring Write Index */
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

        /* WORKAROUND: defer the HW write index update for gather items;
         * it is pushed once by the final, non-gather item of the list.
         */
        if (!(flags & CE_SEND_FLAG_GATHER))
                ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

        src_ring->write_index = write_index;
exit:
        ath10k_pci_sleep(ar);
        return ret;
}
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_context,
                   u32 buffer,
                   unsigned int nbytes,
                   unsigned int transfer_id,
                   unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                    buffer, nbytes, transfer_id, flags);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
                                unsigned int nbytes, u32 flags)
{
        unsigned int num_items = sendlist->num_items;
        struct ce_sendlist_item *item;

        item = &sendlist->item[num_items];
        item->data = buffer;
        item->u.nbytes = nbytes;
        item->flags = flags;
        sendlist->num_items++;
}
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
                            void *per_transfer_context,
                            struct ce_sendlist *sendlist,
                            unsigned int transfer_id)
{
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_sendlist_item *item;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int num_items = sendlist->num_items;
        unsigned int sw_index;
        unsigned int write_index;
        int i, delta, ret = -ENOMEM;

        spin_lock_bh(&ar_pci->ce_lock);

        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

        if (delta >= num_items) {
                /*
                 * Handle all but the last item uniformly.
                 */
                for (i = 0; i < num_items - 1; i++) {
                        item = &sendlist->item[i];
                        ret = ath10k_ce_send_nolock(ce_state,
                                                    CE_SENDLIST_ITEM_CTXT,
                                                    (u32) item->data,
                                                    item->u.nbytes, transfer_id,
                                                    item->flags |
                                                    CE_SEND_FLAG_GATHER);
                        if (ret)
                                ath10k_warn("CE send failed for item: %d\n", i);
                }
                /*
                 * Provide valid context pointer for final item.
                 */
                item = &sendlist->item[i];
                ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                            (u32) item->data, item->u.nbytes,
                                            transfer_id, item->flags);
                if (ret)
                        ath10k_warn("CE send failed for last item: %d\n", i);
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
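
/*
 * Illustrative sketch (not part of the driver): sending a two-fragment
 * gather list, assuming two hypothetical DMA addresses hdr_paddr/body_paddr.
 * Hardware coalesces the fragments into a single destination buffer and
 * raises a single copy-complete interrupt.
 *
 *      struct ce_sendlist sendlist = {};
 *
 *      ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
 *      ath10k_ce_sendlist_buf_add(&sendlist, body_paddr, body_len, 0);
 *      ath10k_ce_sendlist_send(tx_pipe, my_tx_ctx, &sendlist, transfer_id);
 */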
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
                               void *per_recv_context,
                               u32 buffer)
{
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        ath10k_pci_wake(ar);

        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

                /* Update destination descriptor */
                desc->addr = __cpu_to_le32(buffer);
                desc->nbytes = 0;

                dest_ring->per_transfer_context[write_index] =
                                                        per_recv_context;

                /* Update Destination Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
                ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
                dest_ring->write_index = write_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        ath10k_pci_sleep(ar);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp,
                                                unsigned int *flagsp)
{
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;

        struct ce_desc *base = dest_ring->base_addr_owner_space;
        struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
        struct ce_desc sdesc;
        u16 nbytes;

        /* Copy in one go for performance reasons */
        sdesc = *desc;

        nbytes = __le16_to_cpu(sdesc.nbytes);
        if (nbytes == 0) {
                /*
                 * This closes a relatively unusual race where the Host
                 * sees the updated DRRI before the update to the
                 * corresponding descriptor has completed. We treat this
                 * as a descriptor that is not yet done.
                 */
                return -EIO;
        }

        desc->nbytes = 0;

        /* Return data from completed destination descriptor */
        *bufferp = __le32_to_cpu(sdesc.addr);
        *nbytesp = nbytes;
        *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

        if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
                *flagsp = CE_RECV_FLAG_SWAPPED;
        else
                *flagsp = 0;

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];

        /* sanity */
        dest_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;

        return 0;
}
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp,
                                  unsigned int *flagsp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp, flagsp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
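
/*
 * Illustrative sketch (not part of the driver): besides the interrupt path,
 * completed receive descriptors can also be reaped by polling; a nonzero
 * return means no finished descriptor is available yet.
 *
 *      void *ctx;
 *      u32 paddr;
 *      unsigned int len, id, flags;
 *
 *      while (ath10k_ce_completed_recv_next(rx_pipe, &ctx, &paddr,
 *                                           &len, &id, &flags) == 0)
 *              ; // hand the buffer tracked by ctx (len valid bytes) upstream
 */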
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp)
{
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        dest_ring = ce_state->dest_ring;

        if (!dest_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = dest_ring->nentries_mask;
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        if (write_index != sw_index) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

                /* Return data from completed destination descriptor */
                *bufferp = __le32_to_cpu(desc->addr);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                dest_ring->per_transfer_context[sw_index];

                /* sanity */
                dest_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                dest_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp)
{
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        struct ce_desc *sdesc, *sbase;
        unsigned int read_index;

        if (src_ring->hw_index == sw_index) {
                /*
                 * The SW completion index has caught up with the cached
                 * version of the HW completion index.
                 * Update the cached HW completion index to see whether
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
                ath10k_pci_wake(ar);
                src_ring->hw_index =
                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
                src_ring->hw_index &= nentries_mask;
                ath10k_pci_sleep(ar);
        }

        read_index = src_ring->hw_index;

        if ((read_index == sw_index) || (read_index == 0xffffffff))
                return -EIO;

        sbase = src_ring->shadow_base;
        sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

        /* Return data from completed source descriptor */
        *bufferp = __le32_to_cpu(sdesc->addr);
        *nbytesp = __le16_to_cpu(sdesc->nbytes);
        *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
                           CE_DESC_FLAGS_META_DATA);

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        src_ring->per_transfer_context[sw_index];

        /* sanity */
        src_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        src_ring->sw_index = sw_index;

        return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp)
{
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        src_ring = ce_state->src_ring;

        if (!src_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (write_index != sw_index) {
                struct ce_desc *base = src_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = __le32_to_cpu(desc->addr);
                *nbytesp = __le16_to_cpu(desc->nbytes);
                *transfer_idp = MS(__le16_to_cpu(desc->flags),
                                   CE_DESC_FLAGS_META_DATA);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                src_ring->per_transfer_context[sw_index];

                /* sanity */
                src_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ce_state->ctrl_addr;
        void *transfer_context;
        u32 buf;
        unsigned int nbytes;
        unsigned int id;
        unsigned int flags;

        ath10k_pci_wake(ar);
        spin_lock_bh(&ar_pci->ce_lock);

        /* Clear the copy-complete interrupts that will be handled here. */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
                                          HOST_IS_COPY_COMPLETE_MASK);

        if (ce_state->recv_cb) {
                /*
                 * Pop completed recv buffers and call the registered
                 * recv callback for each
                 */
                while (ath10k_ce_completed_recv_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf, &nbytes,
                                                            &id, &flags) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->recv_cb(ce_state, transfer_context, buf,
                                          nbytes, id, flags);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        if (ce_state->send_cb) {
                /*
                 * Pop completed send buffers and call the registered
                 * send callback for each
                 */
                while (ath10k_ce_completed_send_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf, &nbytes,
                                                            &id) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->send_cb(ce_state, transfer_context,
                                          buf, nbytes, id);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        /*
         * Misc CE interrupts are not being handled, but still need
         * to be cleared.
         */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

        spin_unlock_bh(&ar_pci->ce_lock);
        ath10k_pci_sleep(ar);
}
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id;
        u32 intr_summary;

        ath10k_pci_wake(ar);
        intr_summary = CE_INTERRUPT_SUMMARY(ar);

        for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
                if (intr_summary & (1 << ce_id))
                        intr_summary &= ~(1 << ce_id);
                else
                        /* no intr pending on this CE */
                        continue;

                ath10k_ce_per_engine_service(ar, ce_id);
        }

        ath10k_pci_sleep(ar);
}
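
/*
 * Illustrative sketch (not part of the driver): with a single shared MSI or
 * legacy interrupt, a hypothetical top-level ISR in the PCI layer would defer
 * per-CE demultiplexing to the summary-register walk above:
 *
 *      static irqreturn_t example_isr(int irq, void *arg)
 *      {
 *              struct ath10k *ar = arg;
 *
 *              ath10k_ce_per_engine_service_any(ar);
 *              return IRQ_HANDLED;
 *      }
 */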
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
                                                int disable_copy_compl_intr)
{
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;

        ath10k_pci_wake(ar);

        if ((!disable_copy_compl_intr) &&
            (ce_state->send_cb || ce_state->recv_cb))
                ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
        else
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

        ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
}
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id;

        ath10k_pci_wake(ar);
        for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
                struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
                u32 ctrl_addr = ce_state->ctrl_addr;

                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
        }
        ath10k_pci_sleep(ar);
}
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
                                void (*send_cb)(struct ath10k_ce_pipe *ce_state,
                                                void *transfer_context,
                                                u32 buffer,
                                                unsigned int nbytes,
                                                unsigned int transfer_id),
                                int disable_interrupts)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->send_cb = send_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
        spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
                                void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
                                                void *transfer_context,
                                                u32 buffer,
                                                unsigned int nbytes,
                                                unsigned int transfer_id,
                                                unsigned int flags))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->recv_cb = recv_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, 0);
        spin_unlock_bh(&ar_pci->ce_lock);
}
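
/*
 * Illustrative sketch (not part of the driver): a hypothetical HIF-layer
 * completion handler registered against a pipe. The per-CE context is the
 * ce_state itself; the per-transfer context is whatever was passed to
 * ath10k_ce_send()/ath10k_ce_recv_buf_enqueue() (an skb, for example).
 *
 *      static void example_send_done(struct ath10k_ce_pipe *ce_state,
 *                                    void *transfer_context, u32 buffer,
 *                                    unsigned int nbytes,
 *                                    unsigned int transfer_id)
 *      {
 *              // unmap/free the buffer tracked by transfer_context
 *      }
 *
 *      ath10k_ce_send_cb_register(tx_pipe, example_send_done, 0);
 */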
static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
                                   struct ath10k_ce_pipe *ce_state,
                                   const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries = attr->src_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->src_ring) {
                WARN_ON(ce_state->src_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
        src_ring = ce_state->src_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        src_ring->nentries = nentries;
        src_ring->nentries_mask = nentries - 1;

        ath10k_pci_wake(ar);
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;

        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;
        ath10k_pci_sleep(ar);

        src_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        if (!src_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;
                return -ENOMEM;
        }

        src_ring->base_addr_ce_space_unaligned = base_addr;

        src_ring->base_addr_owner_space = PTR_ALIGN(
                        src_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        src_ring->base_addr_ce_space = ALIGN(
                        src_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        /*
         * Also allocate a shadow src ring in regular
         * mem to use for faster access.
         */
        src_ring->shadow_base_unaligned =
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);
        if (!src_ring->shadow_base_unaligned) {
                pci_free_consistent(ar_pci->pdev,
                                    (nentries * sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    src_ring->base_addr_owner_space,
                                    src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;
                return -ENOMEM;
        }

        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_pci_wake(ar);
        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
                                         src_ring->base_addr_ce_space);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
        ath10k_pci_sleep(ar);

        return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                                    unsigned int ce_id,
                                    struct ath10k_ce_pipe *ce_state,
                                    const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries = attr->dest_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->dest_ring) {
                WARN_ON(ce_state->dest_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
        dest_ring = ce_state->dest_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;

        ath10k_pci_wake(ar);
        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
        dest_ring->write_index &= dest_ring->nentries_mask;
        ath10k_pci_sleep(ar);

        dest_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        if (!dest_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->dest_ring);
                ce_state->dest_ring = NULL;
                return -ENOMEM;
        }

        dest_ring->base_addr_ce_space_unaligned = base_addr;

        /*
         * Initialize the memory to 0 to prevent garbage data from
         * crashing the system during firmware download.
         */
        memset(dest_ring->base_addr_owner_space_unaligned, 0,
               nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

        dest_ring->base_addr_owner_space = PTR_ALIGN(
                        dest_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        dest_ring->base_addr_ce_space = ALIGN(
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_pci_wake(ar);
        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
                                          dest_ring->base_addr_ce_space);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
        ath10k_pci_sleep(ar);

        return 0;
}
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
                                                   unsigned int ce_id,
                                                   const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        spin_lock_bh(&ar_pci->ce_lock);

        ce_state->ar = ar;
        ce_state->id = ce_id;
        ce_state->ctrl_addr = ctrl_addr;
        ce_state->attr_flags = attr->flags;
        ce_state->src_sz_max = attr->src_sz_max;

        spin_unlock_bh(&ar_pci->ce_lock);

        return ce_state;
}
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
                                      unsigned int ce_id,
                                      const struct ce_attr *attr)
{
        struct ath10k_ce_pipe *ce_state;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        int ret;

        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
        if (!ce_state) {
                ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
                return NULL;
        }

        if (attr->src_nentries) {
                ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        if (attr->dest_nentries) {
                ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        /* Enable CE error interrupts */
        ath10k_pci_wake(ar);
        ath10k_ce_error_intr_enable(ar, ctrl_addr);
        ath10k_pci_sleep(ar);

        return ce_state;
}
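
/*
 * Illustrative sketch (not part of the driver): the PCI layer typically keeps
 * a table of per-pipe attributes and initializes each pipe from it. The
 * values below are hypothetical and not taken from the real pipe config.
 *
 *      static const struct ce_attr example_attr = {
 *              .flags = CE_ATTR_FLAGS,
 *              .src_nentries = 16,     // host->target ring entries
 *              .src_sz_max = 2048,     // largest single send, in bytes
 *              .dest_nentries = 0,     // no target->host ring on this pipe
 *      };
 *
 *      struct ath10k_ce_pipe *pipe = ath10k_ce_init(ar, ce_id, &example_attr);
 *      if (!pipe)
 *              // abort probe / fall back
 */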
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->src_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->src_ring->base_addr_owner_space,
                                    ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }

        if (ce_state->dest_ring) {
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->dest_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->dest_ring->base_addr_owner_space,
                                    ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }

        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;
}