ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ath10k_pci_wake(ar);
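+ /* Waking the target can fail; propagate the error to the caller. */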
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
- ath10k_pci_wake(ar);
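+ /* ce_lock is already held at this point, so unwind through the
+  * "out" label on failure rather than returning directly. */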
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
...
} else {
ret = -EIO;
}
ath10k_pci_sleep(ar);
+
+out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
}

static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret = -EIO;
+ int ret;
if (src_ring->hw_index == sw_index) {
/*
* The SW completion index has caught up with the cached
* version of the HW completion index.
* Update the cached HW completion index to see whether
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
- ath10k_pci_wake(ar);
+
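+ /* Refreshing the cached HW index is a register read, which
+  * requires the target to be awake. */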
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
+
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
+
ath10k_pci_sleep(ar);
}
+
read_index = src_ring->hw_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct ce_desc *sbase = src_ring->shadow_base;
- struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
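+ /* Nothing to reap: either no send has completed, or the index
+  * read back as 0xffffffff (an invalid value). */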
+ if ((read_index == sw_index) || (read_index == 0xffffffff))
+ return -EIO;
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(sdesc->addr);
- *nbytesp = __le16_to_cpu(sdesc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
- CE_DESC_FLAGS_META_DATA);
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
- if (per_transfer_contextp)
- *per_transfer_contextp =
- src_ring->per_transfer_context[sw_index];
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
- /* sanity */
- src_ring->per_transfer_context[sw_index] = NULL;
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- ret = 0;
- }
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
- return ret;
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
unsigned int nbytes;
unsigned int id;
unsigned int flags;
+ int ret;
+
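+ /* Servicing a CE reads and writes its registers, which is only
+  * valid while the target is awake; bail out otherwise. */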
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
u32 intr_summary;
- ath10k_pci_wake(ar);
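+ /* The interrupt summary is a register read and needs the target
+  * awake; without it no engine can be serviced. */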
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
+ int ret;
- ath10k_pci_wake(ar);
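+ /* Adjusting the copy-complete interrupt is a CE register write. */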
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
+
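+ /* Each per-CE interrupt disable below is a register write; skip
+  * the loop entirely if the target cannot be woken. */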
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;