2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/* Ask the HIF layer to reap any prior sends on this endpoint's uplink
 * pipe that have finished but not yet had their post-processing done.
 * 'force' is passed straight through to the HIF layer.
 */
26 static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
30 * Check whether HIF has any prior sends that have finished,
31 * have not had the post-processing done.
33 ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
/* TX-complete callback installed for the pseudo HTC control endpoint
 * (see conn_req.ep_ops.ep_tx_complete in ath10k_htc_wait_target). */
36 static void ath10k_htc_control_tx_complete(struct ath10k *ar,
/* Allocate an skb for an outgoing HTC control message.
 *
 * Reserves 20 bytes of headroom (origin of the constant unknown, see
 * FIXME below), warns once if the resulting data pointer is not 4-byte
 * aligned, and zeroes the ath10k per-skb control block.
 * Returns NULL (with a warning) on allocation failure.
 */
42 static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
45 struct ath10k_skb_cb *skb_cb;
47 skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
49 ath10k_warn("Unable to allocate ctrl skb\n");
53 skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
54 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
56 skb_cb = ATH10K_SKB_CB(skb);
57 memset(skb_cb, 0, sizeof(*skb_cb));
59 ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
/* Undo TX preparation on a completed/aborted skb: release the DMA
 * mapping and strip the HTC header that was pushed before transmit,
 * returning the skb to the form the caller originally handed in. */
63 static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
66 ath10k_skb_unmap(htc->ar->dev, skb);
67 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
/* Deliver TX completion for one skb. The skb is first restored
 * (unmapped, HTC header stripped). If the endpoint registered no
 * ep_tx_complete handler the skb is freed here with a warning;
 * otherwise ownership of the skb passes to the handler. */
70 static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
73 ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
76 ath10k_htc_restore_tx_skb(ep->htc, skb);
78 if (!ep->ep_ops.ep_tx_complete) {
79 ath10k_warn("no tx handler for eid %d\n", ep->eid);
80 dev_kfree_skb_any(skb);
84 ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
/* assumes tx_lock is held */
/* Decide whether the target should be asked to report credits back:
 * only relevant when credit flow is enabled on this endpoint, and only
 * when remaining credits have dropped below the number needed for one
 * maximum-sized message. */
88 static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
90 if (!ep->tx_credit_flow_enabled)
92 if (ep->tx_credits >= ep->tx_credits_per_max_message)
95 ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
/* Fill in the HTC header at the front of an outgoing skb.
 *
 * The header area (already pushed by ath10k_htc_send) is zeroed, the
 * payload length recorded as little-endian, and a sequence number
 * assigned under tx_lock. If the endpoint is running low on credits
 * the NEED_CREDIT_UPDATE flag is set so the target reports credits
 * back in an RX trailer. */
100 static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
103 struct ath10k_htc_hdr *hdr;
105 hdr = (struct ath10k_htc_hdr *)skb->data;
106 memset(hdr, 0, sizeof(*hdr));
/* header length excludes the header itself */
109 hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
111 spin_lock_bh(&ep->htc->tx_lock);
112 hdr->seq_no = ep->seq_no++;
114 if (ath10k_htc_ep_need_credit_update(ep))
115 hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
117 spin_unlock_bh(&ep->htc->tx_lock);
/* Hand one prepared skb to the HIF layer for transmission.
 *
 * Prepares the HTC header, DMA-maps the skb and submits it via
 * ath10k_hif_send_head. On failure the error path (visible below)
 * warns, refunds the endpoint's credits under tx_lock, requeues the
 * skb at the head of the TX queue for the -ENOSR (out-of-resources)
 * case, or marks the skb aborted and runs TX completion. */
120 static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
121 struct ath10k_htc_ep *ep,
125 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
128 ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
131 ath10k_htc_prepare_tx_skb(ep, skb);
133 ret = ath10k_skb_map(htc->ar->dev, skb);
137 ret = ath10k_hif_send_head(htc->ar,
147 ath10k_warn("HTC issue failed: %d\n", ret);
/* give the credits reserved for this skb back to the endpoint */
149 spin_lock_bh(&htc->tx_lock);
150 ep->tx_credits += credits;
151 spin_unlock_bh(&htc->tx_lock);
153 /* this is the simplest way to handle out-of-resources for non-credit
154 * based endpoints. credit based endpoints can still get -ENOSR, but
155 * this is highly unlikely as credit reservation should prevent that */
157 spin_lock_bh(&htc->tx_lock);
158 __skb_queue_head(&ep->tx_queue, skb);
159 spin_unlock_bh(&htc->tx_lock);
/* unrecoverable failure: report an aborted completion to the owner */
164 skb_cb->is_aborted = true;
165 ath10k_htc_notify_tx_completion(ep, skb);
/* Dequeue the next skb from a credit-flow-controlled endpoint.
 *
 * Caller must hold htc->tx_lock (asserted below). Computes how many
 * target credits the message needs: one credit if it fits in a single
 * target_credit_size chunk, otherwise length/credit_size (a remainder
 * is computed; presumably it bumps credits_required by one on an
 * elided line -- TODO confirm). If the endpoint lacks sufficient
 * credits the skb is put back at the head of the queue; otherwise the
 * credits are deducted and reported to the caller via *credits. */
170 static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
171 struct ath10k_htc_ep *ep,
175 struct ath10k_skb_cb *skb_cb;
176 int credits_required;
178 unsigned int transfer_len;
180 lockdep_assert_held(&htc->tx_lock);
182 skb = __skb_dequeue(&ep->tx_queue);
186 skb_cb = ATH10K_SKB_CB(skb);
187 transfer_len = skb->len;
189 if (likely(transfer_len <= htc->target_credit_size)) {
190 credits_required = 1;
192 /* figure out how many credits this message requires */
193 credits_required = transfer_len / htc->target_credit_size;
194 remainder = transfer_len % htc->target_credit_size;
200 ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
201 credits_required, ep->tx_credits);
203 if (ep->tx_credits < credits_required) {
/* not enough credits yet -- requeue and wait for a credit report */
204 __skb_queue_head(&ep->tx_queue, skb);
208 ep->tx_credits -= credits_required;
209 *credits = credits_required;
/* Per-endpoint TX worker (scheduled from ath10k_htc_send and from
 * credit-report processing).
 *
 * If the endpoint's upload pipe is polled, first reap completed sends.
 * Then, under tx_lock, pull the next skb -- credit-accounted for
 * flow-controlled endpoints, plain dequeue otherwise -- and issue it
 * to the HIF layer. */
213 static void ath10k_htc_send_work(struct work_struct *work)
215 struct ath10k_htc_ep *ep = container_of(work,
216 struct ath10k_htc_ep, send_work);
217 struct ath10k_htc *htc = ep->htc;
223 if (ep->ul_is_polled)
224 ath10k_htc_send_complete_check(ep, 0);
226 spin_lock_bh(&htc->tx_lock);
227 if (ep->tx_credit_flow_enabled)
228 skb = ath10k_htc_get_skb_credit_based(htc, ep,
231 skb = __skb_dequeue(&ep->tx_queue);
232 spin_unlock_bh(&htc->tx_lock);
237 ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
/* Public TX entry point: queue an skb for transmission on endpoint
 * 'eid' and kick the endpoint's send worker. The HTC header space is
 * pushed here; the header itself is filled in later by
 * ath10k_htc_prepare_tx_skb. Ownership of the skb passes to HTC.
 *
 * NOTE(review): 'ep' is computed from 'eid' before the bounds check
 * below. Only the address is taken (no dereference), so this is
 * harmless in practice, but moving the check first would be cleaner. */
243 int ath10k_htc_send(struct ath10k_htc *htc,
244 enum ath10k_htc_ep_id eid,
247 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
249 if (eid >= ATH10K_HTC_EP_COUNT) {
250 ath10k_warn("Invalid endpoint id: %d\n", eid);
254 skb_push(skb, sizeof(struct ath10k_htc_hdr));
256 spin_lock_bh(&htc->tx_lock);
257 __skb_queue_tail(&ep->tx_queue, skb);
258 spin_unlock_bh(&htc->tx_lock);
260 queue_work(htc->ar->workqueue, &ep->send_work);
/* HIF TX-completion callback (registered in ath10k_htc_create).
 *
 * Delivers the completion to the owning endpoint and, for endpoints
 * without credit flow control, reschedules the send worker so queued
 * packets keep flowing -- unless HTC is stopping. Credit-flow
 * endpoints are instead rescheduled when credits return from the
 * target (see ath10k_htc_process_credit_report). */
264 static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
268 struct ath10k_htc *htc = ar->htc;
269 struct ath10k_htc_ep *ep = &htc->endpoint[eid];
272 ath10k_htc_notify_tx_completion(ep, skb);
273 /* the skb now belongs to the completion handler */
275 spin_lock_bh(&htc->tx_lock);
276 stopping = htc->stopping;
277 spin_unlock_bh(&htc->tx_lock);
279 if (!ep->tx_credit_flow_enabled && !stopping)
281 * note: when using TX credit flow, the re-checking of
282 * queues happens when credits flow back from the target.
283 * in the non-TX credit case, we recheck after the packet
286 queue_work(ar->workqueue, &ep->send_work);
/* flush endpoint TX queue */
/* Drain every pending skb from the endpoint's TX queue under tx_lock,
 * completing each one as aborted, then synchronously cancel the
 * endpoint's send worker so no further transmission is attempted. */
292 static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
293 struct ath10k_htc_ep *ep)
296 struct ath10k_skb_cb *skb_cb;
298 spin_lock_bh(&htc->tx_lock);
300 skb = __skb_dequeue(&ep->tx_queue);
304 skb_cb = ATH10K_SKB_CB(skb);
305 skb_cb->is_aborted = true;
306 ath10k_htc_notify_tx_completion(ep, skb);
308 spin_unlock_bh(&htc->tx_lock);
310 cancel_work_sync(&ep->send_work);
/* Apply a credit report received in an RX trailer.
 *
 * 'report' points at an array of credit-report entries; 'len' is the
 * byte length of that array (a warning is issued if it is not a whole
 * multiple of the entry size). For each valid entry the endpoint's
 * credit pool is replenished under tx_lock, and the send worker is
 * kicked if the endpoint now has both credits and queued packets. */
318 ath10k_htc_process_credit_report(struct ath10k_htc *htc,
319 const struct ath10k_htc_credit_report *report,
321 enum ath10k_htc_ep_id eid)
323 struct ath10k_htc_ep *ep;
326 if (len % sizeof(*report))
327 ath10k_warn("Uneven credit report len %d", len);
329 n_reports = len / sizeof(*report);
331 spin_lock_bh(&htc->tx_lock);
332 for (i = 0; i < n_reports; i++, report++) {
/* skip entries naming a nonexistent endpoint */
333 if (report->eid >= ATH10K_HTC_EP_COUNT)
336 ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
337 report->eid, report->credits);
339 ep = &htc->endpoint[report->eid];
340 ep->tx_credits += report->credits;
342 if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
343 queue_work(htc->ar->workqueue, &ep->send_work);
345 spin_unlock_bh(&htc->tx_lock);
/* Walk the trailer records appended to an RX frame.
 *
 * 'buffer'/'length' delimit the trailer area. Each record carries its
 * own header with an id and length; a record that claims to extend
 * past the remaining buffer is rejected. Credit-report records are
 * dispatched to ath10k_htc_process_credit_report; unknown record ids
 * are warned about and skipped. On a malformed trailer the whole
 * original buffer is dumped for debugging and an error returned. */
348 static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
351 enum ath10k_htc_ep_id src_eid)
354 struct ath10k_htc_record *record;
/* remember the untouched trailer so it can be dumped on error */
359 orig_buffer = buffer;
360 orig_length = length;
363 record = (struct ath10k_htc_record *)buffer;
365 if (length < sizeof(record->hdr)) {
370 if (record->hdr.len > length) {
371 /* no room left in buffer for record */
372 ath10k_warn("Invalid record length: %d\n",
378 switch (record->hdr.id) {
379 case ATH10K_HTC_RECORD_CREDITS:
380 len = sizeof(struct ath10k_htc_credit_report);
381 if (record->hdr.len < len) {
382 ath10k_warn("Credit report too long\n");
386 ath10k_htc_process_credit_report(htc,
387 record->credit_report,
392 ath10k_warn("Unhandled record: id:%d length:%d\n",
393 record->hdr.id, record->hdr.len);
400 /* multiple records may be present in a trailer */
401 buffer += sizeof(record->hdr) + record->hdr.len;
402 length -= sizeof(record->hdr) + record->hdr.len;
406 ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
407 orig_buffer, orig_length);
/* HIF RX-completion callback (registered in ath10k_htc_create).
 *
 * Parses and strips the HTC header, validates the endpoint id and the
 * advertised payload length against both ATH10K_HTC_MAX_LEN and the
 * actual skb length, processes any trailer records, then either
 * handles the frame itself (EP0 control messages) or hands it to the
 * endpoint's ep_rx_complete callback, which takes ownership. */
412 static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
417 struct ath10k_htc *htc = ar->htc;
418 struct ath10k_htc_hdr *hdr;
419 struct ath10k_htc_ep *ep;
424 bool trailer_present;
426 hdr = (struct ath10k_htc_hdr *)skb->data;
427 skb_pull(skb, sizeof(*hdr));
431 if (eid >= ATH10K_HTC_EP_COUNT) {
432 ath10k_warn("HTC Rx: invalid eid %d\n", eid);
433 ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
439 ep = &htc->endpoint[eid];
442 * If this endpoint that received a message from the target has
443 * a to-target HIF pipe whose send completions are polled rather
444 * than interrupt-driven, this is a good point to ask HIF to check
445 * whether it has any completed sends to handle.
447 if (ep->ul_is_polled)
448 ath10k_htc_send_complete_check(ep, 1);
450 payload_len = __le16_to_cpu(hdr->len);
/* reject frames whose advertised length exceeds the HTC maximum */
452 if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
453 ath10k_warn("HTC rx frame too long, len: %zu\n",
454 payload_len + sizeof(*hdr));
455 ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
/* reject frames shorter than the header claims */
461 if (skb->len < payload_len) {
462 ath10k_dbg(ATH10K_DBG_HTC,
463 "HTC Rx: insufficient length, got %d, expected %d\n",
464 skb->len, payload_len);
465 ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
466 "", hdr, sizeof(*hdr));
471 /* get flags to check for trailer */
472 trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
473 if (trailer_present) {
476 trailer_len = hdr->trailer_len;
477 min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
479 if ((trailer_len < min_len) ||
480 (trailer_len > payload_len)) {
481 ath10k_warn("Invalid trailer length: %d\n",
/* trailer sits at the tail of the payload */
488 trailer += sizeof(*hdr);
489 trailer += payload_len;
490 trailer -= trailer_len;
491 status = ath10k_htc_process_trailer(htc, trailer,
492 trailer_len, hdr->eid);
496 skb_trim(skb, skb->len - trailer_len);
499 if (((int)payload_len - (int)trailer_len) <= 0)
500 /* zero length packet with trailer data, just drop these */
503 if (eid == ATH10K_HTC_EP_0) {
504 struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
506 switch (__le16_to_cpu(msg->hdr.message_id)) {
508 /* handle HTC control message */
509 if (completion_done(&htc->ctl_resp)) {
511 * this is a fatal error, target should not be
512 * sending unsolicited messages on the ep 0
514 ath10k_warn("HTC rx ctrl still processing\n");
516 complete(&htc->ctl_resp);
/* stash the (length-capped) control response for the waiter */
520 htc->control_resp_len =
522 ATH10K_HTC_MAX_CTRL_MSG_LEN);
524 memcpy(htc->control_resp_buffer, skb->data,
525 htc->control_resp_len);
527 complete(&htc->ctl_resp);
529 case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
530 htc->htc_ops.target_send_suspend_complete(ar);
535 ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
537 ep->ep_ops.ep_rx_complete(ar, skb);
539 /* skb is now owned by the rx completion handler */
/* RX callback for the pseudo control endpoint. Control traffic is
 * intercepted earlier in ath10k_htc_rx_completion_handler, so landing
 * here means the firmware sent unexpected regular RX on EP0. */
547 static void ath10k_htc_control_rx_complete(struct ath10k *ar,
550 /* This is unexpected. FW is not supposed to send regular rx on this
552 ath10k_warn("unexpected htc rx\n");
/* Map an HTC service id to a human-readable name for log messages. */
560 static const char *htc_service_name(enum ath10k_htc_svc_id id)
563 case ATH10K_HTC_SVC_ID_RESERVED:
565 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
567 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
569 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
571 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
573 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
575 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
577 case ATH10K_HTC_SVC_ID_NMI_CONTROL:
578 return "NMI Control";
579 case ATH10K_HTC_SVC_ID_NMI_DATA:
581 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
583 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
/* Reset every endpoint to its pristine state: unused service id, zero
 * limits, an (re)initialized TX queue, credit flow enabled by default,
 * and the send worker bound to ath10k_htc_send_work. Called at create
 * time and again from ath10k_htc_stop. */
590 static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
592 struct ath10k_htc_ep *ep;
595 for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
596 ep = &htc->endpoint[i];
597 ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
598 ep->max_ep_message_len = 0;
599 ep->max_tx_queue_depth = 0;
601 skb_queue_head_init(&ep->tx_queue);
603 ep->tx_credit_flow_enabled = true;
604 INIT_WORK(&ep->send_work, ath10k_htc_send_work);
/* Assign target transmit credits to services. On this (PCIe) path
 * every credit goes to the WMI control service; data services need
 * none (see the original comment fragment below). */
608 static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
610 struct ath10k_htc_svc_tx_credits *entry;
612 entry = &htc->service_tx_alloc[0];
615 * for PCIE allocate all credists/HTC buffers to WMI.
616 * no buffers are used/required for data. data always
620 entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
621 entry->credit_allocation = htc->total_transmit_credits;
/* Look up the credit allocation recorded for 'service_id' in the
 * service_tx_alloc table (filled by
 * ath10k_htc_setup_target_buffer_assignments). */
624 static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
630 for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
631 if (htc->service_tx_alloc[i].service_id == service_id)
633 htc->service_tx_alloc[i].credit_allocation;
/* Bring up HIF and wait for the target's HTC READY message.
 *
 * Starts the HIF layer, waits (with timeout) on ctl_resp for the
 * READY control message, then validates its length, message id and
 * the advertised credit count/size. On success the credit parameters
 * are recorded, per-service credit assignments are set up, and the
 * pseudo HTC control service is connected on EP0. The error path
 * (label near the bottom) stops HIF again. */
639 int ath10k_htc_wait_target(struct ath10k_htc *htc)
642 struct ath10k_htc_svc_conn_req conn_req;
643 struct ath10k_htc_svc_conn_resp conn_resp;
644 struct ath10k_htc_msg *msg;
/* re-arm the completion before starting HIF so the READY isn't lost */
649 INIT_COMPLETION(htc->ctl_resp);
651 status = ath10k_hif_start(htc->ar);
653 ath10k_err("could not start HIF (%d)\n", status);
657 status = wait_for_completion_timeout(&htc->ctl_resp,
658 ATH10K_HTC_WAIT_TIMEOUT_HZ);
663 ath10k_err("ctl_resp never came in (%d)\n", status);
667 if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
668 ath10k_err("Invalid HTC ready msg len:%d\n",
669 htc->control_resp_len);
675 msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
676 message_id = __le16_to_cpu(msg->hdr.message_id);
677 credit_count = __le16_to_cpu(msg->ready.credit_count);
678 credit_size = __le16_to_cpu(msg->ready.credit_size);
680 if (message_id != ATH10K_HTC_MSG_READY_ID) {
681 ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
686 htc->total_transmit_credits = credit_count;
687 htc->target_credit_size = credit_size;
689 ath10k_dbg(ATH10K_DBG_HTC,
690 "Target ready! transmit resources: %d size:%d\n",
691 htc->total_transmit_credits,
692 htc->target_credit_size);
694 if ((htc->total_transmit_credits == 0) ||
695 (htc->target_credit_size == 0)) {
697 ath10k_err("Invalid credit size received\n");
701 ath10k_htc_setup_target_buffer_assignments(htc);
703 /* setup our pseudo HTC control endpoint connection */
704 memset(&conn_req, 0, sizeof(conn_req));
705 memset(&conn_resp, 0, sizeof(conn_resp));
706 conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
707 conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
708 conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
709 conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
711 /* connect fake service */
712 status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
714 ath10k_err("could not connect to htc service (%d)\n", status);
/* error path: undo the HIF start */
720 ath10k_hif_stop(htc->ar);
/* Connect one HTC service and configure its endpoint.
 *
 * For the reserved control service the connection is faked locally
 * (EP0, dummy response, credit flow disabled). For real services a
 * CONNECT_SERVICE message is built and sent on EP0, then the function
 * waits (with timeout) on ctl_resp for the target's response, which
 * is validated for message id, length and status before the assigned
 * endpoint id and max message size are extracted.
 *
 * The chosen endpoint is then initialized: service id, queue depth,
 * message/credit limits, rounded-up credits-per-max-message, the
 * caller's ep_ops, and the HIF pipe mapping. Credit flow control is
 * disabled for every service except WMI control. Results are returned
 * to the caller through 'conn_resp'. */
725 int ath10k_htc_connect_service(struct ath10k_htc *htc,
726 struct ath10k_htc_svc_conn_req *conn_req,
727 struct ath10k_htc_svc_conn_resp *conn_resp)
729 struct ath10k_htc_msg *msg;
730 struct ath10k_htc_conn_svc *req_msg;
731 struct ath10k_htc_conn_svc_response resp_msg_dummy;
732 struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
733 enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
734 struct ath10k_htc_ep *ep;
736 unsigned int max_msg_size = 0;
738 bool disable_credit_flow_ctrl = false;
739 u16 message_id, service_id, flags = 0;
742 /* special case for HTC pseudo control service */
743 if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
744 disable_credit_flow_ctrl = true;
745 assigned_eid = ATH10K_HTC_EP_0;
746 max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
747 memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
751 tx_alloc = ath10k_htc_get_credit_allocation(htc,
752 conn_req->service_id);
754 ath10k_dbg(ATH10K_DBG_HTC,
755 "HTC Service %s does not allocate target credits\n",
756 htc_service_name(conn_req->service_id));
758 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
760 ath10k_err("Failed to allocate HTC packet\n");
/* build the CONNECT_SERVICE request message */
764 length = sizeof(msg->hdr) + sizeof(msg->connect_service);
765 skb_put(skb, length);
766 memset(skb->data, 0, length);
768 msg = (struct ath10k_htc_msg *)skb->data;
769 msg->hdr.message_id =
770 __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
772 flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
774 req_msg = &msg->connect_service;
775 req_msg->flags = __cpu_to_le16(flags);
776 req_msg->service_id = __cpu_to_le16(conn_req->service_id);
778 /* Only enable credit flow control for WMI ctrl service */
779 if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
780 flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
781 disable_credit_flow_ctrl = true;
/* re-arm the completion before sending the request */
784 INIT_COMPLETION(htc->ctl_resp);
786 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
792 /* wait for response */
793 status = wait_for_completion_timeout(&htc->ctl_resp,
794 ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
798 ath10k_err("Service connect timeout: %d\n", status);
802 /* we controlled the buffer creation, it's aligned */
803 msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
804 resp_msg = &msg->connect_service_response;
805 message_id = __le16_to_cpu(msg->hdr.message_id);
806 service_id = __le16_to_cpu(resp_msg->service_id);
808 if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
809 (htc->control_resp_len < sizeof(msg->hdr) +
810 sizeof(msg->connect_service_response))) {
811 ath10k_err("Invalid resp message ID 0x%x", message_id);
815 ath10k_dbg(ATH10K_DBG_HTC,
816 "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
817 htc_service_name(service_id),
818 resp_msg->status, resp_msg->eid);
820 conn_resp->connect_resp_code = resp_msg->status;
822 /* check response status */
823 if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
824 ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
825 htc_service_name(service_id),
830 assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
831 max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
/* sanity-check what the target handed back */
835 if (assigned_eid >= ATH10K_HTC_EP_COUNT)
838 if (max_msg_size == 0)
841 ep = &htc->endpoint[assigned_eid];
842 ep->eid = assigned_eid;
/* the target must not assign an endpoint twice */
844 if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
847 /* return assigned endpoint to caller */
848 conn_resp->eid = assigned_eid;
849 conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
851 /* setup the endpoint */
852 ep->service_id = conn_req->service_id;
853 ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
854 ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
855 ep->tx_credits = tx_alloc;
856 ep->tx_credit_size = htc->target_credit_size;
857 ep->tx_credits_per_max_message = ep->max_ep_message_len /
858 htc->target_credit_size;
/* round up: a partial credit's worth still costs a whole credit */
860 if (ep->max_ep_message_len % htc->target_credit_size)
861 ep->tx_credits_per_max_message++;
863 /* copy all the callbacks */
864 ep->ep_ops = conn_req->ep_ops;
866 status = ath10k_hif_map_service_to_pipe(htc->ar,
875 ath10k_dbg(ATH10K_DBG_HTC,
876 "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
877 htc_service_name(ep->service_id), ep->ul_pipe_id,
878 ep->dl_pipe_id, ep->eid);
880 ath10k_dbg(ATH10K_DBG_HTC,
881 "EP %d UL polled: %d, DL polled: %d\n",
882 ep->eid, ep->ul_is_polled, ep->dl_is_polled);
884 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
885 ep->tx_credit_flow_enabled = false;
886 ath10k_dbg(ATH10K_DBG_HTC,
887 "HTC service: %s eid: %d TX flow control disabled\n",
888 htc_service_name(ep->service_id), assigned_eid);
/* Allocate an skb for an HTC TX payload of 'size' bytes, with extra
 * room reserved up front for the HTC header (pushed later by
 * ath10k_htc_send). Warns if the data pointer ends up unaligned,
 * since FW/HTC requires 4-byte aligned streams. */
894 struct sk_buff *ath10k_htc_alloc_skb(int size)
898 skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
900 ath10k_warn("could not allocate HTC tx skb\n");
904 skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
906 /* FW/HTC requires 4-byte aligned streams */
907 if (!IS_ALIGNED((unsigned long)skb->data, 4))
908 ath10k_warn("Unaligned HTC tx skb\n");
/* Tell the target that host-side setup is complete: build and send a
 * SETUP_COMPLETE_EX control message on EP0. Returns the status of the
 * send (error paths elided in this view). */
913 int ath10k_htc_start(struct ath10k_htc *htc)
917 struct ath10k_htc_msg *msg;
919 skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
923 skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
924 memset(skb->data, 0, skb->len);
926 msg = (struct ath10k_htc_msg *)skb->data;
927 msg->hdr.message_id =
928 __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
930 ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
932 status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
942 * stop HTC communications, i.e. stop interrupt reception, and flush all
/* Set the 'stopping' flag under tx_lock (checked by the TX completion
 * handler), flush every endpoint's TX queue, stop HIF, and reset all
 * endpoint state back to its pristine form. */
945 void ath10k_htc_stop(struct ath10k_htc *htc)
948 struct ath10k_htc_ep *ep;
950 spin_lock_bh(&htc->tx_lock);
951 htc->stopping = true;
952 spin_unlock_bh(&htc->tx_lock);
954 for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
955 ep = &htc->endpoint[i];
956 ath10k_htc_flush_endpoint_tx(htc, ep);
959 ath10k_hif_stop(htc->ar);
960 ath10k_htc_reset_endpoint_states(htc);
963 /* registered target arrival callback from the HIF layer */
/* Allocate and initialize the HTC instance.
 *
 * Zero-allocates the context (returning ERR_PTR(-ENOMEM) on failure),
 * initializes the TX lock and endpoint states, copies the caller's
 * htc_ops, registers the TX/RX completion handlers with HIF, resolves
 * the default HIF pipes for the EP0 control endpoint, and initializes
 * the ctl_resp completion used for control-message handshakes. */
964 struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
965 struct ath10k_htc_ops *htc_ops)
967 struct ath10k_hif_cb htc_callbacks;
968 struct ath10k_htc_ep *ep = NULL;
969 struct ath10k_htc *htc = NULL;
971 /* FIXME: use struct ath10k instead */
972 htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
974 return ERR_PTR(-ENOMEM);
976 spin_lock_init(&htc->tx_lock);
978 memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
980 ath10k_htc_reset_endpoint_states(htc);
982 /* setup HIF layer callbacks */
983 htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
984 htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
987 /* Get HIF default pipe for HTC message exchange */
988 ep = &htc->endpoint[ATH10K_HTC_EP_0];
990 ath10k_hif_set_callbacks(ar, &htc_callbacks);
991 ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
993 init_completion(&htc->ctl_resp);
998 void ath10k_htc_destroy(struct ath10k_htc *htc)