2 * Copyright (c) 2007-2011 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 #define HTC_PACKET_CONTAINER_ALLOCATION 32
22 #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
24 static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25 struct htc_packet *packet);
26 static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
28 /* htc pipe tx path */
/*
 * Undo the send-time netbuf fixup: if HTC_FLAGS_TX_FIXUP_NETBUF was set
 * when the HTC frame header was pushed onto the skb, strip that header
 * back off and clear the flag so the packet is returned to its caller
 * in its original form.
 */
29 static inline void restore_tx_packet(struct htc_packet *packet)
31 if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32 skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33 packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
/*
 * Deliver TX completions for every packet on @queue_to_indicate.
 *
 * If the endpoint registered a multi-packet completion callback
 * (tx_comp_multi) the whole queue is handed over in one call and the
 * list head is re-initialised, since the callback then owns the
 * packets.  Otherwise each packet is popped off and indicated
 * individually through the legacy per-packet tx_complete callback.
 */
37 static void do_send_completion(struct htc_endpoint *ep,
38 struct list_head *queue_to_indicate)
40 struct htc_packet *packet;
42 if (list_empty(queue_to_indicate)) {
43 /* nothing to indicate */
47 if (ep->ep_cb.tx_comp_multi != NULL) {
48 ath6kl_dbg(ATH6KL_DBG_HTC,
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
51 get_queue_depth(queue_to_indicate));
53 * a multiple send complete handler is being used,
54 * pass the queue to the handler
56 ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
58 * all packets are now owned by the callback,
59 * reset queue to be safe
61 INIT_LIST_HEAD(queue_to_indicate);
63 /* using legacy EpTxComplete */
65 packet = list_first_entry(queue_to_indicate,
66 struct htc_packet, list);
68 list_del(&packet->list);
69 ath6kl_dbg(ATH6KL_DBG_HTC,
70 "%s: calling ep %d send complete callback on packet 0x%p\n",
71 __func__, ep->eid, packet);
72 ep->ep_cb.tx_complete(ep->target, packet);
73 } while (!list_empty(queue_to_indicate));
/*
 * Complete a single TX packet: restore its original netbuf state, then
 * wrap it in a temporary one-entry list so the common queue-based
 * completion path (do_send_completion) can be reused.
 */
77 static void send_packet_completion(struct htc_target *target,
78 struct htc_packet *packet)
80 struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81 struct list_head container;
83 restore_tx_packet(packet);
84 INIT_LIST_HEAD(&container);
85 list_add_tail(&packet->list, &container);
88 do_send_completion(ep, &container);
/*
 * Dequeue as many packets from the endpoint TX queue into @queue as the
 * endpoint's available target credits allow (credit-based flow control).
 *
 * For each head packet the number of credits needed is computed from
 * the transfer length (payload + HTC header) and the target credit
 * size; if the endpoint does not hold enough credits the loop stops
 * and the packet stays queued.  Caller must hold target->tx_lock.
 */
91 static void get_htc_packet_credit_based(struct htc_target *target,
92 struct htc_endpoint *ep,
93 struct list_head *queue)
98 struct htc_packet *packet;
99 unsigned int transfer_len;
101 /* NOTE : the TX lock is held when this function is called */
103 /* loop until we can grab as many packets out of the queue as we can */
106 if (list_empty(&ep->txq))
109 /* get packet at head, but don't remove it */
110 packet = list_first_entry(&ep->txq, struct htc_packet, list);
112 ath6kl_dbg(ATH6KL_DBG_HTC,
113 "%s: got head packet:0x%p , queue depth: %d\n",
114 __func__, packet, get_queue_depth(&ep->txq));
116 transfer_len = packet->act_len + HTC_HDR_LENGTH;
118 if (transfer_len <= target->tgt_cred_sz) {
119 credits_required = 1;
121 /* figure out how many credits this message requires */
122 credits_required = transfer_len / target->tgt_cred_sz;
123 remainder = transfer_len % target->tgt_cred_sz;
129 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
130 __func__, credits_required, ep->cred_dist.credits);
132 if (ep->eid == ENDPOINT_0) {
134 * endpoint 0 is special, it always has a credit and
135 * does not require credit based flow control
137 credits_required = 0;
141 if (ep->cred_dist.credits < credits_required)
144 ep->cred_dist.credits -= credits_required;
145 ep->ep_st.cred_cosumd += credits_required;
147 /* check if we need credits back from the target */
148 if (ep->cred_dist.credits <
149 ep->cred_dist.cred_per_msg) {
150 /* tell the target we need credits ASAP! */
151 send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
152 ep->ep_st.cred_low_indicate += 1;
153 ath6kl_dbg(ATH6KL_DBG_HTC,
154 "%s: host needs credits\n",
159 /* now we can fully dequeue */
160 packet = list_first_entry(&ep->txq, struct htc_packet, list);
162 list_del(&packet->list);
163 /* save the number of credits this packet consumed */
164 packet->info.tx.cred_used = credits_required;
165 /* save send flags */
166 packet->info.tx.flags = send_flags;
167 packet->info.tx.seqno = ep->seqno;
169 /* queue this packet into the caller's queue */
170 list_add_tail(&packet->list, queue);
/*
 * Dequeue packets from the endpoint TX queue into @queue without credit
 * accounting, bounded only by the HIF resource count @resources.  Used
 * for endpoints with credit-based flow control disabled.  Caller must
 * hold target->tx_lock.
 */
175 static void get_htc_packet(struct htc_target *target,
176 struct htc_endpoint *ep,
177 struct list_head *queue, int resources)
179 struct htc_packet *packet;
181 /* NOTE : the TX lock is held when this function is called */
183 /* loop until we can grab as many packets out of the queue as we can */
185 if (list_empty(&ep->txq))
188 packet = list_first_entry(&ep->txq, struct htc_packet, list);
189 list_del(&packet->list);
191 ath6kl_dbg(ATH6KL_DBG_HTC,
192 "%s: got packet:0x%p , new queue depth: %d\n",
193 __func__, packet, get_queue_depth(&ep->txq));
194 packet->info.tx.seqno = ep->seqno;
/* no credits consumed on this path, so flags/cred_used are cleared */
195 packet->info.tx.flags = 0;
196 packet->info.tx.cred_used = 0;
199 /* queue this packet into the caller's queue */
200 list_add_tail(&packet->list, queue);
/*
 * Push each packet on @pkt_queue down to the HIF layer for transmission.
 *
 * For every packet: an htc_frame_hdr is prepended to the skb (payload
 * length, flags, endpoint id and sequence number filled in), the packet
 * is stashed on the endpoint's tx_lookup_queue so the eventual HIF
 * completion can be matched back to it, then ath6kl_hif_pipe_send() is
 * called.  On a send failure the packet is unhooked from the lookup
 * queue, its credits are reclaimed, and it is returned to the caller's
 * queue; any remaining packets are then completed with the error status.
 */
205 static int htc_issue_packets(struct htc_target *target,
206 struct htc_endpoint *ep,
207 struct list_head *pkt_queue)
212 struct htc_frame_hdr *htc_hdr;
213 struct htc_packet *packet;
215 ath6kl_dbg(ATH6KL_DBG_HTC,
216 "%s: queue: 0x%p, pkts %d\n", __func__,
217 pkt_queue, get_queue_depth(pkt_queue));
219 while (!list_empty(pkt_queue)) {
220 packet = list_first_entry(pkt_queue, struct htc_packet, list);
221 list_del(&packet->list);
230 payload_len = packet->act_len;
232 /* setup HTC frame header */
233 htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
241 packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
244 put_unaligned((u16) payload_len, &htc_hdr->payld_len);
245 htc_hdr->flags = packet->info.tx.flags;
246 htc_hdr->eid = (u8) packet->endpoint;
247 htc_hdr->ctrl[0] = 0;
248 htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
250 spin_lock_bh(&target->tx_lock);
252 /* store in look up queue to match completions */
253 list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
254 ep->ep_st.tx_issued += 1;
255 spin_unlock_bh(&target->tx_lock);
257 status = ath6kl_hif_pipe_send(target->dev->ar,
258 ep->pipe.pipeid_ul, NULL, skb);
261 if (status != -ENOMEM) {
262 /* TODO: if more than 1 endpoint maps to the
263 * same PipeID, it is possible to run out of
264 * resources in the HIF layer.
265 * Don't emit the error
267 ath6kl_dbg(ATH6KL_DBG_HTC,
268 "%s: failed status:%d\n",
271 spin_lock_bh(&target->tx_lock);
272 list_del(&packet->list);
274 /* reclaim credits */
275 ep->cred_dist.credits += packet->info.tx.cred_used;
276 spin_unlock_bh(&target->tx_lock);
278 /* put it back into the callers queue */
279 list_add(&packet->list, pkt_queue);
/* on error, flush the remainder of the queue with the failure status */
286 while (!list_empty(pkt_queue)) {
287 if (status != -ENOMEM) {
288 ath6kl_dbg(ATH6KL_DBG_HTC,
289 "%s: failed pkt:0x%p status:%d\n",
290 __func__, packet, status);
293 packet = list_first_entry(pkt_queue,
294 struct htc_packet, list);
295 list_del(&packet->list);
296 packet->status = status;
297 send_packet_completion(target, packet);
/*
 * Core TX scheduling routine for an endpoint.
 *
 * If the caller supplies @txq, those packets are first admitted into
 * the endpoint's TX queue, honoring max_txq_depth: overflowing packets
 * are either all moved anyway (no tx_full callback registered), or each
 * overflow packet is offered to the endpoint's tx_full callback which
 * may drop or keep it.  Then, guarded by tx_proc_cnt so only one
 * context drains at a time, the endpoint queue is drained in a loop via
 * either the credit-based or the HIF-resource-based packet getter and
 * handed to htc_issue_packets().
 *
 * A NULL @txq means "just re-check the endpoint queue and send",
 * e.g. after credits flow back from the target.
 */
304 static enum htc_send_queue_result htc_try_send(struct htc_target *target,
305 struct htc_endpoint *ep,
306 struct list_head *txq)
308 struct list_head send_queue; /* temp queue to hold packets */
309 struct htc_packet *packet, *tmp_pkt;
310 struct ath6kl *ar = target->dev->ar;
311 enum htc_send_full_action action;
312 int tx_resources, overflow, txqueue_depth, i, good_pkts;
315 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
317 (txq == NULL) ? 0 : get_queue_depth(txq));
319 /* init the local send queue */
320 INIT_LIST_HEAD(&send_queue);
323 * txq equals to NULL means
324 * caller didn't provide a queue, just wants us to
325 * check queues and send
328 if (list_empty(txq)) {
330 return HTC_SEND_QUEUE_DROP;
333 spin_lock_bh(&target->tx_lock);
334 txqueue_depth = get_queue_depth(&ep->txq);
335 spin_unlock_bh(&target->tx_lock);
337 if (txqueue_depth >= ep->max_txq_depth) {
338 /* we've already overflowed */
339 overflow = get_queue_depth(txq);
341 /* get how much we will overflow by */
342 overflow = txqueue_depth;
343 overflow += get_queue_depth(txq);
344 /* get how much we will overflow the TX queue by */
345 overflow -= ep->max_txq_depth;
348 /* if overflow is negative or zero, we are okay */
350 ath6kl_dbg(ATH6KL_DBG_HTC,
351 "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
352 __func__, ep->eid, overflow, txqueue_depth,
355 if ((overflow <= 0) ||
356 (ep->ep_cb.tx_full == NULL)) {
358 * all packets will fit or caller did not provide send
359 * full indication handler -- just move all of them
360 * to the local send_queue object
362 list_splice_tail_init(txq, &send_queue);
364 good_pkts = get_queue_depth(txq) - overflow;
367 return HTC_SEND_QUEUE_DROP;
370 /* we have overflowed, and a callback is provided */
371 /* dequeue all non-overflow packets to the sendqueue */
372 for (i = 0; i < good_pkts; i++) {
373 /* pop off caller's queue */
374 packet = list_first_entry(txq,
377 /* move to local queue */
378 list_move_tail(&packet->list, &send_queue);
382 * the caller's queue has all the packets that won't fit
383 * walk through the caller's queue and indicate each to
384 * the send full handler
386 list_for_each_entry_safe(packet, tmp_pkt,
389 ath6kl_dbg(ATH6KL_DBG_HTC,
390 "%s: Indicat overflowed TX pkts: %p\n",
392 action = ep->ep_cb.tx_full(ep->target, packet);
393 if (action == HTC_SEND_FULL_DROP) {
394 /* callback wants the packet dropped */
395 ep->ep_st.tx_dropped += 1;
397 /* leave this one in the caller's queue
400 /* callback wants to keep this packet,
401 * move from caller's queue to the send
403 list_move_tail(&packet->list,
409 if (list_empty(&send_queue)) {
410 /* no packets made it in, caller will cleanup */
411 return HTC_SEND_QUEUE_DROP;
416 if (!ep->pipe.tx_credit_flow_enabled) {
418 ath6kl_hif_pipe_get_free_queue_number(ar,
424 spin_lock_bh(&target->tx_lock);
425 if (!list_empty(&send_queue)) {
426 /* transfer packets to tail */
427 list_splice_tail_init(&send_queue, &ep->txq);
428 if (!list_empty(&send_queue)) {
430 spin_unlock_bh(&target->tx_lock);
431 return HTC_SEND_QUEUE_DROP;
433 INIT_LIST_HEAD(&send_queue);
436 /* increment tx processing count on entry */
439 if (ep->tx_proc_cnt > 1) {
441 * Another thread or task is draining the TX queues on this
442 * endpoint that thread will reset the tx processing count
443 * when the queue is drained.
446 spin_unlock_bh(&target->tx_lock);
447 return HTC_SEND_QUEUE_OK;
450 /***** beyond this point only 1 thread may enter ******/
453 * Now drain the endpoint TX queue for transmission as long as we have
454 * enough transmit resources.
458 if (get_queue_depth(&ep->txq) == 0)
461 if (ep->pipe.tx_credit_flow_enabled) {
463 * Credit based mechanism provides flow control
464 * based on target transmit resource availability,
465 * we assume that the HIF layer will always have
466 * bus resources greater than target transmit
469 get_htc_packet_credit_based(target, ep, &send_queue);
472 * Get all packets for this endpoint that we can
475 get_htc_packet(target, ep, &send_queue, tx_resources);
478 if (get_queue_depth(&send_queue) == 0) {
480 * Didn't get packets due to out of resources or TX
486 spin_unlock_bh(&target->tx_lock);
488 /* send what we can */
489 htc_issue_packets(target, ep, &send_queue);
491 if (!ep->pipe.tx_credit_flow_enabled) {
492 pipeid = ep->pipe.pipeid_ul;
494 ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
497 spin_lock_bh(&target->tx_lock);
500 /* done with this endpoint, we can clear the count */
502 spin_unlock_bh(&target->tx_lock);
504 return HTC_SEND_QUEUE_OK;
507 /* htc control packet manipulation */
/*
 * Free a TX control packet and its backing resources.
 * (Body not visible in this listing — presumably frees the skb and the
 * htc_packet allocation; TODO confirm against the full source.)
 */
508 static void destroy_htc_txctrl_packet(struct htc_packet *packet)
/*
 * Allocate a fresh TX control packet: a zeroed htc_packet container
 * plus an skb sized for the largest control message
 * (HTC_CONTROL_BUFFER_SIZE).  Returns NULL on allocation failure.
 */
518 static struct htc_packet *build_htc_txctrl_packet(void)
520 struct htc_packet *packet = NULL;
523 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
527 skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
/* Release a TX control packet (thin wrapper over the destroy helper). */
538 static void htc_free_txctrl_packet(struct htc_target *target,
539 struct htc_packet *packet)
541 destroy_htc_txctrl_packet(packet);
/* Allocate a TX control packet (thin wrapper over the build helper). */
544 static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
546 return build_htc_txctrl_packet();
/* TX-completion callback for control packets: just free the packet. */
549 static void htc_txctrl_complete(struct htc_target *target,
550 struct htc_packet *packet)
552 htc_free_txctrl_packet(target, packet);
555 #define MAX_MESSAGE_SIZE 1536
/*
 * Partition the target's TX credit pool across the WMI services.
 *
 * Credits are handed out per service into pipe.txcredit_alloc[]:
 * control and background services get enough credits for one maximum
 * sized message, VI/VO get a quarter of the pool each (minimum one),
 * and whatever is left over goes to best effort.  A separate layout is
 * used for the USB audio class case, though hif_usbaudioclass is
 * currently hard-wired to zero (see FIXME below).
 */
557 static int htc_setup_target_buffer_assignments(struct htc_target *target)
559 int status, credits, credit_per_maxmsg, i;
560 struct htc_pipe_txcredit_alloc *entry;
561 unsigned int hif_usbaudioclass = 0;
563 credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
564 if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
567 /* TODO, this should be configured by the caller! */
569 credits = target->tgt_creds;
570 entry = &target->pipe.txcredit_alloc[0];
574 /* FIXME: hif_usbaudioclass is always zero */
575 if (hif_usbaudioclass) {
576 ath6kl_dbg(ATH6KL_DBG_HTC,
577 "%s: For USB Audio Class- Total:%d\n",
581 /* Setup VO Service To have Max Credits */
582 entry->service_id = WMI_DATA_VO_SVC;
583 entry->credit_alloc = (credits - 6);
584 if (entry->credit_alloc == 0)
585 entry->credit_alloc++;
587 credits -= (int) entry->credit_alloc;
592 entry->service_id = WMI_CONTROL_SVC;
593 entry->credit_alloc = credit_per_maxmsg;
594 credits -= (int) entry->credit_alloc;
598 /* leftovers go to best effort */
601 entry->service_id = WMI_DATA_BE_SVC;
602 entry->credit_alloc = (u8) credits;
606 entry->service_id = WMI_DATA_VI_SVC;
607 entry->credit_alloc = credits / 4;
608 if (entry->credit_alloc == 0)
609 entry->credit_alloc++;
611 credits -= (int) entry->credit_alloc;
616 entry->service_id = WMI_DATA_VO_SVC;
617 entry->credit_alloc = credits / 4;
618 if (entry->credit_alloc == 0)
619 entry->credit_alloc++;
621 credits -= (int) entry->credit_alloc;
626 entry->service_id = WMI_CONTROL_SVC;
627 entry->credit_alloc = credit_per_maxmsg;
628 credits -= (int) entry->credit_alloc;
633 entry->service_id = WMI_DATA_BK_SVC;
634 entry->credit_alloc = credit_per_maxmsg;
635 credits -= (int) entry->credit_alloc;
639 /* leftovers go to best effort */
641 entry->service_id = WMI_DATA_BE_SVC;
642 entry->credit_alloc = (u8) credits;
/* dump the final allocation table for debugging */
647 for (i = 0; i < ENDPOINT_MAX; i++) {
648 if (target->pipe.txcredit_alloc[i].service_id != 0) {
649 ath6kl_dbg(ATH6KL_DBG_HTC,
650 "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
652 target->pipe.txcredit_alloc[i].
654 target->pipe.txcredit_alloc[i].
662 /* process credit reports and call distribution function */
/*
 * Apply a credit report received from the target: add the returned
 * credits to each reported endpoint and, if that endpoint now has both
 * credits and queued packets, kick htc_try_send() for it (dropping the
 * TX lock around the call since htc_try_send takes it itself).
 */
663 static void htc_process_credit_report(struct htc_target *target,
664 struct htc_credit_report *rpt,
666 enum htc_endpoint_id from_ep)
668 int total_credits = 0, i;
669 struct htc_endpoint *ep;
671 /* lock out TX while we update credits */
672 spin_lock_bh(&target->tx_lock);
674 for (i = 0; i < num_entries; i++, rpt++) {
675 if (rpt->eid >= ENDPOINT_MAX) {
677 spin_unlock_bh(&target->tx_lock);
681 ep = &target->endpoint[rpt->eid];
682 ep->cred_dist.credits += rpt->credits;
684 if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
685 spin_unlock_bh(&target->tx_lock);
686 htc_try_send(target, ep, NULL);
687 spin_lock_bh(&target->tx_lock);
690 total_credits += rpt->credits;
692 ath6kl_dbg(ATH6KL_DBG_HTC,
693 "Report indicated %d credits to distribute\n",
696 spin_unlock_bh(&target->tx_lock);
699 /* flush endpoint TX queue */
/*
 * Flush all pending packets from an endpoint's TX queue, completing
 * each one back to its owner.  Runs under the TX lock; note that
 * send_packet_completion() is invoked with the lock still held here.
 */
700 static void htc_flush_tx_endpoint(struct htc_target *target,
701 struct htc_endpoint *ep, u16 tag)
703 struct htc_packet *packet;
705 spin_lock_bh(&target->tx_lock);
706 while (get_queue_depth(&ep->txq)) {
707 packet = list_first_entry(&ep->txq, struct htc_packet, list);
708 list_del(&packet->list);
710 send_packet_completion(target, packet);
712 spin_unlock_bh(&target->tx_lock);
716 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
717 * since upper layers expect struct htc_packet containers we use the completed
718 * skb and look up its corresponding HTC packet buffer from a lookup list.
719 * This is extra overhead that can be fixed by re-aligning HIF interfaces with
/*
 * Find (and remove) the htc_packet whose skb matches @skb on the
 * endpoint's tx_lookup_queue.  Returns the packet, or NULL if no match
 * was found (e.g. the queue was already flushed).
 */
722 static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
723 struct htc_endpoint *ep,
726 struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
728 spin_lock_bh(&target->tx_lock);
731 * iterate from the front of tx lookup queue
732 * this lookup should be fast since lower layers complete in-order and
733 * so the completed packet should be at the head of the list generally
735 list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
737 /* check for removal */
738 if (skb == packet->skb) {
740 list_del(&packet->list);
741 found_packet = packet;
746 spin_unlock_bh(&target->tx_lock);
/*
 * HIF TX-completion entry point: map the completed skb back to its
 * htc_packet (via the lookup queue), complete it to the upper layer,
 * and — when credit flow control is off for this endpoint — re-check
 * the endpoint queue since no credit report will trigger it later.
 */
751 static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
753 struct htc_target *target = ar->htc_target;
754 struct htc_frame_hdr *htc_hdr;
755 struct htc_endpoint *ep;
756 struct htc_packet *packet;
763 htc_hdr = (struct htc_frame_hdr *) netdata;
765 ep_id = htc_hdr->eid;
766 ep = &target->endpoint[ep_id];
768 packet = htc_lookup_tx_packet(target, ep, skb);
769 if (packet == NULL) {
770 /* may have already been flushed and freed */
771 ath6kl_err("HTC TX lookup failed!\n");
773 /* will be giving this buffer back to upper layers */
775 send_packet_completion(target, packet);
779 if (!ep->pipe.tx_credit_flow_enabled) {
781 * note: when using TX credit flow, the re-checking of queues
782 * happens when credits flow back from the target. in the
783 * non-TX credit case, we recheck after the packet completes
785 htc_try_send(target, ep, NULL);
/*
 * Send a queue of packets, all destined for the same endpoint (the
 * endpoint is taken from the first packet).  Anything htc_try_send()
 * could not admit is completed back to the caller with -ENOMEM.
 */
791 static int htc_send_packets_multiple(struct htc_target *target,
792 struct list_head *pkt_queue)
794 struct htc_endpoint *ep;
795 struct htc_packet *packet, *tmp_pkt;
797 if (list_empty(pkt_queue))
800 /* get first packet to find out which ep the packets will go into */
801 packet = list_first_entry(pkt_queue, struct htc_packet, list);
803 if (packet->endpoint >= ENDPOINT_MAX) {
807 ep = &target->endpoint[packet->endpoint];
809 htc_try_send(target, ep, pkt_queue);
811 /* do completion on any packets that couldn't get in */
812 if (!list_empty(pkt_queue)) {
813 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
814 packet->status = -ENOMEM;
817 do_send_completion(ep, pkt_queue);
823 /* htc pipe rx path */
/*
 * Pop an htc_packet container from the free pool.  The pool is a
 * singly-chained list threaded through packet->list.next (not a proper
 * list_head list).  Returns NULL when the pool is empty.
 */
824 static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
826 struct htc_packet *packet;
827 spin_lock_bh(&target->rx_lock);
829 if (target->pipe.htc_packet_pool == NULL) {
830 spin_unlock_bh(&target->rx_lock);
834 packet = target->pipe.htc_packet_pool;
835 target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
837 spin_unlock_bh(&target->rx_lock);
839 packet->list.next = NULL;
/*
 * Return an htc_packet container to the head of the free pool (the
 * same list.next-chained pool used by alloc_htc_packet_container).
 */
843 static void free_htc_packet_container(struct htc_target *target,
844 struct htc_packet *packet)
846 struct list_head *lh;
848 spin_lock_bh(&target->rx_lock);
850 if (target->pipe.htc_packet_pool == NULL) {
851 target->pipe.htc_packet_pool = packet;
852 packet->list.next = NULL;
854 lh = (struct list_head *) target->pipe.htc_packet_pool;
855 packet->list.next = lh;
856 target->pipe.htc_packet_pool = packet;
859 spin_unlock_bh(&target->rx_lock);
/*
 * Parse the trailer records appended to an RX frame.  Each record is a
 * byte-aligned htc_record_hdr followed by record->len bytes of payload;
 * credit-report records are dispatched to htc_process_credit_report(),
 * unknown record ids are logged and skipped.  Length fields are
 * validated against the remaining buffer before use.
 */
862 static int htc_process_trailer(struct htc_target *target, u8 *buffer,
863 int len, enum htc_endpoint_id from_ep)
865 struct htc_credit_report *report;
866 struct htc_record_hdr *record;
867 u8 *record_buf, *orig_buf;
868 int orig_len, status;
875 if (len < sizeof(struct htc_record_hdr)) {
880 /* these are byte aligned structs */
881 record = (struct htc_record_hdr *) buffer;
882 len -= sizeof(struct htc_record_hdr);
883 buffer += sizeof(struct htc_record_hdr);
885 if (record->len > len) {
886 /* no room left in buffer for record */
887 ath6kl_dbg(ATH6KL_DBG_HTC,
888 "invalid length: %d (id:%d) buffer has: %d bytes left\n",
889 record->len, record->rec_id, len);
894 /* start of record follows the header */
897 switch (record->rec_id) {
898 case HTC_RECORD_CREDITS:
899 if (record->len < sizeof(struct htc_credit_report)) {
904 report = (struct htc_credit_report *) record_buf;
905 htc_process_credit_report(target, report,
906 record->len / sizeof(*report),
910 ath6kl_dbg(ATH6KL_DBG_HTC,
911 "unhandled record: id:%d length:%d\n",
912 record->rec_id, record->len);
919 /* advance buffer past this record for next time around */
920 buffer += record->len;
/*
 * Deliver RX completions: pop each packet from @queue_to_indicate and
 * hand it to the endpoint's rx callback, one packet at a time.
 */
927 static void do_recv_completion(struct htc_endpoint *ep,
928 struct list_head *queue_to_indicate)
930 struct htc_packet *packet;
932 if (list_empty(queue_to_indicate)) {
933 /* nothing to indicate */
937 /* using legacy EpRecv */
938 while (!list_empty(queue_to_indicate)) {
939 packet = list_first_entry(queue_to_indicate,
940 struct htc_packet, list);
941 list_del(&packet->list);
942 ep->ep_cb.rx(ep->target, packet);
/*
 * Complete a single RX packet by wrapping it in a one-entry list and
 * reusing the queue-based completion path (do_recv_completion).
 */
948 static void recv_packet_completion(struct htc_target *target,
949 struct htc_endpoint *ep,
950 struct htc_packet *packet)
952 struct list_head container;
953 INIT_LIST_HEAD(&container);
954 list_add_tail(&packet->list, &container);
957 do_recv_completion(ep, &container);
/*
 * HIF RX entry point: validate the HTC frame header on the incoming
 * skb, process any trailer records (e.g. credit reports), then either
 * stash endpoint-0 control responses into pipe.ctrl_response_buf for
 * the polling waiter, or wrap the payload in an htc_packet container
 * and indicate it up through the endpoint's rx callback.
 */
960 static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
963 struct htc_target *target = ar->htc_target;
964 u8 *netdata, *trailer, hdr_info;
965 struct htc_frame_hdr *htc_hdr;
966 u32 netlen, trailerlen = 0;
967 struct htc_packet *packet;
968 struct htc_endpoint *ep;
975 htc_hdr = (struct htc_frame_hdr *) netdata;
/*
 * NOTE(review): ep is computed from htc_hdr->eid before the bounds
 * check below — an out-of-range eid indexes past endpoint[]; consider
 * validating eid first. TODO confirm against the full source.
 */
977 ep = &target->endpoint[htc_hdr->eid];
979 if (htc_hdr->eid >= ENDPOINT_MAX) {
980 ath6kl_dbg(ATH6KL_DBG_HTC,
981 "HTC Rx: invalid EndpointID=%d\n",
987 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
989 if (netlen < (payload_len + HTC_HDR_LENGTH)) {
990 ath6kl_dbg(ATH6KL_DBG_HTC,
991 "HTC Rx: insufficient length, got:%d expected =%u\n",
992 netlen, payload_len + HTC_HDR_LENGTH);
997 /* get flags to check for trailer */
998 hdr_info = htc_hdr->flags;
999 if (hdr_info & HTC_FLG_RX_TRAILER) {
1000 /* extract the trailer length */
1001 hdr_info = htc_hdr->ctrl[0];
1002 if ((hdr_info < sizeof(struct htc_record_hdr)) ||
1003 (hdr_info > payload_len)) {
1004 ath6kl_dbg(ATH6KL_DBG_HTC,
1005 "invalid header: payloadlen should be %d, CB[0]: %d\n",
1006 payload_len, hdr_info);
1011 trailerlen = hdr_info;
1012 /* process trailer after hdr/apps payload */
1013 trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
1014 payload_len - hdr_info;
1015 status = htc_process_trailer(target, trailer, hdr_info,
1021 if (((int) payload_len - (int) trailerlen) <= 0) {
1022 /* zero length packet with trailer, just drop these */
1026 if (htc_hdr->eid == ENDPOINT_0) {
1027 /* handle HTC control message */
1028 if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
1030 * fatal: target should not send unsolicited
1031 * messages on the endpoint 0
1033 ath6kl_dbg(ATH6KL_DBG_HTC,
1034 "HTC ignores Rx Ctrl after setup complete\n");
1039 /* remove HTC header */
1040 skb_pull(skb, HTC_HDR_LENGTH);
1042 netdata = skb->data;
1045 spin_lock_bh(&target->rx_lock);
/* hand the control response to the htc_wait_recv_ctrl_message() poller */
1047 target->pipe.ctrl_response_valid = true;
1048 target->pipe.ctrl_response_len = min_t(int, netlen,
1049 HTC_MAX_CTRL_MSG_LEN);
1050 memcpy(target->pipe.ctrl_response_buf, netdata,
1051 target->pipe.ctrl_response_len);
1053 spin_unlock_bh(&target->rx_lock);
1061 * TODO: the message based HIF architecture allocates net bufs
1062 * for recv packets since it bridges that HIF to upper layers,
1063 * which expects HTC packets, we form the packets here
1065 packet = alloc_htc_packet_container(target);
1066 if (packet == NULL) {
1072 packet->endpoint = htc_hdr->eid;
1073 packet->pkt_cntxt = skb;
1075 /* TODO: for backwards compatibility */
1076 packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
1077 packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
1080 * TODO: this is a hack because the driver layer will set the
1081 * actual len of the skb again which will just double the len
1085 recv_packet_completion(target, ep, packet);
1087 /* recover the packet container */
1088 free_htc_packet_container(target, packet);
/*
 * Drain an endpoint's RX buffer queue, completing each packet back to
 * its owner with -ECANCELED and a zero actual length.  The RX lock is
 * dropped around each do_recv_completion() call since the callback may
 * re-enter HTC.
 */
1099 static void htc_flush_rx_queue(struct htc_target *target,
1100 struct htc_endpoint *ep)
1102 struct list_head container;
1103 struct htc_packet *packet;
1105 spin_lock_bh(&target->rx_lock);
1108 if (list_empty(&ep->rx_bufq))
1111 packet = list_first_entry(&ep->rx_bufq,
1112 struct htc_packet, list);
1113 list_del(&packet->list);
1115 spin_unlock_bh(&target->rx_lock);
1116 packet->status = -ECANCELED;
1117 packet->act_len = 0;
1119 ath6kl_dbg(ATH6KL_DBG_HTC,
1120 "Flushing RX packet:0x%p, length:%d, ep:%d\n",
1121 packet, packet->buf_len,
1124 INIT_LIST_HEAD(&container);
1125 list_add_tail(&packet->list, &container);
1127 /* give the packet back */
1128 do_recv_completion(ep, &container);
1129 spin_lock_bh(&target->rx_lock);
1132 spin_unlock_bh(&target->rx_lock);
1135 /* polling routine to wait for a control packet to be received */
/*
 * Poll (with sleeps) until the RX path marks a control response valid
 * in pipe.ctrl_response_valid, or the poll count is exhausted (timeout).
 * Consuming the response clears the valid flag under the RX lock.
 */
1136 static int htc_wait_recv_ctrl_message(struct htc_target *target)
1138 int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1141 spin_lock_bh(&target->rx_lock);
1143 if (target->pipe.ctrl_response_valid) {
1144 target->pipe.ctrl_response_valid = false;
1145 spin_unlock_bh(&target->rx_lock);
1149 spin_unlock_bh(&target->rx_lock);
1153 msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1157 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
/*
 * RX-completion stub for control packets; not expected to be called
 * since HTC control messages are not received through this path yet.
 */
1164 static void htc_rxctrl_complete(struct htc_target *context,
1165 struct htc_packet *packet)
1167 /* TODO, can't really receive HTC control messages yet.... */
1168 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
1171 /* htc pipe initialization */
/*
 * Reset every endpoint to its initial state: clear the queue depth,
 * re-init the TX/lookup/RX lists, bind the endpoint back to @target,
 * and default TX credit flow control to enabled.
 */
1172 static void reset_endpoint_states(struct htc_target *target)
1174 struct htc_endpoint *ep;
1177 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1178 ep = &target->endpoint[i];
1181 ep->max_txq_depth = 0;
1183 INIT_LIST_HEAD(&ep->txq);
1184 INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1185 INIT_LIST_HEAD(&ep->rx_bufq);
1186 ep->target = target;
1187 ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
1191 /* start HTC, this is called after all services are connected */
/*
 * Configure the target's HIF pipes before starting HTC.
 * (Body not visible in this listing — TODO confirm against full source.)
 */
1192 static int htc_config_target_hif_pipe(struct htc_target *target)
1197 /* htc service functions */
/*
 * Look up the TX credit allocation configured for @service_id in
 * pipe.txcredit_alloc[]; logs when no credits were allocated.
 */
1198 static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1203 for (i = 0; i < ENDPOINT_MAX; i++) {
1204 if (target->pipe.txcredit_alloc[i].service_id == service_id)
1206 target->pipe.txcredit_alloc[i].credit_alloc;
1209 if (allocation == 0) {
1210 ath6kl_dbg(ATH6KL_DBG_HTC,
1211 "HTC Service TX : 0x%2.2X : allocation is zero!\n",
/*
 * Connect an HTC service to an endpoint.
 *
 * For the pseudo control service (HTC_CTRL_RSVD_SVC) endpoint 0 is
 * assigned directly.  For real services, a connect-service control
 * message is built, sent to the target, and the polled response is
 * validated; the endpoint id and max message size come back from the
 * target.  The chosen endpoint is then initialised (credits, callbacks,
 * UL/DL pipe ids via ath6kl_hif_pipe_map_service), and credit flow
 * control is disabled if the connect request asked for it.
 */
1218 static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
1219 struct htc_service_connect_req *conn_req,
1220 struct htc_service_connect_resp *conn_resp)
1222 struct ath6kl *ar = target->dev->ar;
1223 struct htc_packet *packet = NULL;
1224 struct htc_conn_service_resp *resp_msg;
1225 struct htc_conn_service_msg *conn_msg;
1226 enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
1227 bool disable_credit_flowctrl = false;
1228 unsigned int max_msg_size = 0;
1229 struct htc_endpoint *ep;
1230 int length, status = 0;
1231 struct sk_buff *skb;
1235 if (conn_req->svc_id == 0) {
1241 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
1242 /* special case for pseudo control service */
1243 assigned_epid = ENDPOINT_0;
1244 max_msg_size = HTC_MAX_CTRL_MSG_LEN;
1249 tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
1250 if (tx_alloc == 0) {
1255 /* allocate a packet to send to the target */
1256 packet = htc_alloc_txctrl_packet(target);
1258 if (packet == NULL) {
1265 length = sizeof(struct htc_conn_service_msg);
1267 /* assemble connect service message */
1268 conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
1270 if (conn_msg == NULL) {
1277 sizeof(struct htc_conn_service_msg));
1278 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
1279 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
1280 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
1281 ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);
1283 /* tell target desired recv alloc for this ep */
1284 flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
1285 conn_msg->conn_flags |= cpu_to_le16(flags);
1287 if (conn_req->conn_flags &
1288 HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
1289 disable_credit_flowctrl = true;
1292 set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
1294 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1296 status = ath6kl_htc_pipe_tx(target, packet);
1298 /* we don't own it anymore */
1303 /* wait for response */
1304 status = htc_wait_recv_ctrl_message(target);
1308 /* we controlled the buffer creation so it has to be
1311 resp_msg = (struct htc_conn_service_resp *)
1312 target->pipe.ctrl_response_buf;
1314 if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
1315 (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
1316 /* this message is not valid */
1322 ath6kl_dbg(ATH6KL_DBG_TRC,
1323 "%s: service 0x%X conn resp: status: %d ep: %d\n",
1324 __func__, resp_msg->svc_id, resp_msg->status,
1327 conn_resp->resp_code = resp_msg->status;
1328 /* check response status */
1329 if (resp_msg->status != HTC_SERVICE_SUCCESS) {
1330 ath6kl_dbg(ATH6KL_DBG_HTC,
1331 "Target failed service 0x%X connect request (status:%d)\n",
1332 resp_msg->svc_id, resp_msg->status);
1337 assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
1338 max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
1341 /* the rest are parameter checks so set the error status */
1344 if (assigned_epid >= ENDPOINT_MAX) {
1349 if (max_msg_size == 0) {
1354 ep = &target->endpoint[assigned_epid];
1355 ep->eid = assigned_epid;
1356 if (ep->svc_id != 0) {
1357 /* endpoint already in use! */
1362 /* return assigned endpoint to caller */
1363 conn_resp->endpoint = assigned_epid;
1364 conn_resp->len_max = max_msg_size;
1366 /* setup the endpoint */
1367 ep->svc_id = conn_req->svc_id; /* this marks ep in use */
1368 ep->max_txq_depth = conn_req->max_txq_depth;
1369 ep->len_max = max_msg_size;
1370 ep->cred_dist.credits = tx_alloc;
1371 ep->cred_dist.cred_sz = target->tgt_cred_sz;
1372 ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
1373 if (max_msg_size % target->tgt_cred_sz)
1374 ep->cred_dist.cred_per_msg++;
1376 /* copy all the callbacks */
1377 ep->ep_cb = conn_req->ep_cb;
1379 /* initialize tx_drop_packet_threshold */
1380 ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
1382 status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
1383 &ep->pipe.pipeid_ul,
1384 &ep->pipe.pipeid_dl);
1388 ath6kl_dbg(ATH6KL_DBG_HTC,
1389 "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
1390 ep->svc_id, ep->pipe.pipeid_ul,
1391 ep->pipe.pipeid_dl, ep->eid);
1393 if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
1394 ep->pipe.tx_credit_flow_enabled = false;
1395 ath6kl_dbg(ATH6KL_DBG_HTC,
1396 "SVC: 0x%4.4X ep:%d TX flow control off\n",
1397 ep->svc_id, assigned_epid);
1402 htc_free_txctrl_packet(target, packet);
1406 /* htc export functions */
/*
 * Allocate and initialise the HTC pipe target instance: locks,
 * endpoint state, a pre-allocated pool of htc_packet containers, the
 * device wrapper, and the default HIF pipe pair for endpoint 0.
 * On failure, ath6kl_htc_pipe_cleanup() tears down partial state.
 */
1407 static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
1410 struct htc_endpoint *ep = NULL;
1411 struct htc_target *target = NULL;
1412 struct htc_packet *packet;
1415 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
1416 if (target == NULL) {
1417 ath6kl_err("htc create unable to allocate memory\n");
1419 goto fail_htc_create;
1422 spin_lock_init(&target->htc_lock);
1423 spin_lock_init(&target->rx_lock);
1424 spin_lock_init(&target->tx_lock);
1426 reset_endpoint_states(target);
/* pre-populate the container pool via the free routine */
1428 for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
1429 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
1432 free_htc_packet_container(target, packet);
1435 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
1437 ath6kl_err("unable to allocate memory\n");
1439 goto fail_htc_create;
1441 target->dev->ar = ar;
1442 target->dev->htc_cnxt = target;
1444 /* Get HIF default pipe for HTC message exchange */
1445 ep = &target->endpoint[ENDPOINT_0];
1447 ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
1448 &ep->pipe.pipeid_dl);
1455 ath6kl_htc_pipe_cleanup(target);
1462 /* cleanup the HTC instance */
1463 static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1465 struct htc_packet *packet;
1468 packet = alloc_htc_packet_container(target);
1476 /* kfree our instance */
1480 static int ath6kl_htc_pipe_start(struct htc_target *target)
1482 struct sk_buff *skb;
1483 struct htc_setup_comp_ext_msg *setup;
1484 struct htc_packet *packet;
1486 htc_config_target_hif_pipe(target);
1488 /* allocate a buffer to send */
1489 packet = htc_alloc_txctrl_packet(target);
1490 if (packet == NULL) {
1497 /* assemble setup complete message */
1498 setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
1500 memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1501 setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1503 ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1505 set_htc_pkt_info(packet, NULL, (u8 *) setup,
1506 sizeof(struct htc_setup_comp_ext_msg),
1507 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1509 target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1511 return ath6kl_htc_pipe_tx(target, packet);
1514 static void ath6kl_htc_pipe_stop(struct htc_target *target)
1517 struct htc_endpoint *ep;
1519 /* cleanup endpoints */
1520 for (i = 0; i < ENDPOINT_MAX; i++) {
1521 ep = &target->endpoint[i];
1522 htc_flush_rx_queue(target, ep);
1523 htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1526 reset_endpoint_states(target);
1527 target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1530 static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1531 enum htc_endpoint_id endpoint)
1535 spin_lock_bh(&target->rx_lock);
1536 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1537 spin_unlock_bh(&target->rx_lock);
1542 static int ath6kl_htc_pipe_tx(struct htc_target *target,
1543 struct htc_packet *packet)
1545 struct list_head queue;
1547 ath6kl_dbg(ATH6KL_DBG_HTC,
1548 "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1549 __func__, packet->endpoint, packet->buf,
1552 INIT_LIST_HEAD(&queue);
1553 list_add_tail(&packet->list, &queue);
1555 return htc_send_packets_multiple(target, &queue);
1558 static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1560 struct htc_ready_ext_msg *ready_msg;
1561 struct htc_service_connect_req connect;
1562 struct htc_service_connect_resp resp;
1565 status = htc_wait_recv_ctrl_message(target);
1570 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1571 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
1572 target->pipe.ctrl_response_len);
1576 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1578 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1579 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
1580 ready_msg->ver2_0_info.msg_id);
1584 ath6kl_dbg(ATH6KL_DBG_HTC,
1585 "Target Ready! : transmit resources : %d size:%d\n",
1586 ready_msg->ver2_0_info.cred_cnt,
1587 ready_msg->ver2_0_info.cred_sz);
1589 target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1590 target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1592 if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1595 htc_setup_target_buffer_assignments(target);
1597 /* setup our pseudo HTC control endpoint connection */
1598 memset(&connect, 0, sizeof(connect));
1599 memset(&resp, 0, sizeof(resp));
1600 connect.ep_cb.tx_complete = htc_txctrl_complete;
1601 connect.ep_cb.rx = htc_rxctrl_complete;
1602 connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1603 connect.svc_id = HTC_CTRL_RSVD_SVC;
1605 /* connect fake service */
1606 status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1611 static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1612 enum htc_endpoint_id endpoint, u16 tag)
1614 struct htc_endpoint *ep = &target->endpoint[endpoint];
1616 if (ep->svc_id == 0) {
1622 htc_flush_tx_endpoint(target, ep, tag);
1625 static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1626 struct list_head *pkt_queue)
1628 struct htc_packet *packet, *tmp_pkt, *first;
1629 struct htc_endpoint *ep;
1632 if (list_empty(pkt_queue))
1635 first = list_first_entry(pkt_queue, struct htc_packet, list);
1637 if (first->endpoint >= ENDPOINT_MAX) {
1642 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1643 __func__, first->endpoint, get_queue_depth(pkt_queue),
1646 ep = &target->endpoint[first->endpoint];
1648 spin_lock_bh(&target->rx_lock);
1650 /* store receive packets */
1651 list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1653 spin_unlock_bh(&target->rx_lock);
1656 /* walk through queue and mark each one canceled */
1657 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1658 packet->status = -ECANCELED;
1661 do_recv_completion(ep, pkt_queue);
1667 static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
1668 enum htc_endpoint_id ep,
/*
 * Flush-all-RX-buffers hook — a no-op stub in this implementation
 * (per-endpoint RX queues are flushed in ath6kl_htc_pipe_stop()).
 */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	/* TODO */
}
/*
 * Credit-distribution setup hook — the pipe implementation manages
 * credits internally, so this accepts any @info and reports success.
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
1685 static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
1686 .create = ath6kl_htc_pipe_create,
1687 .wait_target = ath6kl_htc_pipe_wait_target,
1688 .start = ath6kl_htc_pipe_start,
1689 .conn_service = ath6kl_htc_pipe_conn_service,
1690 .tx = ath6kl_htc_pipe_tx,
1691 .stop = ath6kl_htc_pipe_stop,
1692 .cleanup = ath6kl_htc_pipe_cleanup,
1693 .flush_txep = ath6kl_htc_pipe_flush_txep,
1694 .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
1695 .activity_changed = ath6kl_htc_pipe_activity_changed,
1696 .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
1697 .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
1698 .credit_setup = ath6kl_htc_pipe_credit_setup,
1699 .tx_complete = ath6kl_htc_pipe_tx_complete,
1700 .rx_complete = ath6kl_htc_pipe_rx_complete,
1703 void ath6kl_htc_pipe_attach(struct ath6kl *ar)
1705 ar->htc_ops = &ath6kl_htc_pipe_ops;