ath6kl: Remove callback msg_pending() and use the function directly
[cascardo/linux.git] / drivers / net / wireless / ath / ath6kl / htc.c
1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16
17 #include "core.h"
18 #include "htc_hif.h"
19 #include "debug.h"
20 #include "hif-ops.h"
21 #include <asm/unaligned.h>
22
23 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
24
25 static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
26                               int ctrl1)
27 {
28         struct htc_frame_hdr *hdr;
29
30         packet->buf -= HTC_HDR_LENGTH;
31         hdr =  (struct htc_frame_hdr *)packet->buf;
32
33         /* Endianess? */
34         put_unaligned((u16)packet->act_len, &hdr->payld_len);
35         hdr->flags = flags;
36         hdr->eid = packet->endpoint;
37         hdr->ctrl[0] = ctrl0;
38         hdr->ctrl[1] = ctrl1;
39 }
40
/* Return a TX control packet to the target's free control-buffer pool. */
static void htc_reclaim_txctrl_buf(struct htc_target *target,
                                   struct htc_packet *pkt)
{
        /* htc_lock protects the free_ctrl_txbuf list */
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
        spin_unlock_bh(&target->htc_lock);
}
48
49 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
50                                               bool tx)
51 {
52         struct htc_packet *packet = NULL;
53         struct list_head *buf_list;
54
55         buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
56
57         spin_lock_bh(&target->htc_lock);
58
59         if (list_empty(buf_list)) {
60                 spin_unlock_bh(&target->htc_lock);
61                 return NULL;
62         }
63
64         packet = list_first_entry(buf_list, struct htc_packet, list);
65         list_del(&packet->list);
66         spin_unlock_bh(&target->htc_lock);
67
68         if (tx)
69                 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
70
71         return packet;
72 }
73
/*
 * Post-send bookkeeping for one TX packet: clear the completion callback,
 * restore the caller's buffer pointer (undoing the HTC header prepend)
 * and, when the send failed, reclaim the consumed credits and trigger a
 * credit redistribution.
 */
static void htc_tx_comp_update(struct htc_target *target,
                               struct htc_endpoint *endpoint,
                               struct htc_packet *packet)
{
        packet->completion = NULL;
        /* undo the HTC_HDR_LENGTH offset applied by htc_prep_send_pkt() */
        packet->buf += HTC_HDR_LENGTH;

        /* nothing more to do for successful sends */
        if (!packet->status)
                return;

        ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
                   packet->status, packet->endpoint, packet->act_len,
                   packet->info.tx.cred_used);

        /* on failure to submit, reclaim credits for this packet */
        spin_lock_bh(&target->tx_lock);
        endpoint->cred_dist.cred_to_dist +=
                                packet->info.tx.cred_used;
        endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                   target->cred_dist_cntxt, &target->cred_dist_list);

        /* let the distribution policy hand the reclaimed credits back out */
        ath6k_credit_distribute(target->cred_dist_cntxt,
                                &target->cred_dist_list,
                                HTC_CREDIT_DIST_SEND_COMPLETE);

        spin_unlock_bh(&target->tx_lock);
}
103
104 static void htc_tx_complete(struct htc_endpoint *endpoint,
105                             struct list_head *txq)
106 {
107         if (list_empty(txq))
108                 return;
109
110         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
111                    "send complete ep %d, (%d pkts)\n",
112                    endpoint->eid, get_queue_depth(txq));
113
114         ath6kl_tx_complete(endpoint->target->dev->ar, txq);
115 }
116
117 static void htc_tx_comp_handler(struct htc_target *target,
118                                 struct htc_packet *packet)
119 {
120         struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
121         struct list_head container;
122
123         htc_tx_comp_update(target, endpoint, packet);
124         INIT_LIST_HEAD(&container);
125         list_add_tail(&packet->list, &container);
126         /* do completion */
127         htc_tx_complete(endpoint, &container);
128 }
129
/*
 * Completion handler for an asynchronous bundled (scatter) send.
 * Updates credit/completion state for every packet in the scatter list,
 * returns the scatter request to the HIF pool, then completes all
 * packets in one batch.
 */
static void htc_async_tx_scat_complete(struct htc_target *target,
                                       struct hif_scatter_req *scat_req)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet;
        struct list_head tx_compq;
        int i;

        INIT_LIST_HEAD(&tx_compq);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                "htc_async_tx_scat_complete  total len: %d  entries: %d\n",
                scat_req->len, scat_req->scat_entries);

        if (scat_req->status)
                ath6kl_err("send scatter req failed: %d\n", scat_req->status);

        /* all entries of one scatter request target the same endpoint */
        packet = scat_req->scat_list[0].packet;
        endpoint = &target->endpoint[packet->endpoint];

        /* walk through the scatter list and process */
        for (i = 0; i < scat_req->scat_entries; i++) {
                packet = scat_req->scat_list[i].packet;
                if (!packet) {
                        /*
                         * NOTE(review): this early return leaks scat_req
                         * (never returned via hif_scatter_req_add) and
                         * skips completion of packets already moved to
                         * tx_compq — confirm whether 'break' was intended.
                         */
                        WARN_ON(1);
                        return;
                }

                packet->status = scat_req->status;
                htc_tx_comp_update(target, endpoint, packet);
                list_add_tail(&packet->list, &tx_compq);
        }

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

        /* complete all packets */
        htc_tx_complete(endpoint, &tx_compq);
}
169
170 static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
171 {
172         int status;
173         bool sync = false;
174         u32 padded_len, send_len;
175
176         if (!packet->completion)
177                 sync = true;
178
179         send_len = packet->act_len + HTC_HDR_LENGTH;
180
181         ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
182                    __func__, send_len, sync ? "sync" : "async");
183
184         padded_len = CALC_TXRX_PADDED_LEN(target->dev, send_len);
185
186         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
187                 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
188                 padded_len,
189                 target->dev->ar->mbox_info.htc_addr,
190                 sync ? "sync" : "async");
191
192         if (sync) {
193                 status = hif_read_write_sync(target->dev->ar,
194                                 target->dev->ar->mbox_info.htc_addr,
195                                  packet->buf, padded_len,
196                                  HIF_WR_SYNC_BLOCK_INC);
197
198                 packet->status = status;
199                  packet->buf += HTC_HDR_LENGTH;
200         } else
201                 status = hif_write_async(target->dev->ar,
202                                 target->dev->ar->mbox_info.htc_addr,
203                                 packet->buf, padded_len,
204                                 HIF_WR_ASYNC_BLOCK_INC, packet);
205
206         return status;
207 }
208
/*
 * Check for (and consume) the credits needed to send 'len' bytes on
 * endpoint 'eid'.  On success the credits are deducted, *req_cred holds
 * how many were used and, when the endpoint is then running low,
 * HTC_FLAGS_NEED_CREDIT_UPDATE is set in *flags so the target is asked
 * for more.  Returns -EINVAL when credits cannot be obtained (caller
 * should leave the packet queued), 0 otherwise.
 */
static int htc_check_credits(struct htc_target *target,
                             struct htc_endpoint *ep, u8 *flags,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
{

        /* one credit per tgt_cred_sz bytes, minimum one credit */
        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
                   *req_cred, ep->cred_dist.credits);

        if (ep->cred_dist.credits < *req_cred) {
                /* the control endpoint never seeks extra credits */
                if (eid == ENDPOINT_0)
                        return -EINVAL;

                /* Seek more credits */
                ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                ep->cred_dist.seek_cred = 0;

                if (ep->cred_dist.credits < *req_cred) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "not enough credits for ep %d - leaving packet in queue\n",
                                   eid);
                        return -EINVAL;
                }
        }

        /* note: 'cred_cosumd' is the stats field's actual spelling */
        ep->cred_dist.credits -= *req_cred;
        ep->ep_st.cred_cosumd += *req_cred;

        /* When we are getting low on credits, ask for more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                ep->cred_dist.seek_cred =
                ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                /* see if we were successful in getting more */
                if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                        /* tell the target we need credits ASAP! */
                        *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                        ep->ep_st.cred_low_indicate += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
                }
        }

        return 0;
}
267
/*
 * Move as many packets as current credits allow from the endpoint's TX
 * queue onto the caller-supplied queue, charging credits and stamping
 * per-packet send metadata (flags, seqno, completion) on the way.
 * Caller holds target->tx_lock (see htc_tx_from_ep_txq()).
 */
static void htc_tx_pkts_get(struct htc_target *target,
                            struct htc_endpoint *endpoint,
                            struct list_head *queue)
{
        int req_cred;
        u8 flags;
        struct htc_packet *packet;
        unsigned int len;

        while (true) {

                flags = 0;

                /* stop once the endpoint queue is drained */
                if (list_empty(&endpoint->txq))
                        break;
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                        "got head pkt:0x%p , queue depth: %d\n",
                        packet, get_queue_depth(&endpoint->txq));

                /* padded length including the HTC header */
                len = CALC_TXRX_PADDED_LEN(target->dev,
                                           packet->act_len + HTC_HDR_LENGTH);

                /* no credits: leave the packet queued and stop */
                if (htc_check_credits(target, endpoint, &flags,
                                      packet->endpoint, len, &req_cred))
                        break;

                /* now we can fully move onto caller's queue */
                /*
                 * NOTE(review): re-reading the head entry looks redundant —
                 * tx_lock is held throughout, so 'packet' should still be
                 * the head; confirm before simplifying.
                 */
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);
                list_move_tail(&packet->list, queue);

                /* save the number of credits this packet consumed */
                packet->info.tx.cred_used = req_cred;

                /* all TX packets are handled asynchronously */
                packet->completion = htc_tx_comp_handler;
                packet->context = target;
                endpoint->ep_st.tx_issued += 1;

                /* save send flags */
                packet->info.tx.flags = flags;
                packet->info.tx.seqno = endpoint->seqno;
                endpoint->seqno++;
        }
}
316
317 /* See if the padded tx length falls on a credit boundary */
318 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
319                                   struct htc_endpoint *ep)
320 {
321         int rem_cred, cred_pad;
322
323         rem_cred = *len % cred_sz;
324
325         /* No padding needed */
326         if  (!rem_cred)
327                 return 0;
328
329         if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
330                 return -1;
331
332         /*
333          * The transfer consumes a "partial" credit, this
334          * packet cannot be bundled unless we add
335          * additional "dummy" padding (max 255 bytes) to
336          * consume the entire credit.
337          */
338         cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
339
340         if ((cred_pad > 0) && (cred_pad <= 255))
341                 *len += cred_pad;
342         else
343                 /* The amount of padding is too large, send as non-bundled */
344                 return -1;
345
346         return cred_pad;
347 }
348
/*
 * Fill a scatter request with up to 'n_scat' packets taken from 'queue',
 * stopping early when the bundle size budget (max_tx_bndl_sz) runs out
 * or a packet cannot be credit-padded for bundling.  On failure, or when
 * fewer than HTC_MIN_HTC_MSGS_TO_BUNDLE packets fit, the setup is rolled
 * back (packets re-queued, buffers restored) and -EINVAL is returned;
 * otherwise 0.
 */
static int htc_setup_send_scat_list(struct htc_target *target,
                                    struct htc_endpoint *endpoint,
                                    struct hif_scatter_req *scat_req,
                                    int n_scat,
                                    struct list_head *queue)
{
        struct htc_packet *packet;
        int i, len, rem_scat, cred_pad;
        int status = 0;

        /* remaining byte budget for this bundle */
        rem_scat = target->dev->max_tx_bndl_sz;

        for (i = 0; i < n_scat; i++) {
                scat_req->scat_list[i].packet = NULL;

                if (list_empty(queue))
                        break;

                packet = list_first_entry(queue, struct htc_packet, list);
                len = CALC_TXRX_PADDED_LEN(target->dev,
                                           packet->act_len + HTC_HDR_LENGTH);

                /* pad to a full credit, or give up bundling this packet */
                cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
                                                  &len, endpoint);
                if (cred_pad < 0) {
                        status = -EINVAL;
                        break;
                }

                if (rem_scat < len) {
                        /* exceeds what we can transfer */
                        status = -ENOSPC;
                        break;
                }

                rem_scat -= len;
                /* now remove it from the queue */
                /* NOTE(review): re-reads the same head entry fetched above */
                packet = list_first_entry(queue, struct htc_packet, list);
                list_del(&packet->list);

                scat_req->scat_list[i].packet = packet;
                /* prepare packet and flag message as part of a send bundle */
                htc_prep_send_pkt(packet,
                                packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
                                cred_pad, packet->info.tx.seqno);
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = len;

                scat_req->len += len;
                scat_req->scat_entries++;
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
                           i, packet, len, rem_scat);
        }

        /* Roll back scatter setup in case of any failure */
        if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                for (i = scat_req->scat_entries - 1; i >= 0; i--) {
                        packet = scat_req->scat_list[i].packet;
                        if (packet) {
                                /* undo htc_prep_send_pkt()'s buffer shift */
                                packet->buf += HTC_HDR_LENGTH;
                                list_add(&packet->list, queue);
                        }
                }
                return -EINVAL;
        }

        return 0;
}
418
419 /*
420  * htc_issue_send_bundle: drain a queue and send as bundles
421  * this function may return without fully draining the queue
422  * when
423  *
424  *    1. scatter resources are exhausted
425  *    2. a message that will consume a partial credit will stop the
426  *    bundling process early
427  *    3. we drop below the minimum number of messages for a bundle
428  */
429 static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
430                                   struct list_head *queue,
431                                   int *sent_bundle, int *n_bundle_pkts)
432 {
433         struct htc_target *target = endpoint->target;
434         struct hif_scatter_req *scat_req = NULL;
435         struct hif_dev_scat_sup_info hif_info;
436         int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
437
438         hif_info = target->dev->hif_scat_info;
439
440         while (true) {
441                 n_scat = get_queue_depth(queue);
442                 n_scat = min(n_scat, target->msg_per_bndl_max);
443
444                 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
445                         /* not enough to bundle */
446                         break;
447
448                 scat_req = hif_scatter_req_get(target->dev->ar);
449
450                 if (!scat_req) {
451                         /* no scatter resources  */
452                         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
453                                 "no more scatter resources\n");
454                         break;
455                 }
456
457                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
458                            n_scat);
459
460                 scat_req->len = 0;
461                 scat_req->scat_entries = 0;
462
463                 if (htc_setup_send_scat_list(target, endpoint, scat_req,
464                                              n_scat, queue)) {
465                         hif_scatter_req_add(target->dev->ar, scat_req);
466                         break;
467                 }
468
469                 /* send path is always asynchronous */
470                 scat_req->complete = htc_async_tx_scat_complete;
471                 n_sent_bundle++;
472                 tot_pkts_bundle += scat_req->scat_entries;
473
474                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
475                            "send scatter total bytes: %d , entries: %d\n",
476                            scat_req->len, scat_req->scat_entries);
477                 ath6kldev_submit_scat_req(target->dev, scat_req, false);
478         }
479
480         *sent_bundle = n_sent_bundle;
481         *n_bundle_pkts = tot_pkts_bundle;
482         ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
483                    n_sent_bundle);
484
485         return;
486 }
487
/*
 * Drain the endpoint TX queue for as long as credits allow, sending in
 * bundles when enabled and singly otherwise.  A tx_proc_cnt guard makes
 * concurrent callers bail out so only one context transmits at a time;
 * tx_lock is dropped around the actual (sleep-free but slow) sends.
 */
static void htc_tx_from_ep_txq(struct htc_target *target,
                               struct htc_endpoint *endpoint)
{
        struct list_head txq;
        struct htc_packet *packet;
        int bundle_sent;
        int n_pkts_bundle;

        spin_lock_bh(&target->tx_lock);

        /* another context is already transmitting for this endpoint */
        endpoint->tx_proc_cnt++;
        if (endpoint->tx_proc_cnt > 1) {
                endpoint->tx_proc_cnt--;
                spin_unlock_bh(&target->tx_lock);
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
                return;
        }

        /*
         * drain the endpoint TX queue for transmission as long
         * as we have enough credits.
         */
        INIT_LIST_HEAD(&txq);

        while (true) {

                if (list_empty(&endpoint->txq))
                        break;

                /* pulls credit-cleared packets onto local txq (lock held) */
                htc_tx_pkts_get(target, endpoint, &txq);

                if (list_empty(&txq))
                        break;

                /* sends happen without the lock; txq is local to us */
                spin_unlock_bh(&target->tx_lock);

                bundle_sent = 0;
                n_pkts_bundle = 0;

                while (true) {
                        /* try to send a bundle on each pass */
                        if ((target->tx_bndl_enable) &&
                            (get_queue_depth(&txq) >=
                            HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                                int temp1 = 0, temp2 = 0;

                                htc_issue_send_bundle(endpoint, &txq,
                                                      &temp1, &temp2);
                                bundle_sent += temp1;
                                n_pkts_bundle += temp2;
                        }

                        if (list_empty(&txq))
                                break;

                        /* remainder goes out one packet at a time */
                        packet = list_first_entry(&txq, struct htc_packet,
                                                  list);
                        list_del(&packet->list);

                        htc_prep_send_pkt(packet, packet->info.tx.flags,
                                          0, packet->info.tx.seqno);
                        htc_issue_send(target, packet);
                }

                spin_lock_bh(&target->tx_lock);

                endpoint->ep_st.tx_bundles += bundle_sent;
                endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
        }

        /* release the single-transmitter guard */
        endpoint->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
}
561
562 static bool htc_try_send(struct htc_target *target,
563                          struct htc_endpoint *endpoint,
564                          struct htc_packet *tx_pkt)
565 {
566         struct htc_ep_callbacks ep_cb;
567         int txq_depth;
568         bool overflow = false;
569
570         ep_cb = endpoint->ep_cb;
571
572         spin_lock_bh(&target->tx_lock);
573         txq_depth = get_queue_depth(&endpoint->txq);
574         spin_unlock_bh(&target->tx_lock);
575
576         if (txq_depth >= endpoint->max_txq_depth)
577                 overflow = true;
578
579         if (overflow)
580                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
581                            "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
582                            endpoint->eid, overflow, txq_depth,
583                            endpoint->max_txq_depth);
584
585         if (overflow && ep_cb.tx_full) {
586                 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
587                            "indicating overflowed tx packet: 0x%p\n", tx_pkt);
588
589                 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
590                     HTC_SEND_FULL_DROP) {
591                         endpoint->ep_st.tx_dropped += 1;
592                         return false;
593                 }
594         }
595
596         spin_lock_bh(&target->tx_lock);
597         list_add_tail(&tx_pkt->list, &endpoint->txq);
598         spin_unlock_bh(&target->tx_lock);
599
600         htc_tx_from_ep_txq(target, endpoint);
601
602         return true;
603 }
604
/*
 * Walk the credit distribution list (priority order) and restart any
 * endpoint that still has packets queued — typically after credits were
 * freed up elsewhere.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_endpoint_credit_dist *cred_dist;

        /*
         * Run through the credit distribution list to see if there are
         * packets queued. NOTE: no locks need to be taken since the
         * distribution list is not dynamic (cannot be re-ordered) and we
         * are not modifying any state.
         */
        list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
                endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

                /* tx_lock only guards the txq peek, not the list walk */
                spin_lock_bh(&target->tx_lock);
                if (!list_empty(&endpoint->txq)) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "ep %d has %d credits and %d packets in tx queue\n",
                                   cred_dist->endpoint,
                                   endpoint->cred_dist.credits,
                                   get_queue_depth(&endpoint->txq));
                        /* drop the lock; htc_tx_from_ep_txq takes it itself */
                        spin_unlock_bh(&target->tx_lock);
                        /*
                         * Try to start the stalled queue, this list is
                         * ordered by priority. If there are credits
                         * available the highest priority queue will get a
                         * chance to reclaim credits from lower priority
                         * ones.
                         */
                        htc_tx_from_ep_txq(target, endpoint);
                        spin_lock_bh(&target->tx_lock);
                }
                spin_unlock_bh(&target->tx_lock);
        }
}
640
641 static int htc_setup_tx_complete(struct htc_target *target)
642 {
643         struct htc_packet *send_pkt = NULL;
644         int status;
645
646         send_pkt = htc_get_control_buf(target, true);
647
648         if (!send_pkt)
649                 return -ENOMEM;
650
651         if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
652                 struct htc_setup_comp_ext_msg *setup_comp_ext;
653                 u32 flags = 0;
654
655                 setup_comp_ext =
656                     (struct htc_setup_comp_ext_msg *)send_pkt->buf;
657                 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
658                 setup_comp_ext->msg_id =
659                         cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
660
661                 if (target->msg_per_bndl_max > 0) {
662                         /* Indicate HTC bundling to the target */
663                         flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
664                         setup_comp_ext->msg_per_rxbndl =
665                                                 target->msg_per_bndl_max;
666                 }
667
668                 memcpy(&setup_comp_ext->flags, &flags,
669                        sizeof(setup_comp_ext->flags));
670                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
671                                        sizeof(struct htc_setup_comp_ext_msg),
672                                        ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
673
674         } else {
675                 struct htc_setup_comp_msg *setup_comp;
676                 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
677                 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
678                 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
679                 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
680                                        sizeof(struct htc_setup_comp_msg),
681                                        ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
682         }
683
684         /* we want synchronous operation */
685         send_pkt->completion = NULL;
686         htc_prep_send_pkt(send_pkt, 0, 0, 0);
687         status = htc_issue_send(target, send_pkt);
688
689         if (send_pkt != NULL)
690                 htc_reclaim_txctrl_buf(target, send_pkt);
691
692         return status;
693 }
694
695 void htc_set_credit_dist(struct htc_target *target,
696                          struct htc_credit_state_info *cred_dist_cntxt,
697                          u16 srvc_pri_order[], int list_len)
698 {
699         struct htc_endpoint *endpoint;
700         int i, ep;
701
702         target->cred_dist_cntxt = cred_dist_cntxt;
703
704         list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
705                       &target->cred_dist_list);
706
707         for (i = 0; i < list_len; i++) {
708                 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
709                         endpoint = &target->endpoint[ep];
710                         if (endpoint->svc_id == srvc_pri_order[i]) {
711                                 list_add_tail(&endpoint->cred_dist.list,
712                                               &target->cred_dist_list);
713                                 break;
714                         }
715                 }
716                 if (ep >= ENDPOINT_MAX) {
717                         WARN_ON(1);
718                         return;
719                 }
720         }
721 }
722
723 int htc_tx(struct htc_target *target, struct htc_packet *packet)
724 {
725         struct htc_endpoint *endpoint;
726         struct list_head queue;
727
728         ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
729                    "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
730                    packet->endpoint, packet->buf, packet->act_len);
731
732         if (packet->endpoint >= ENDPOINT_MAX) {
733                 WARN_ON(1);
734                 return -EINVAL;
735         }
736
737         endpoint = &target->endpoint[packet->endpoint];
738
739         if (!htc_try_send(target, endpoint, packet)) {
740                 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
741                                  -ECANCELED : -ENOSPC;
742                 INIT_LIST_HEAD(&queue);
743                 list_add(&packet->list, &queue);
744                 htc_tx_complete(endpoint, &queue);
745         }
746
747         return 0;
748 }
749
/*
 * Flush endpoint TX queue: cancel queued packets on 'eid' whose tag
 * matches 'tag' (or all of them for HTC_TX_PACKET_TAG_ALL).  Matching
 * packets are moved off the endpoint queue under tx_lock, then each is
 * completed with status -ECANCELED outside the lock.
 */
void htc_flush_txep(struct htc_target *target,
                    enum htc_endpoint_id eid, u16 tag)
{
        struct htc_packet *packet, *tmp_pkt;
        struct list_head discard_q, container;
        struct htc_endpoint *endpoint = &target->endpoint[eid];

        /* an endpoint without a service is not in use */
        if (!endpoint->svc_id) {
                WARN_ON(1);
                return;
        }

        /* initialize the discard queue */
        INIT_LIST_HEAD(&discard_q);

        spin_lock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
                if ((tag == HTC_TX_PACKET_TAG_ALL) ||
                    (tag == packet->info.tx.tag))
                        list_move_tail(&packet->list, &discard_q);
        }

        spin_unlock_bh(&target->tx_lock);

        /* complete outside the lock; completion may call back upward */
        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
                ath6kl_dbg(ATH6KL_DBG_TRC,
                        "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
                        packet, packet->act_len,
                        packet->endpoint, packet->info.tx.tag);

                INIT_LIST_HEAD(&container);
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }

}
790
791 static void htc_flush_txep_all(struct htc_target *target)
792 {
793         struct htc_endpoint *endpoint;
794         int i;
795
796         dump_cred_dist_stats(target);
797
798         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
799                 endpoint = &target->endpoint[i];
800                 if (endpoint->svc_id == 0)
801                         /* not in use.. */
802                         continue;
803                 htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
804         }
805 }
806
/*
 * Mark an endpoint active or inactive for credit distribution.  When
 * the flag actually changes, a redistribution is triggered; on
 * deactivation, stalled queues are additionally given a chance to
 * restart with the freed credits.
 */
void htc_indicate_activity_change(struct htc_target *target,
                                  enum htc_endpoint_id eid, bool active)
{
        struct htc_endpoint *endpoint = &target->endpoint[eid];
        bool dist = false;

        /* an endpoint without a service is not in use */
        if (endpoint->svc_id == 0) {
                WARN_ON(1);
                return;
        }

        spin_lock_bh(&target->tx_lock);

        /* only redistribute when the active state actually flips */
        if (active) {
                if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
                        endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
                        dist = true;
                }
        } else {
                if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
                        endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
                        dist = true;
                }
        }

        if (dist) {
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &target->cred_dist_list);

                ath6k_credit_distribute(target->cred_dist_cntxt,
                                        &target->cred_dist_list,
                                        HTC_CREDIT_DIST_ACTIVITY_CHANGE);
        }

        spin_unlock_bh(&target->tx_lock);

        /* deactivation may have freed credits: restart stalled queues */
        if (dist && !active)
                htc_chk_ep_txq(target);
}
849
850 /* HTC Rx */
851
852 static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
853                                        int n_look_ahds)
854 {
855         endpoint->ep_st.rx_pkts++;
856         if (n_look_ahds == 1)
857                 endpoint->ep_st.rx_lkahds++;
858         else if (n_look_ahds > 1)
859                 endpoint->ep_st.rx_bundle_lkahd++;
860 }
861
862 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
863                                           enum htc_endpoint_id eid, int len)
864 {
865         return (eid == target->dev->ar->ctrl_ep) ?
866                 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
867 }
868
869 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
870 {
871         struct list_head queue;
872
873         INIT_LIST_HEAD(&queue);
874         list_add_tail(&packet->list, &queue);
875         return htc_add_rxbuf_multiple(target, &queue);
876 }
877
878 static void htc_reclaim_rxbuf(struct htc_target *target,
879                               struct htc_packet *packet,
880                               struct htc_endpoint *ep)
881 {
882         if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
883                 htc_rxpkt_reset(packet);
884                 packet->status = -ECANCELED;
885                 ep->ep_cb.rx(ep->target, packet);
886         } else {
887                 htc_rxpkt_reset(packet);
888                 htc_add_rxbuf((void *)(target), packet);
889         }
890 }
891
892 static void reclaim_rx_ctrl_buf(struct htc_target *target,
893                                 struct htc_packet *packet)
894 {
895         spin_lock_bh(&target->htc_lock);
896         list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
897         spin_unlock_bh(&target->htc_lock);
898 }
899
900 static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
901                       u32 rx_len)
902 {
903         struct ath6kl_device *dev = target->dev;
904         u32 padded_len;
905         int status;
906
907         padded_len = CALC_TXRX_PADDED_LEN(dev, rx_len);
908
909         if (padded_len > packet->buf_len) {
910                 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
911                            padded_len, rx_len, packet->buf_len);
912                 return -ENOMEM;
913         }
914
915         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
916                    "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
917                    packet, packet->info.rx.exp_hdr,
918                    padded_len, dev->ar->mbox_info.htc_addr, "sync");
919
920         status = hif_read_write_sync(dev->ar,
921                                      dev->ar->mbox_info.htc_addr,
922                                      packet->buf, padded_len,
923                                      HIF_RD_SYNC_BLOCK_FIX);
924
925         packet->status = status;
926
927         return status;
928 }
929
930 /*
931  * optimization for recv packets, we can indicate a
932  * "hint" that there are more  single-packets to fetch
933  * on this endpoint.
934  */
935 static void set_rxpkt_indication_flag(u32 lk_ahd,
936                                       struct htc_endpoint *endpoint,
937                                       struct htc_packet *packet)
938 {
939         struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
940
941         if (htc_hdr->eid == packet->endpoint) {
942                 if (!list_empty(&endpoint->rx_bufq))
943                         packet->info.rx.indicat_flags |=
944                                         HTC_RX_FLAGS_INDICATE_MORE_PKTS;
945         }
946 }
947
948 static void chk_rx_water_mark(struct htc_endpoint *endpoint)
949 {
950         struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
951
952         if (ep_cb.rx_refill_thresh > 0) {
953                 spin_lock_bh(&endpoint->target->rx_lock);
954                 if (get_queue_depth(&endpoint->rx_bufq)
955                     < ep_cb.rx_refill_thresh) {
956                         spin_unlock_bh(&endpoint->target->rx_lock);
957                         ep_cb.rx_refill(endpoint->target, endpoint->eid);
958                         return;
959                 }
960                 spin_unlock_bh(&endpoint->target->rx_lock);
961         }
962 }
963
/*
 * Prepare rx packet buffers for @n_msg incoming messages described by the
 * look-ahead header and chain them onto @queue.
 *
 * Called with rx_lock held; the lock is dropped and reacquired around the
 * endpoint's rx_allocthresh/rx_refill callbacks.  Returns 0 on success,
 * -EINVAL for an invalid frame length, -ENOSPC when no buffer could be
 * obtained (the target is then flagged as waiting for buffers), or
 * -ECANCELED when HTC is stopping.  On any failure the caller must free
 * the packets already placed on @queue.
 */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
                            u32 *lk_ahds, struct list_head *queue, int n_msg)
{
        struct htc_packet *packet;
        /* FIXME: type of lk_ahds can't be right */
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
        struct htc_ep_callbacks ep_cb;
        int status = 0, j, full_len;
        bool no_recycle;

        /* block-padded length of the full frame (payload + HTC header) */
        full_len = CALC_TXRX_PADDED_LEN(target->dev,
                                        le16_to_cpu(htc_hdr->payld_len) +
                                        sizeof(*htc_hdr));

        if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
                ath6kl_warn("Rx buffer requested with invalid length\n");
                return -EINVAL;
        }

        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {

                /*
                 * Reset flag, any packets allocated using the
                 * rx_alloc() API cannot be recycled on
                 * cleanup,they must be explicitly returned.
                 */
                no_recycle = false;

                if (ep_cb.rx_allocthresh &&
                    (full_len > ep_cb.rx_alloc_thresh)) {
                        ep->ep_st.rx_alloc_thresh_hit += 1;
                        ep->ep_st.rxalloc_thresh_byte +=
                                le16_to_cpu(htc_hdr->payld_len);

                        /* callback may block; drop rx_lock around it */
                        spin_unlock_bh(&target->rx_lock);
                        no_recycle = true;

                        packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
                                                      full_len);
                        spin_lock_bh(&target->rx_lock);
                } else {
                        /* refill handler is being used */
                        if (list_empty(&ep->rx_bufq)) {
                                if (ep_cb.rx_refill) {
                                        spin_unlock_bh(&target->rx_lock);
                                        ep_cb.rx_refill(ep->target, ep->eid);
                                        spin_lock_bh(&target->rx_lock);
                                }
                        }

                        /* take the first queued rx buffer, if any */
                        if (list_empty(&ep->rx_bufq))
                                packet = NULL;
                        else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
                        }
                }

                if (!packet) {
                        /* out of buffers: record which ep is stalled */
                        target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ep->eid;
                        return -ENOSPC;
                }

                /* clear flags */
                packet->info.rx.rx_flags = 0;
                packet->info.rx.indicat_flags = 0;
                packet->status = 0;

                if (no_recycle)
                        /*
                         * flag that these packets cannot be
                         * recycled, they have to be returned to
                         * the user
                         */
                        packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

                /* Caller needs to free this upon any failure */
                list_add_tail(&packet->list, queue);

                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        status = -ECANCELED;
                        break;
                }

                if (j) {
                        /* bundled follow-on: real header unknown until rx */
                        packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
                        packet->info.rx.exp_hdr = 0xFFFFFFFF;
                } else
                        /* set expected look ahead */
                        packet->info.rx.exp_hdr = *lk_ahds;

                packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
                        HTC_HDR_LENGTH;
        }

        return status;
}
1065
/*
 * Validate @msg look-ahead headers and allocate/prepare the rx packets
 * needed to fetch the messages they describe.
 *
 * For each look-ahead the HTC header is sanity checked (endpoint id,
 * payload length, connected service), a bundle count is extracted if the
 * target flagged one, and htc_setup_rxpkts() chains the required buffers
 * onto @queue.  -ENOSPC from setup is treated as success so that partial
 * data can still be received with whatever buffers were queued.  On any
 * other error every packet already on @queue is reclaimed.
 */
static int alloc_and_prep_rxpkts(struct htc_target *target,
                                 u32 lk_ahds[], int msg,
                                 struct htc_endpoint *endpoint,
                                 struct list_head *queue)
{
        int status = 0;
        struct htc_packet *packet, *tmp_pkt;
        struct htc_frame_hdr *htc_hdr;
        int i, n_msg;

        spin_lock_bh(&target->rx_lock);

        for (i = 0; i < msg; i++) {

                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

                if (htc_hdr->eid >= ENDPOINT_MAX) {
                        ath6kl_err("invalid ep in look-ahead: %d\n",
                                   htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                /* look-aheads must all target the endpoint being serviced */
                if (htc_hdr->eid != endpoint->eid) {
                        ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
                                   htc_hdr->eid, endpoint->eid, i);
                        status = -ENOMEM;
                        break;
                }

                if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
                        ath6kl_err("payload len %d exceeds max htc : %d !\n",
                                   htc_hdr->payld_len,
                                   (u32) HTC_MAX_PAYLOAD_LENGTH);
                        status = -ENOMEM;
                        break;
                }

                if (endpoint->svc_id == 0) {
                        ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
                        /*
                         * HTC header indicates that every packet to follow
                         * has the same padded length so that it can be
                         * optimally fetched as a full bundle.
                         */
                        n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
                                HTC_FLG_RX_BNDL_CNT_S;

                        /* the count doesn't include the starter frame */
                        n_msg++;
                        if (n_msg > target->msg_per_bndl_max) {
                                status = -ENOMEM;
                                break;
                        }

                        endpoint->ep_st.rx_bundle_from_hdr += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "htc hdr indicates :%d msg can be fetched as a bundle\n",
                                   n_msg);
                } else
                        /* HTC header only indicates 1 message to fetch */
                        n_msg = 1;

                /* Setup packet buffers for each message */
                status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
                                          n_msg);

                /*
                 * This is due to unavailabilty of buffers to rx entire data.
                 * Return no error so that free buffers from queue can be used
                 * to receive partial data.
                 */
                if (status == -ENOSPC) {
                        spin_unlock_bh(&target->rx_lock);
                        return 0;
                }

                if (status)
                        break;
        }

        spin_unlock_bh(&target->rx_lock);

        if (status) {
                /* on failure hand every queued packet back to its owner */
                list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
                        list_del(&packet->list);
                        htc_reclaim_rxbuf(target, packet,
                                          &target->endpoint[packet->endpoint]);
                }
        }

        return status;
}
1164
1165 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1166 {
1167         if (packets->endpoint != ENDPOINT_0) {
1168                 WARN_ON(1);
1169                 return;
1170         }
1171
1172         if (packets->status == -ECANCELED) {
1173                 reclaim_rx_ctrl_buf(context, packets);
1174                 return;
1175         }
1176
1177         if (packets->act_len > 0) {
1178                 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1179                         packets->act_len + HTC_HDR_LENGTH);
1180
1181                 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1182                              "Unexpected ENDPOINT 0 Message",
1183                              packets->buf - HTC_HDR_LENGTH,
1184                              packets->act_len + HTC_HDR_LENGTH);
1185         }
1186
1187         htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1188 }
1189
/*
 * Process a credit report received from the target.
 *
 * Each of the @n_entries report entries returns tx credits to one
 * endpoint.  Endpoint-0 credits are applied directly; credits for any
 * other endpoint are staged in cred_to_dist and handed to the credit
 * distribution function (still under tx_lock).  @from_ep records which
 * endpoint carried the report, for statistics only.  If any credits came
 * back, the tx queues are kicked after the lock is dropped.
 */
static void htc_proc_cred_rpt(struct htc_target *target,
                              struct htc_credit_report *rpt,
                              int n_entries,
                              enum htc_endpoint_id from_ep)
{
        struct htc_endpoint *endpoint;
        int tot_credits = 0, i;
        bool dist = false;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

        spin_lock_bh(&target->tx_lock);

        for (i = 0; i < n_entries; i++, rpt++) {
                if (rpt->eid >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        spin_unlock_bh(&target->tx_lock);
                        return;
                }

                endpoint = &target->endpoint[rpt->eid];

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
                        rpt->eid, rpt->credits);

                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;

                if (from_ep == rpt->eid) {
                        /*
                         * This credit report arrived on the same endpoint
                         * indicating it arrived in an RX packet.
                         */
                        endpoint->ep_st.cred_from_rx += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_rx += 1;
                } else if (from_ep == ENDPOINT_0) {
                        /* credit arrived on endpoint 0 as a NULL message */
                        endpoint->ep_st.cred_from_ep0 += rpt->credits;
                        endpoint->ep_st.cred_rpt_ep0 += 1;
                } else {
                        endpoint->ep_st.cred_from_other += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_other += 1;
                }

                if (ENDPOINT_0 == rpt->eid)
                        /* always give endpoint 0 credits back */
                        endpoint->cred_dist.credits += rpt->credits;
                else {
                        endpoint->cred_dist.cred_to_dist += rpt->credits;
                        dist = true;
                }

                /*
                 * Refresh tx depth for distribution function that will
                 * recover these credits NOTE: this is only valid when
                 * there are credits to recover!
                 */
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                tot_credits += rpt->credits;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "report indicated %d credits to distribute\n",
                   tot_credits);

        if (dist) {
                /*
                 * This was a credit return based on a completed send
                 * operations note, this is done with the lock held
                 */
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &target->cred_dist_list);

                ath6k_credit_distribute(target->cred_dist_cntxt,
                                        &target->cred_dist_list,
                                        HTC_CREDIT_DIST_SEND_COMPLETE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (tot_credits)
                htc_chk_ep_txq(target);
}
1276
/*
 * Dispatch one trailer record to its handler.
 *
 * @record/@record_buf describe a single record carved out of the frame
 * trailer by htc_proc_trailer().  Credit reports are forwarded to
 * htc_proc_cred_rpt(); single or bundled look-ahead reports are copied
 * into @next_lk_ahds with the count stored in *@n_lk_ahds, but only when
 * the caller passed non-NULL pointers.  Unknown record ids are logged
 * and ignored.  Returns 0, or -EINVAL for a malformed record length.
 */
static int htc_parse_trailer(struct htc_target *target,
                             struct htc_record_hdr *record,
                             u8 *record_buf, u32 *next_lk_ahds,
                             enum htc_endpoint_id endpoint,
                             int *n_lk_ahds)
{
        struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
        struct htc_lookahead_report *lk_ahd;
        int len;

        switch (record->rec_id) {
        case HTC_RECORD_CREDITS:
                len = record->len / sizeof(struct htc_credit_report);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                htc_proc_cred_rpt(target,
                                  (struct htc_credit_report *) record_buf,
                                  len, endpoint);
                break;
        case HTC_RECORD_LOOKAHEAD:
                len = record->len / sizeof(*lk_ahd);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                lk_ahd = (struct htc_lookahead_report *) record_buf;
                /* pre_valid must be the complement of post_valid for the
                 * look-ahead to be trusted */
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
                    && next_lk_ahds) {

                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);

                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
                                        next_lk_ahds, 4);

                        *n_lk_ahds = 1;
                }
                break;
        case HTC_RECORD_LOOKAHEAD_BUNDLE:
                len = record->len / sizeof(*bundle_lkahd_rpt);
                if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                if (next_lk_ahds) {
                        int i;

                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;

                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
                                        record_buf, record->len);

                        /* one 4-byte look-ahead per bundled report entry */
                        for (i = 0; i < len; i++) {
                                memcpy((u8 *)&next_lk_ahds[i],
                                       bundle_lkahd_rpt->lk_ahd, 4);
                                bundle_lkahd_rpt++;
                        }

                        *n_lk_ahds = i;
                }
                break;
        default:
                ath6kl_err("unhandled record: id:%d len:%d\n",
                           record->rec_id, record->len);
                break;
        }

        return 0;

}
1357
1358 static int htc_proc_trailer(struct htc_target *target,
1359                             u8 *buf, int len, u32 *next_lk_ahds,
1360                             int *n_lk_ahds, enum htc_endpoint_id endpoint)
1361 {
1362         struct htc_record_hdr *record;
1363         int orig_len;
1364         int status;
1365         u8 *record_buf;
1366         u8 *orig_buf;
1367
1368         ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
1369
1370         ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);
1371
1372         orig_buf = buf;
1373         orig_len = len;
1374         status = 0;
1375
1376         while (len > 0) {
1377
1378                 if (len < sizeof(struct htc_record_hdr)) {
1379                         status = -ENOMEM;
1380                         break;
1381                 }
1382                 /* these are byte aligned structs */
1383                 record = (struct htc_record_hdr *) buf;
1384                 len -= sizeof(struct htc_record_hdr);
1385                 buf += sizeof(struct htc_record_hdr);
1386
1387                 if (record->len > len) {
1388                         ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1389                                    record->len, record->rec_id, len);
1390                         status = -ENOMEM;
1391                         break;
1392                 }
1393                 record_buf = buf;
1394
1395                 status = htc_parse_trailer(target, record, record_buf,
1396                                            next_lk_ahds, endpoint, n_lk_ahds);
1397
1398                 if (status)
1399                         break;
1400
1401                 /* advance buffer past this record for next time around */
1402                 buf += record->len;
1403                 len -= record->len;
1404         }
1405
1406         ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
1407                      orig_buf, orig_len);
1408
1409         return status;
1410 }
1411
/*
 * Validate and strip the HTC header of a received packet.
 *
 * The first 4 bytes of the packet are compared against the expected
 * look-ahead; for bundled packets (HTC_RX_PKT_REFRESH_HDR) the expected
 * header and actual length are refreshed from the received data first.
 * Any trailer flagged in the header is processed, and new look-aheads
 * found there are returned via @next_lkahds/@n_lkahds (either may be
 * NULL, and both are suppressed for HTC_RX_PKT_IGNORE_LOOKAHEAD).
 * On success packet->buf is advanced past the HTC header and act_len
 * shrunk accordingly.  Returns 0 or -ENOMEM on a malformed frame.
 */
static int htc_proc_rxhdr(struct htc_target *target,
                          struct htc_packet *packet,
                          u32 *next_lkahds, int *n_lkahds)
{
        int status = 0;
        u16 payload_len;
        u32 lk_ahd;
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

        if (n_lkahds != NULL)
                *n_lkahds = 0;

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
                        packet->act_len);

        /*
         * NOTE: we cannot assume the alignment of buf, so we use the safe
         * macros to retrieve 16 bit fields.
         */
        payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

        /* first word of the frame doubles as the look-ahead value */
        memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

        if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
                /*
                 * Refresh the expected header and the actual length as it
                 * was unknown when this packet was grabbed as part of the
                 * bundle.
                 */
                packet->info.rx.exp_hdr = lk_ahd;
                packet->act_len = payload_len + HTC_HDR_LENGTH;

                /* validate the actual header that was refreshed  */
                if (packet->act_len > packet->buf_len) {
                        ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
                                   payload_len, lk_ahd);
                        /*
                         * Limit this to max buffer just to print out some
                         * of the buffer.
                         */
                        packet->act_len = min(packet->act_len, packet->buf_len);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->endpoint != htc_hdr->eid) {
                        ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
                                   htc_hdr->eid, packet->endpoint);
                        status = -ENOMEM;
                        goto fail_rx;
                }
        }

        /* the received header must match what the look-ahead promised */
        if (lk_ahd != packet->info.rx.exp_hdr) {
                ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
                           packet, packet->info.rx.rx_flags);
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
                                &packet->info.rx.exp_hdr, 4);
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
                                (u8 *)&lk_ahd, sizeof(lk_ahd));
                status = -ENOMEM;
                goto fail_rx;
        }

        if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
                /* ctrl[0] carries the trailer length in bytes */
                if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
                    htc_hdr->ctrl[0] > payload_len) {
                        ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
                                   payload_len, htc_hdr->ctrl[0]);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
                        next_lkahds = NULL;
                        n_lkahds = NULL;
                }

                /* trailer sits at the very end of the payload */
                status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
                                          + payload_len - htc_hdr->ctrl[0],
                                          htc_hdr->ctrl[0], next_lkahds,
                                           n_lkahds, packet->endpoint);

                if (status)
                        goto fail_rx;

                packet->act_len -= htc_hdr->ctrl[0];
        }

        packet->buf += HTC_HDR_LENGTH;
        packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
                                packet->buf,
                                packet->act_len < 256 ? packet->act_len : 256);
        else {
                if (packet->act_len > 0)
                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
                                        "HTC - Application Msg",
                                        packet->buf, packet->act_len);
        }

        return status;
}
1518
1519 static void do_rx_completion(struct htc_endpoint *endpoint,
1520                              struct htc_packet *packet)
1521 {
1522                 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1523                            "htc calling ep %d recv callback on packet 0x%p\n",
1524                            endpoint->eid, packet);
1525                 endpoint->ep_cb.rx(endpoint->target, packet);
1526 }
1527
/*
 * Fetch a bundle of rx packets from @rxq with a single scatter request.
 *
 * Up to msg_per_bndl_max packets (bounded by max_rx_bndl_sz bytes) are
 * moved from @rxq into the scatter list and, on submission, onto
 * @sync_compq; packets that don't fit stay on @rxq.  All but the last
 * packet of a full bundle have their look-aheads ignored.  On success
 * *@n_pkt_fetched is set to the number of packets submitted.
 *
 * NOTE(review): when no scatter request is available this returns 0
 * without fetching anything or touching *@n_pkt_fetched — presumably the
 * caller relies on its own initialization of the fetched count; confirm
 * against the caller.
 */
static int htc_issue_rxpkt_bundle(struct htc_target *target,
                                  struct list_head *rxq,
                                  struct list_head *sync_compq,
                                  int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->dev->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
                            get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
                get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        if (scat_req == NULL)
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                pad_len = CALC_TXRX_PADDED_LEN(target->dev,
                                                   packet->act_len);

                /* stop filling once the bundle size budget is exhausted */
                if ((rem_space - pad_len) < 0) {
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packet 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle the last packet
                         * however can have it's lookahead used
                         */
                        packet->info.rx.rx_flags |=
                            HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        /* synchronous scatter read of the whole bundle */
        status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

        return status;
}
1620
/*
 * Process the HTC headers of the packets just fetched from the target and
 * deliver each payload to its endpoint's rx completion callback.
 * @lk_ahds/@n_lk_ahd are refreshed from any look-ahead trailer found, so
 * the caller knows whether further messages are pending.
 */
static int htc_proc_fetched_rxpkts(struct htc_target *target,
                                   struct list_head *comp_pktq, u32 lk_ahds[],
                                   int *n_lk_ahd)
{
        struct htc_packet *packet, *tmp_pkt;
        struct htc_endpoint *ep;
        int status = 0;

        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
                list_del(&packet->list);
                ep = &target->endpoint[packet->endpoint];

                /* process header for each of the recv packet */
                status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
                if (status)
                        /* remaining queued packets are reclaimed by the caller */
                        return status;

                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
                         * based on the lookahead.
                         */
                        if (*n_lk_ahd > 0)
                                set_rxpkt_indication_flag(lk_ahds[0],
                                                          ep, packet);
                } else
                        /*
                         * Packets in a bundle automatically have
                         * this flag set.
                         */
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;

                htc_update_rx_stats(ep, *n_lk_ahd);

                /* per-endpoint statistic: count packets that arrived bundled */
                if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
                        ep->ep_st.rx_bundl += 1;

                do_rx_completion(ep, packet);
        }

        return status;
}
1664
/*
 * Pull every packet on @rx_pktq from the target.  When rx bundling is
 * enabled and more than one packet is queued, a scatter/gather bundle
 * fetch is attempted first; anything the bundle could not take (or
 * everything, when bundling is unavailable) is fetched one packet at a
 * time, fully synchronously.  Fetched packets are moved to @comp_pktq
 * for header processing by the caller.
 */
static int htc_fetch_rxpkts(struct htc_target *target,
                            struct list_head *rx_pktq,
                            struct list_head *comp_pktq)
{
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;

        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;

                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
                         * bundle transfer and recv bundling is
                         * allowed.
                         */
                        status = htc_issue_rxpkt_bundle(target, rx_pktq,
                                                        comp_pktq,
                                                        &fetched_pkts,
                                                        part_bundle);
                        if (status)
                                return status;

                        /*
                         * The bundle could not take every queued packet;
                         * leftovers must have their look-aheads ignored.
                         */
                        if (!list_empty(rx_pktq))
                                part_bundle = true;
                }

                /* bundle fetch did not run or took nothing: fetch singly */
                if (!fetched_pkts) {
                        struct htc_packet *packet;

                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                   list);

                        list_del(&packet->list);

                        /* fully synchronous */
                        packet->completion = NULL;

                        if (!list_empty(rx_pktq))
                                /*
                                 * look_aheads in all packet
                                 * except the last one in the
                                 * bundle must be ignored
                                 */
                                packet->info.rx.rx_flags |=
                                        HTC_RX_PKT_IGNORE_LOOKAHEAD;

                        /* go fetch the packet */
                        status = dev_rx_pkt(target, packet, packet->act_len);
                        if (status)
                                return status;

                        list_add_tail(&packet->list, comp_pktq);
                }
        }

        return status;
}
1725
1726 int htc_rxmsg_pending_handler(struct htc_target *target, u32 msg_look_ahead[],
1727                               int *num_pkts)
1728 {
1729         struct htc_packet *packets, *tmp_pkt;
1730         struct htc_endpoint *endpoint;
1731         struct list_head rx_pktq, comp_pktq;
1732         int status = 0;
1733         u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
1734         int num_look_ahead = 1;
1735         enum htc_endpoint_id id;
1736         int n_fetched = 0;
1737
1738         *num_pkts = 0;
1739
1740         /*
1741          * On first entry copy the look_aheads into our temp array for
1742          * processing
1743          */
1744         memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
1745
1746         while (true) {
1747
1748                 /*
1749                  * First lookahead sets the expected endpoint IDs for all
1750                  * packets in a bundle.
1751                  */
1752                 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
1753                 endpoint = &target->endpoint[id];
1754
1755                 if (id >= ENDPOINT_MAX) {
1756                         ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1757                                    id);
1758                         status = -ENOMEM;
1759                         break;
1760                 }
1761
1762                 INIT_LIST_HEAD(&rx_pktq);
1763                 INIT_LIST_HEAD(&comp_pktq);
1764
1765                 /*
1766                  * Try to allocate as many HTC RX packets indicated by the
1767                  * look_aheads.
1768                  */
1769                 status = alloc_and_prep_rxpkts(target, look_aheads,
1770                                                num_look_ahead, endpoint,
1771                                                &rx_pktq);
1772                 if (status)
1773                         break;
1774
1775                 if (get_queue_depth(&rx_pktq) >= 2)
1776                         /*
1777                          * A recv bundle was detected, force IRQ status
1778                          * re-check again
1779                          */
1780                         target->dev->chk_irq_status_cnt = 1;
1781
1782                 n_fetched += get_queue_depth(&rx_pktq);
1783
1784                 num_look_ahead = 0;
1785
1786                 status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
1787
1788                 if (!status)
1789                         chk_rx_water_mark(endpoint);
1790
1791                 /* Process fetched packets */
1792                 status = htc_proc_fetched_rxpkts(target, &comp_pktq,
1793                                                  look_aheads, &num_look_ahead);
1794
1795                 if (!num_look_ahead || status)
1796                         break;
1797
1798                 /*
1799                  * For SYNCH processing, if we get here, we are running
1800                  * through the loop again due to a detected lookahead. Set
1801                  * flag that we should re-check IRQ status registers again
1802                  * before leaving IRQ processing, this can net better
1803                  * performance in high throughput situations.
1804                  */
1805                 target->dev->chk_irq_status_cnt = 1;
1806         }
1807
1808         if (status) {
1809                 ath6kl_err("failed to get pending recv messages: %d\n",
1810                            status);
1811                 /*
1812                  * Cleanup any packets we allocated but didn't use to
1813                  * actually fetch any packets.
1814                  */
1815                 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1816                         list_del(&packets->list);
1817                         htc_reclaim_rxbuf(target, packets,
1818                                         &target->endpoint[packets->endpoint]);
1819                 }
1820
1821                 /* cleanup any packets in sync completion queue */
1822                 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
1823                         list_del(&packets->list);
1824                         htc_reclaim_rxbuf(target, packets,
1825                                           &target->endpoint[packets->endpoint]);
1826                 }
1827
1828                 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1829                         ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1830                         ath6kldev_rx_control(target->dev, false);
1831                 }
1832         }
1833
1834         /*
1835          * Before leaving, check to see if host ran out of buffers and
1836          * needs to stop the receiver.
1837          */
1838         if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1839                 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1840                 ath6kldev_rx_control(target->dev, false);
1841         }
1842         *num_pkts = n_fetched;
1843
1844         return status;
1845 }
1846
/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY.  At init messages
 * on ENDPOINT 0 are expected.
 *
 * Returns the received control packet on success, or NULL on any failure
 * (poll timeout, unexpected endpoint, oversized message, or a header
 * processing error).  The caller owns the returned packet and releases
 * it with htc_rxpkt_reset() + reclaim_rx_ctrl_buf().
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;

        /* block until the target posts a message or the timeout expires */
        if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
                               HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

        /* the look-ahead word is an image of the HTC frame header */
        htc_hdr = (struct htc_frame_hdr *)&look_ahead;

        /* only ENDPOINT 0 traffic is expected at init time */
        if (htc_hdr->eid != ENDPOINT_0)
                return NULL;

        packet = htc_get_control_buf(target, false);

        if (!packet)
                return NULL;

        packet->info.rx.rx_flags = 0;
        packet->info.rx.exp_hdr = look_ahead;
        packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

        /* reject messages larger than the control buffer */
        if (packet->act_len > packet->buf_len)
                goto fail_ctrl_rx;

        /* we want synchronous operation */
        packet->completion = NULL;

        /* get the message from the device, this will block */
        if (dev_rx_pkt(target, packet, packet->act_len))
                goto fail_ctrl_rx;

        /* process receive header */
        packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);

        if (packet->status) {
                ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
                           packet->status);
                goto fail_ctrl_rx;
        }

        return packet;

fail_ctrl_rx:
        if (packet != NULL) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return NULL;
}
1908
1909 int htc_add_rxbuf_multiple(struct htc_target *target,
1910                            struct list_head *pkt_queue)
1911 {
1912         struct htc_endpoint *endpoint;
1913         struct htc_packet *first_pkt;
1914         bool rx_unblock = false;
1915         int status = 0, depth;
1916
1917         if (list_empty(pkt_queue))
1918                 return -ENOMEM;
1919
1920         first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
1921
1922         if (first_pkt->endpoint >= ENDPOINT_MAX)
1923                 return status;
1924
1925         depth = get_queue_depth(pkt_queue);
1926
1927         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1928                 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
1929                 first_pkt->endpoint, depth, first_pkt->buf_len);
1930
1931         endpoint = &target->endpoint[first_pkt->endpoint];
1932
1933         if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1934                 struct htc_packet *packet, *tmp_pkt;
1935
1936                 /* walk through queue and mark each one canceled */
1937                 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1938                         packet->status = -ECANCELED;
1939                         list_del(&packet->list);
1940                         do_rx_completion(endpoint, packet);
1941                 }
1942
1943                 return status;
1944         }
1945
1946         spin_lock_bh(&target->rx_lock);
1947
1948         list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
1949
1950         /* check if we are blocked waiting for a new buffer */
1951         if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1952                 if (target->ep_waiting == first_pkt->endpoint) {
1953                         ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1954                                 "receiver was blocked on ep:%d, unblocking.\n",
1955                                 target->ep_waiting);
1956                         target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
1957                         target->ep_waiting = ENDPOINT_MAX;
1958                         rx_unblock = true;
1959                 }
1960         }
1961
1962         spin_unlock_bh(&target->rx_lock);
1963
1964         if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
1965                 /* TODO : implement a buffer threshold count? */
1966                 ath6kldev_rx_control(target->dev, true);
1967
1968         return status;
1969 }
1970
/*
 * Free every receive buffer queued on every in-use endpoint.  Called
 * from htc_stop() after interrupts are masked, so the rx_bufq lists are
 * no longer being consumed by the receive path.
 */
void htc_flush_rx_buf(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet, *tmp_pkt;
        int i;

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (!endpoint->svc_id)
                        /* not in use.. */
                        continue;

                spin_lock_bh(&target->rx_lock);
                list_for_each_entry_safe(packet, tmp_pkt,
                                         &endpoint->rx_bufq, list) {
                        list_del(&packet->list);
                        /*
                         * NOTE(review): rx_lock is dropped around the skb
                         * free and re-taken before the next iteration; this
                         * appears to rely on no other context touching
                         * rx_bufq here (interrupts already masked) — confirm.
                         */
                        spin_unlock_bh(&target->rx_lock);
                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
                                   packet, packet->buf_len,
                                   packet->endpoint);
                        /* pkt_cntxt is freed as an skb here */
                        dev_kfree_skb(packet->pkt_cntxt);
                        spin_lock_bh(&target->rx_lock);
                }
                spin_unlock_bh(&target->rx_lock);
        }
}
1998
/*
 * Connect a service to the target.  The pseudo control service is
 * assigned ENDPOINT 0 implicitly; for any other service a connect
 * request message is sent and the response awaited synchronously.  On
 * success the assigned endpoint is initialized (limits, callbacks,
 * credit distribution) and reported through @conn_resp.
 *
 * Returns 0 on success; on failure a nonzero error is returned and the
 * control buffers used for the exchange are reclaimed at fail_tx.
 */
int htc_conn_service(struct htc_target *target,
                     struct htc_service_connect_req *conn_req,
                     struct htc_service_connect_resp *conn_resp)
{
        struct htc_packet *rx_pkt = NULL;
        struct htc_packet *tx_pkt = NULL;
        struct htc_conn_service_resp *resp_msg;
        struct htc_conn_service_msg *conn_msg;
        struct htc_endpoint *endpoint;
        enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
        unsigned int max_msg_sz = 0;
        int status = 0;

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "htc_conn_service, target:0x%p service id:0x%X\n",
                   target, conn_req->svc_id);

        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
                /* special case for pseudo control service */
                assigned_ep = ENDPOINT_0;
                max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
        } else {
                /* allocate a packet to send to the target */
                tx_pkt = htc_get_control_buf(target, true);

                if (!tx_pkt)
                        return -ENOMEM;

                conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
                memset(conn_msg, 0, sizeof(*conn_msg));
                conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
                conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
                conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

                /* svc_meta_len is zero (memset above), so the send length
                 * is the bare connect message */
                set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
                                 sizeof(*conn_msg) + conn_msg->svc_meta_len,
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

                /* we want synchronous operation */
                tx_pkt->completion = NULL;
                htc_prep_send_pkt(tx_pkt, 0, 0, 0);
                status = htc_issue_send(target, tx_pkt);

                if (status)
                        goto fail_tx;

                /* wait for response */
                rx_pkt = htc_wait_for_ctrl_msg(target);

                if (!rx_pkt) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

                /* sanity-check response id and minimum length */
                if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
                    || (rx_pkt->act_len < sizeof(*resp_msg))) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                conn_resp->resp_code = resp_msg->status;
                /* check response status */
                if (resp_msg->status != HTC_SERVICE_SUCCESS) {
                        ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
                                   resp_msg->svc_id, resp_msg->status);
                        status = -ENOMEM;
                        goto fail_tx;
                }

                assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
                max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
        }

        /* either branch must have produced a valid endpoint and size */
        if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
                status = -ENOMEM;
                goto fail_tx;
        }

        endpoint = &target->endpoint[assigned_ep];
        endpoint->eid = assigned_ep;
        /* a nonzero svc_id means the endpoint is already connected */
        if (endpoint->svc_id) {
                status = -ENOMEM;
                goto fail_tx;
        }

        /* return assigned endpoint to caller */
        conn_resp->endpoint = assigned_ep;
        conn_resp->len_max = max_msg_sz;

        /* setup the endpoint */

        /* this marks the endpoint in use */
        endpoint->svc_id = conn_req->svc_id;

        endpoint->max_txq_depth = conn_req->max_txq_depth;
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
        endpoint->cred_dist.htc_rsvd = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

        if (conn_req->max_rxmsg_sz) {
                /*
                 * Override cred_per_msg calculation, this optimizes
                 * the credit-low indications since the host will actually
                 * issue smaller messages in the Send path.
                 */
                if (conn_req->max_rxmsg_sz > max_msg_sz) {
                        status = -ENOMEM;
                        goto fail_tx;
                }
                endpoint->cred_dist.cred_per_msg =
                    conn_req->max_rxmsg_sz / target->tgt_cred_sz;
        } else
                endpoint->cred_dist.cred_per_msg =
                    max_msg_sz / target->tgt_cred_sz;

        /* a message always costs at least one credit */
        if (!endpoint->cred_dist.cred_per_msg)
                endpoint->cred_dist.cred_per_msg = 1;

        /* save local connection flags */
        endpoint->conn_flags = conn_req->flags;

fail_tx:
        if (tx_pkt)
                htc_reclaim_txctrl_buf(target, tx_pkt);

        if (rx_pkt) {
                htc_rxpkt_reset(rx_pkt);
                reclaim_rx_ctrl_buf(target, rx_pkt);
        }

        return status;
}
2136
2137 static void reset_ep_state(struct htc_target *target)
2138 {
2139         struct htc_endpoint *endpoint;
2140         int i;
2141
2142         for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2143                 endpoint = &target->endpoint[i];
2144                 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2145                 endpoint->svc_id = 0;
2146                 endpoint->len_max = 0;
2147                 endpoint->max_txq_depth = 0;
2148                 memset(&endpoint->ep_st, 0,
2149                        sizeof(endpoint->ep_st));
2150                 INIT_LIST_HEAD(&endpoint->rx_bufq);
2151                 INIT_LIST_HEAD(&endpoint->txq);
2152                 endpoint->target = target;
2153         }
2154
2155         /* reset distribution list */
2156         INIT_LIST_HEAD(&target->cred_dist_list);
2157 }
2158
2159 int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint)
2160 {
2161         int num;
2162
2163         spin_lock_bh(&target->rx_lock);
2164         num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2165         spin_unlock_bh(&target->rx_lock);
2166         return num;
2167 }
2168
2169 static void htc_setup_msg_bndl(struct htc_target *target)
2170 {
2171         struct hif_dev_scat_sup_info *scat_info = &target->dev->hif_scat_info;
2172
2173         /* limit what HTC can handle */
2174         target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2175                                        target->msg_per_bndl_max);
2176
2177         if (ath6kl_hif_enable_scatter(target->dev->ar, scat_info)) {
2178                 target->msg_per_bndl_max = 0;
2179                 return;
2180         }
2181
2182         /* limit bundle what the device layer can handle */
2183         target->msg_per_bndl_max = min(scat_info->max_scat_entries,
2184                                        target->msg_per_bndl_max);
2185
2186         ath6kl_dbg(ATH6KL_DBG_TRC,
2187                    "htc bundling allowed. max msg per htc bundle: %d\n",
2188                    target->msg_per_bndl_max);
2189
2190         /* Max rx bundle size is limited by the max tx bundle size */
2191         target->dev->max_rx_bndl_sz = scat_info->max_xfer_szper_scatreq;
2192         /* Max tx bundle size if limited by the extended mbox address range */
2193         target->dev->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2194                                           scat_info->max_xfer_szper_scatreq);
2195
2196         ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
2197                    target->dev->max_rx_bndl_sz, target->dev->max_tx_bndl_sz);
2198
2199         if (target->dev->max_tx_bndl_sz)
2200                 target->tx_bndl_enable = true;
2201
2202         if (target->dev->max_rx_bndl_sz)
2203                 target->rx_bndl_enable = true;
2204
2205         if ((target->tgt_cred_sz % target->dev->block_sz) != 0) {
2206                 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2207                             target->tgt_cred_sz);
2208
2209                 /*
2210                  * Disallow send bundling since the credit size is
2211                  * not aligned to a block size the I/O block
2212                  * padding will spill into the next credit buffer
2213                  * which is fatal.
2214                  */
2215                 target->tx_bndl_enable = false;
2216         }
2217 }
2218
/*
 * Wait for the target's HTC READY message (the first control message
 * after reset), record the credit and bundling parameters it reports,
 * then connect the pseudo control service on ENDPOINT 0.
 * Returns 0 on success or a negative error code.
 */
int htc_wait_target(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_ready_ext_msg *rdy_msg;
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp resp;
        int status;

        /* we should be getting 1 control message that the target is ready */
        packet = htc_wait_for_ctrl_msg(target);

        if (!packet)
                return -ENOMEM;

        /* we controlled the buffer creation so it's properly aligned */
        rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

        /* must at least be a valid legacy (2.0) ready message */
        if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
            (packet->act_len < sizeof(struct htc_ready_msg))) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        /* a target reporting zero credits or credit size is unusable */
        if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "target ready: credits: %d credit size: %d\n",
                   target->tgt_creds, target->tgt_cred_sz);

        /* check if this is an extended ready message */
        if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
                /* this is an extended message */
                target->htc_tgt_ver = rdy_msg->htc_ver;
                target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
        } else {
                /* legacy */
                target->htc_tgt_ver = HTC_VERSION_2P0;
                target->msg_per_bndl_max = 0;
        }

        ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
                  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                  target->htc_tgt_ver);

        /* only negotiate bundling when the target advertises support */
        if (target->msg_per_bndl_max > 0)
                htc_setup_msg_bndl(target);

        /* setup our pseudo HTC control endpoint connection */
        memset(&connect, 0, sizeof(connect));
        memset(&resp, 0, sizeof(resp));
        connect.ep_cb.rx = htc_ctrl_rx;
        connect.ep_cb.rx_refill = NULL;
        connect.ep_cb.tx_full = NULL;
        connect.max_txq_depth = NUM_CONTROL_BUFFERS;
        connect.svc_id = HTC_CTRL_RSVD_SVC;

        /* connect fake service */
        status = htc_conn_service((void *)target, &connect, &resp);

        if (status)
                ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
        if (packet) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return status;
}
2295
2296 /*
2297  * Start HTC, enable interrupts and let the target know
2298  * host has finished setup.
2299  */
2300 int htc_start(struct htc_target *target)
2301 {
2302         struct htc_packet *packet;
2303         int status;
2304
2305         /* Disable interrupts at the chip level */
2306         ath6kldev_disable_intrs(target->dev);
2307
2308         target->htc_flags = 0;
2309         target->rx_st_flags = 0;
2310
2311         /* Push control receive buffers into htc control endpoint */
2312         while ((packet = htc_get_control_buf(target, false)) != NULL) {
2313                 status = htc_add_rxbuf(target, packet);
2314                 if (status)
2315                         return status;
2316         }
2317
2318         /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2319         ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
2320                           target->tgt_creds);
2321
2322         dump_cred_dist_stats(target);
2323
2324         /* Indicate to the target of the setup completion */
2325         status = htc_setup_tx_complete(target);
2326
2327         if (status)
2328                 return status;
2329
2330         /* unmask interrupts */
2331         status = ath6kldev_unmask_intrs(target->dev);
2332
2333         if (status)
2334                 htc_stop(target);
2335
2336         return status;
2337 }
2338
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void htc_stop(struct htc_target *target)
{
        /*
         * Mark HTC as stopping first so concurrent submitters (e.g.
         * htc_add_rxbuf_multiple) observe HTC_OP_STATE_STOPPING and
         * cancel instead of queuing new work.
         */
        spin_lock_bh(&target->htc_lock);
        target->htc_flags |= HTC_OP_STATE_STOPPING;
        spin_unlock_bh(&target->htc_lock);

        /*
         * Masking interrupts is a synchronous operation, when this
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
        ath6kldev_mask_intrs(target->dev);

        /* flush TX, then RX, then reset per-endpoint bookkeeping */
        htc_flush_txep_all(target);

        htc_flush_rx_buf(target);

        reset_ep_state(target);
}
2359
2360 void *htc_create(struct ath6kl *ar)
2361 {
2362         struct htc_target *target = NULL;
2363         struct htc_packet *packet;
2364         int status = 0, i = 0;
2365         u32 block_size, ctrl_bufsz;
2366
2367         target = kzalloc(sizeof(*target), GFP_KERNEL);
2368         if (!target) {
2369                 ath6kl_err("unable to allocate memory\n");
2370                 return NULL;
2371         }
2372
2373         target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2374         if (!target->dev) {
2375                 ath6kl_err("unable to allocate memory\n");
2376                 status = -ENOMEM;
2377                 goto fail_create_htc;
2378         }
2379
2380         spin_lock_init(&target->htc_lock);
2381         spin_lock_init(&target->rx_lock);
2382         spin_lock_init(&target->tx_lock);
2383
2384         INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2385         INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2386         INIT_LIST_HEAD(&target->cred_dist_list);
2387
2388         target->dev->ar = ar;
2389         target->dev->htc_cnxt = target;
2390         target->ep_waiting = ENDPOINT_MAX;
2391
2392         reset_ep_state(target);
2393
2394         status = ath6kldev_setup(target->dev);
2395
2396         if (status)
2397                 goto fail_create_htc;
2398
2399         block_size = ar->mbox_info.block_size;
2400
2401         ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2402                       (block_size + HTC_HDR_LENGTH) :
2403                       (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2404
2405         for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2406                 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2407                 if (!packet)
2408                         break;
2409
2410                 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2411                 if (!packet->buf_start) {
2412                         kfree(packet);
2413                         break;
2414                 }
2415
2416                 packet->buf_len = ctrl_bufsz;
2417                 if (i < NUM_CONTROL_RX_BUFFERS) {
2418                         packet->act_len = 0;
2419                         packet->buf = packet->buf_start;
2420                         packet->endpoint = ENDPOINT_0;
2421                         list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2422                 } else
2423                         list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2424         }
2425
2426 fail_create_htc:
2427         if (i != NUM_CONTROL_BUFFERS || status) {
2428                 if (target) {
2429                         htc_cleanup(target);
2430                         target = NULL;
2431                 }
2432         }
2433
2434         return target;
2435 }
2436
2437 /* cleanup the HTC instance */
2438 void htc_cleanup(struct htc_target *target)
2439 {
2440         struct htc_packet *packet, *tmp_packet;
2441
2442         ath6kl_hif_cleanup_scatter(target->dev->ar);
2443
2444         list_for_each_entry_safe(packet, tmp_packet,
2445                         &target->free_ctrl_txbuf, list) {
2446                 list_del(&packet->list);
2447                 kfree(packet->buf_start);
2448                 kfree(packet);
2449         }
2450
2451         list_for_each_entry_safe(packet, tmp_packet,
2452                         &target->free_ctrl_rxbuf, list) {
2453                 list_del(&packet->list);
2454                 kfree(packet->buf_start);
2455                 kfree(packet);
2456         }
2457
2458         kfree(target->dev);
2459         kfree(target);
2460 }