/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
22 static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
25 unsigned long timeout;
28 ar->bmi.cmd_credits = 0;
30 /* Read the counter register to get the command credits */
31 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
33 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
34 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
37 * Hit the credit counter with a 4-byte access, the first byte
38 * read will hit the counter and cause a decrement, while the
39 * remaining 3 bytes has no effect. The rationale behind this
40 * is to make all HIF accesses 4-byte aligned.
42 ret = hif_read_write_sync(ar, addr,
43 (u8 *)&ar->bmi.cmd_credits, 4,
44 HIF_RD_SYNC_BYTE_INC);
46 ath6kl_err("Unable to decrement the command credit count register: %d\n",
51 /* The counter is only 8 bits.
52 * Ignore anything in the upper 3 bytes
54 ar->bmi.cmd_credits &= 0xFF;
57 if (!ar->bmi.cmd_credits) {
58 ath6kl_err("bmi communication timeout\n");
65 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
67 unsigned long timeout;
71 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
72 while (time_before(jiffies, timeout) && !rx_word) {
73 ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
74 (u8 *)&rx_word, sizeof(rx_word),
75 HIF_RD_SYNC_BYTE_INC);
77 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
81 /* all we really want is one bit */
82 rx_word &= (1 << ENDPOINT1);
86 ath6kl_err("bmi_recv_buf FIFO empty\n");
93 static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
98 ret = ath6kl_get_bmi_cmd_credits(ar);
102 addr = ar->mbox_info.htc_addr;
104 ret = hif_read_write_sync(ar, addr, buf, len,
105 HIF_WR_SYNC_BYTE_INC);
107 ath6kl_err("unable to send the bmi data to the device\n");
112 static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
118 * During normal bootup, small reads may be required.
119 * Rather than issue an HIF Read and then wait as the Target
120 * adds successive bytes to the FIFO, we wait here until
121 * we know that response data is available.
123 * This allows us to cleanly timeout on an unexpected
124 * Target failure rather than risk problems at the HIF level.
125 * In particular, this avoids SDIO timeouts and possibly garbage
126 * data on some host controllers. And on an interconnect
127 * such as Compact Flash (as well as some SDIO masters) which
128 * does not provide any indication on data timeout, it avoids
129 * a potential hang or garbage response.
131 * Synchronization is more difficult for reads larger than the
132 * size of the MBOX FIFO (128B), because the Target is unable
133 * to push the 129th byte of data until AFTER the Host posts an
134 * HIF Read and removes some FIFO data. So for large reads the
135 * Host proceeds to post an HIF Read BEFORE all the data is
136 * actually available to read. Fortunately, large BMI reads do
137 * not occur in practice -- they're supported for debug/development.
139 * So Host/Target BMI synchronization is divided into these cases:
143 * CASE 2: 4 <= length <= 128
144 * Wait for first 4 bytes to be in FIFO
145 * If CONSERVATIVE_BMI_READ is enabled, also wait for
146 * a BMI command credit, which indicates that the ENTIRE
147 * response is available in the the FIFO
149 * CASE 3: length > 128
150 * Wait for the first 4 bytes to be in FIFO
152 * For most uses, a small timeout should be sufficient and we will
153 * usually see a response quickly; but there may be some unusual
154 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
155 * For now, we use an unbounded busy loop while waiting for
158 * If BMI_EXECUTE ever needs to support longer-latency execution,
159 * especially in production, this code needs to be enhanced to sleep
160 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
161 * a function of Host processor speed.
163 if (len >= 4) { /* NB: Currently, always true */
164 ret = ath6kl_bmi_get_rx_lkahd(ar);
169 addr = ar->mbox_info.htc_addr;
170 ret = hif_read_write_sync(ar, addr, buf, len,
171 HIF_RD_SYNC_BYTE_INC);
173 ath6kl_err("Unable to read the bmi data from the device: %d\n",
181 int ath6kl_bmi_done(struct ath6kl *ar)
186 if (ar->bmi.done_sent) {
187 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
191 ar->bmi.done_sent = true;
193 ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
195 ath6kl_err("Unable to send bmi done: %d\n", ret);
199 ath6kl_bmi_cleanup(ar);
204 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
205 struct ath6kl_bmi_target_info *targ_info)
208 u32 cid = BMI_GET_TARGET_INFO;
210 if (ar->bmi.done_sent) {
211 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
215 ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
217 ath6kl_err("Unable to send get target info: %d\n", ret);
221 ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
222 sizeof(targ_info->version));
224 ath6kl_err("Unable to recv target info: %d\n", ret);
228 if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
229 /* Determine how many bytes are in the Target's targ_info */
230 ret = ath6kl_bmi_recv_buf(ar,
231 (u8 *)&targ_info->byte_count,
232 sizeof(targ_info->byte_count));
234 ath6kl_err("unable to read target info byte count: %d\n",
240 * The target's targ_info doesn't match the host's targ_info.
241 * We need to do some backwards compatibility to make this work.
243 if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
248 /* Read the remainder of the targ_info */
249 ret = ath6kl_bmi_recv_buf(ar,
251 sizeof(targ_info->byte_count),
253 sizeof(targ_info->byte_count));
256 ath6kl_err("Unable to read target info (%d bytes): %d\n",
257 targ_info->byte_count, ret);
262 ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
263 targ_info->version, targ_info->type);
268 int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
270 u32 cid = BMI_READ_MEMORY;
273 u32 len_remain, rx_len;
276 if (ar->bmi.done_sent) {
277 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
281 size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
282 if (size > MAX_BMI_CMDBUF_SZ) {
286 memset(ar->bmi.cmd_buf, 0, size);
288 ath6kl_dbg(ATH6KL_DBG_BMI,
289 "bmi read memory: device: addr: 0x%x, len: %d\n",
295 rx_len = (len_remain < BMI_DATASZ_MAX) ?
296 len_remain : BMI_DATASZ_MAX;
298 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
299 offset += sizeof(cid);
300 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
301 offset += sizeof(addr);
302 memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
303 offset += sizeof(len);
305 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
307 ath6kl_err("Unable to write to the device: %d\n",
311 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
313 ath6kl_err("Unable to read from the device: %d\n",
317 memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
318 len_remain -= rx_len; addr += rx_len;
324 int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
326 u32 cid = BMI_WRITE_MEMORY;
329 u32 len_remain, tx_len;
330 const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
331 u8 aligned_buf[BMI_DATASZ_MAX];
334 if (ar->bmi.done_sent) {
335 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
339 if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
344 memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
346 ath6kl_dbg(ATH6KL_DBG_BMI,
347 "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
351 src = &buf[len - len_remain];
353 if (len_remain < (BMI_DATASZ_MAX - header)) {
354 if (len_remain & 3) {
355 /* align it with 4 bytes */
356 len_remain = len_remain +
357 (4 - (len_remain & 3));
358 memcpy(aligned_buf, src, len_remain);
363 tx_len = (BMI_DATASZ_MAX - header);
367 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
368 offset += sizeof(cid);
369 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
370 offset += sizeof(addr);
371 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
372 offset += sizeof(tx_len);
373 memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
376 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
378 ath6kl_err("Unable to write to the device: %d\n",
382 len_remain -= tx_len; addr += tx_len;
388 int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
390 u32 cid = BMI_EXECUTE;
395 if (ar->bmi.done_sent) {
396 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
400 size = sizeof(cid) + sizeof(addr) + sizeof(param);
401 if (size > MAX_BMI_CMDBUF_SZ) {
405 memset(ar->bmi.cmd_buf, 0, size);
407 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
411 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
412 offset += sizeof(cid);
413 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
414 offset += sizeof(addr);
415 memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
416 offset += sizeof(*param);
418 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
420 ath6kl_err("Unable to write to the device: %d\n", ret);
424 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
426 ath6kl_err("Unable to read from the device: %d\n", ret);
430 memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
435 int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
437 u32 cid = BMI_SET_APP_START;
442 if (ar->bmi.done_sent) {
443 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
447 size = sizeof(cid) + sizeof(addr);
448 if (size > MAX_BMI_CMDBUF_SZ) {
452 memset(ar->bmi.cmd_buf, 0, size);
454 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
457 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
458 offset += sizeof(cid);
459 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
460 offset += sizeof(addr);
462 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
464 ath6kl_err("Unable to write to the device: %d\n", ret);
471 int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
473 u32 cid = BMI_READ_SOC_REGISTER;
478 if (ar->bmi.done_sent) {
479 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
483 size = sizeof(cid) + sizeof(addr);
484 if (size > MAX_BMI_CMDBUF_SZ) {
488 memset(ar->bmi.cmd_buf, 0, size);
490 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
493 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
494 offset += sizeof(cid);
495 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
496 offset += sizeof(addr);
498 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
500 ath6kl_err("Unable to write to the device: %d\n", ret);
504 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
506 ath6kl_err("Unable to read from the device: %d\n", ret);
509 memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
514 int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
516 u32 cid = BMI_WRITE_SOC_REGISTER;
521 if (ar->bmi.done_sent) {
522 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
526 size = sizeof(cid) + sizeof(addr) + sizeof(param);
527 if (size > MAX_BMI_CMDBUF_SZ) {
531 memset(ar->bmi.cmd_buf, 0, size);
533 ath6kl_dbg(ATH6KL_DBG_BMI,
534 "bmi write SOC reg: addr: 0x%x, param: %d\n",
538 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
539 offset += sizeof(cid);
540 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
541 offset += sizeof(addr);
542 memcpy(&(ar->bmi.cmd_buf[offset]), ¶m, sizeof(param));
543 offset += sizeof(param);
545 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
547 ath6kl_err("Unable to write to the device: %d\n", ret);
554 int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
556 u32 cid = BMI_LZ_DATA;
559 u32 len_remain, tx_len;
560 const u32 header = sizeof(cid) + sizeof(len);
563 if (ar->bmi.done_sent) {
564 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
568 size = BMI_DATASZ_MAX + header;
569 if (size > MAX_BMI_CMDBUF_SZ) {
573 memset(ar->bmi.cmd_buf, 0, size);
575 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
580 tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
581 len_remain : (BMI_DATASZ_MAX - header);
584 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
585 offset += sizeof(cid);
586 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
587 offset += sizeof(tx_len);
588 memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
592 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
594 ath6kl_err("Unable to write to the device: %d\n",
599 len_remain -= tx_len;
605 int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
607 u32 cid = BMI_LZ_STREAM_START;
612 if (ar->bmi.done_sent) {
613 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
617 size = sizeof(cid) + sizeof(addr);
618 if (size > MAX_BMI_CMDBUF_SZ) {
622 memset(ar->bmi.cmd_buf, 0, size);
624 ath6kl_dbg(ATH6KL_DBG_BMI,
625 "bmi LZ stream start: addr: 0x%x)\n",
629 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
630 offset += sizeof(cid);
631 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
632 offset += sizeof(addr);
634 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
636 ath6kl_err("Unable to start LZ stream to the device: %d\n",
644 int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
648 u32 last_word_offset = len & ~0x3;
649 u32 unaligned_bytes = len & 0x3;
651 ret = ath6kl_bmi_lz_stream_start(ar, addr);
655 if (unaligned_bytes) {
656 /* copy the last word into a zero padded buffer */
657 memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
660 ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
665 ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
668 /* Close compressed stream and open a new (fake) one.
669 * This serves mainly to flush Target caches. */
670 ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
675 int ath6kl_bmi_init(struct ath6kl *ar)
677 ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
679 if (!ar->bmi.cmd_buf)
685 void ath6kl_bmi_cleanup(struct ath6kl *ar)
687 kfree(ar->bmi.cmd_buf);
688 ar->bmi.cmd_buf = NULL;