/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

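/*
 * Note (assumed from the doorbell macro's use elsewhere in the driver):
 * the ack word is precomputed here so the interrupt path can write one
 * doorbell value that both acknowledges events and re-arms the
 * coalescing timer with the cached timeout.
 */
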
/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

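/*
 * A "soft reset" only marks configuration as pending again; nothing is
 * sent to the firmware here.  The next time the RXF state machine runs
 * bna_rxf_cfg_apply(), every VLAN filter block, the VLAN-strip setting
 * and (if enabled) the full RSS configuration are replayed to the device.
 */
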
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);

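/*
 * RXF state machine overview: configuration requests (MAC filters,
 * VLANs, rx-mode, RSS) are queued on pending lists/flags and applied
 * from cfg_wait one firmware command at a time; each RXF_E_FW_RESP
 * triggers the next bna_rxf_cfg_apply() step until nothing is pending,
 * at which point the RXF settles in the started state.
 */
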
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

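/*
 * The bna_bfi_*() helpers below all follow the same pattern: build the
 * request in the per-RXF bfi_enet_cmd scratch area, fill in the message
 * header with the owning Rx's resource id (rid), and post it on the
 * message queue.  The state machine guarantees at most one outstanding
 * command per RXF, so the single scratch buffer is safe to reuse.
 */
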
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

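/*
 * Multicast CAM handles are reference counted: more than one list entry
 * can end up attached to the same firmware handle, so a delete request
 * is only issued (and only for BNA_HARD_CLEANUP) once the last
 * reference drops.  The return value follows the cfg_apply convention:
 * 1 means a command was posted and the caller must wait for firmware.
 */
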
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}

static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

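/*
 * vlan_pending_bitmask carries one bit per BFI_ENET_VLAN_BLOCK_SIZE
 * block of the VLAN filter table.  The loop above locates the lowest
 * pending block; e.g. a bitmask of 0x0c means blocks 2 and 3 are dirty:
 * block 2 is written first and its bit cleared, block 3 follows on the
 * next apply pass after the firmware response.
 */
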
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

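/*
 * bna_rxf_cfg_apply() is re-entered once per firmware response: each
 * helper returns 1 as soon as it posts a command, so the chain above
 * makes exactly one step of progress per response (ucast first, then
 * mcast, rx-mode, VLAN, VLAN-strip, RSS) and falls through to 0 only
 * when the RXF is fully synchronized with the hardware.
 */
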
/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
				    rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

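/*
 * The two listset routines below replace the whole filter list in one
 * shot: pending additions are discarded, currently active entries are
 * copied onto the pending-delete queue (using preallocated del_q nodes
 * so deletion cannot fail), and the new list is staged on a local list
 * first, so running out of CAM entries leaves the old state intact.
 */
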
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx,
		    void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

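/*
 * Example of the index math above, assuming the usual field widths
 * (32-bit table words, 512-VLAN firmware blocks): vlan_id 1000 sets
 * bit (1000 & 31) = 8 in word 1000 >> 5 = 31, and marks block
 * 1000 >> 9 = 1 pending, so only that block is re-sent to firmware.
 */
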
void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

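/*
 * Cleanup semantics used throughout: BNA_SOFT_CLEANUP only rewinds the
 * driver-side bookkeeping (entries move back to pending-add so they are
 * replayed after a restart), while BNA_HARD_CLEANUP additionally posts
 * delete/clear commands so the hardware CAM is scrubbed before the RXF
 * stops or pauses.
 */
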
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

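/*
 * rxmode_pending/rxmode_pending_bitmask implement a tiny two-bit
 * command queue per mode: the bitmask bit says "a change is pending"
 * and the mode bit encodes its direction.  is_promisc_enable(),
 * promisc_inactive() and friends (macros in bna.h) test and clear the
 * pair, while rxmode_active mirrors what the firmware currently has.
 */
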
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

/* RX */

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define	call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

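/*
 * bfi_enet_datapath_q_init() hands a queue to the firmware by address:
 * pg_tbl points at the queue page table (QPT) that lists the DMA pages
 * backing the ring, and first_entry at page 0, so host and device walk
 * the same scatter-gather layout set up in bna_rxq_qpt_setup() below.
 */
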
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);

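/*
 * Rx state machine overview: start_wait covers the RX_CFG_SET exchange
 * with firmware, rxf_start_wait brings up the packet filter on top of
 * it, and the symmetric *_stop_wait states tear the two down in reverse
 * order.  failed/quiesce_wait handle an IOC failure in the middle of
 * either sequence, funneling through cleanup_wait back to stopped.
 */
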
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

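/*
 * bna_bfi_rx_enet_start() below serializes the whole Rx configuration
 * into a single bfi_enet_rx_cfg_req: one queue-set entry per path
 * (small/large/completion queue page tables plus the IB index address
 * and MSI-X vector), followed by interrupt-moderation defaults and the
 * queue-set type (single, large+small, or header/data split).
 */
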
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}

static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
				return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
				return 0;
	}

	return 1;
}

static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}

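/*
 * rx_free_q is kept sorted by rid (see bna_rx_put() below): regular Rx
 * objects are carved from the head (lowest rid), while the loopback
 * type uses bfa_q_deq_tail() above, keeping the two allocation families
 * at opposite ends of the id space.
 */
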
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}

static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}

static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

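/*
 * Both QPT setup routines build two views of the same ring: sw_qpt
 * holds the kernel virtual address of each PAGE_SIZE chunk for the
 * driver's own use, while kv_qpt_ptr is the DMA-visible page table
 * (lsb/msb pairs) that bfi_enet_datapath_q_init() later points the
 * firmware at.
 */
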
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}

void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}

void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}

void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}

2388 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2390 u32 cq_size, hq_size, dq_size;
2391 u32 cpage_count, hpage_count, dpage_count;
2392 struct bna_mem_info *mem_info;
2397 dq_depth = q_cfg->q0_depth;
2398 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2399 cq_depth = dq_depth + hq_depth;
2401 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2402 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2403 cq_size = ALIGN(cq_size, PAGE_SIZE);
2404 cpage_count = SIZE_TO_PAGES(cq_size);
2406 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2407 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2408 dq_size = ALIGN(dq_size, PAGE_SIZE);
2409 dpage_count = SIZE_TO_PAGES(dq_size);
2411 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2412 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2413 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2414 hq_size = ALIGN(hq_size, PAGE_SIZE);
2415 hpage_count = SIZE_TO_PAGES(hq_size);
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
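/*
 * Typical bring-up sequence for an Rx object (a sketch; bnad_rx_res_alloc()
 * is a hypothetical stand-in for the bnad-side allocator that satisfies
 * each mem_info/intr_info entry filled in above):
 *
 *	bna_rx_res_req(&rx_cfg, res_info);
 *	bnad_rx_res_alloc(bnad, res_info);
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	bna_rx_enable(rx);
 */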
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 cq_depth, i;
	u32 page_count;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
	     i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}
		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		BNA_TO_POWER_OF_2_HIGH(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
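/*
 * bna_rx_destroy() undoes bna_rx_create() in reverse: per-path RCBs and
 * CCBs are released through the optional destroy callbacks, RxQ and RxP
 * objects go back to the module free lists, and the rid bit is cleared so
 * the id can be reused by a later create.
 */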
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
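/*
 * Sketch of a bna_rx_mode_set() call that requests promiscuous mode
 * (bnad_cb_rx_mode_set is a hypothetical completion callback; the
 * BNA_RXMODE_PROMISC flag is assumed from the rxmode enum):
 *
 *	if (bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
 *			bnad_cb_rx_mode_set) != BNA_CB_SUCCESS)
 *		;	// some other Rx already owns promisc/default mode
 */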
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
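/*
 * bna_rx_dim_update() below implements dynamic interrupt moderation: the
 * sampled packet rate selects a load class and the small/large packet mix
 * selects a bias column.  For example, 35K packets in a sample interval
 * falls under BNA_PKT_RATE_40K and classifies as BNA_LOAD_T_LOW_1; if
 * small packets outnumber large ones by more than 2:1, bias is 0, else 1.
 */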
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
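/*
 * Default load/bias -> coalescing timeout table used by the NAPI path and
 * consumed via bna_rx_dim_reconfig().  Rows are indexed by load (assuming
 * BNA_LOAD_T_HIGH_4, the highest packet-rate class, is the first
 * enumerator), columns by bias.
 */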
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* TX */
#define call_tx_stop_cbfn(tx) \
do { \
	if ((tx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_tx *); \
		void *cbarg; \
		cbfn = (tx)->stop_cbfn; \
		cbarg = (tx)->stop_cbarg; \
		(tx)->stop_cbfn = NULL; \
		(tx)->stop_cbarg = NULL; \
		cbfn(cbarg, (tx)); \
	} \
} while (0)

#define call_tx_prio_change_cbfn(tx) \
do { \
	if ((tx)->prio_change_cbfn) { \
		void (*cbfn)(struct bnad *, struct bna_tx *); \
		cbfn = (tx)->prio_change_cbfn; \
		(tx)->prio_change_cbfn = NULL; \
		cbfn((tx)->bna->bnad, (tx)); \
	} \
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_STARTED = 4,
	TX_E_STOPPED = 5,
	TX_E_PRIO_CHANGE = 6,
	TX_E_CLEANUP_DONE = 7,
	TX_E_BW_UPDATE = 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
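/*
 * Tx state machine overview (derived from the handlers below):
 *
 *	stopped -> start_wait -> started -> stop_wait -> cleanup_wait -> stopped
 *
 * Priority or bandwidth updates detour through prio_stop_wait and
 * prio_cleanup_wait before restarting at start_wait; a FAIL event from an
 * active state funnels through failed/quiesce_wait until the enet layer
 * restarts the Tx object.
 */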
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;
	case TX_E_FAIL:
		/* No-op */
		break;
	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;
	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;
	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		/* No-op */
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;
	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;
	case TX_E_BW_UPDATE:
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
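/*
 * Note: ib_cfg above is filled in from whichever TxQ the loop visited
 * last; all TxQs of a Tx object carry identical interrupt-block settings,
 * so any member is representative.
 */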
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
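/*
 * QPT sizing example for bna_txq_qpt_setup() (illustrative only, assuming
 * 4 KB pages and a 64-byte work item, which is what BFI_TXQ_WI_SIZE is
 * believed to denote):
 *
 *	txq_depth = 2048 -> q_size = 2048 * 64 = 128 KB -> page_count = 32
 *
 * The QPT then holds 32 DMA addresses while tcb->sw_qpt mirrors them with
 * the 32 kernel virtual addresses used on the fast path.
 */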
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	/* Re-insert into tx_free_q keeping the list sorted by rid */
	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
static void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

static void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}
void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
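/*
 * Typical Tx bring-up mirrors the Rx side (a sketch; bnad_tx_res_alloc()
 * is a hypothetical stand-in for the bnad-side allocator):
 *
 *	bna_tx_res_req(num_txq, txq_depth, res_info);
 *	bnad_tx_res_alloc(bnad, res_info);
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *	bna_tx_enable(tx);
 */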
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
					PAGE_SIZE;

	/* Get resources */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/* Initialize Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
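/*
 * bna_tx_mod_stop() below relies on the bfa_wc wait-counter pattern:
 * bfa_wc_init() registers bna_tx_mod_cb_tx_stopped_all() as the completion
 * and takes an initial reference, bfa_wc_up() adds one reference per Tx
 * being stopped, each stop completion drops one via
 * bna_tx_mod_cb_tx_stopped(), and bfa_wc_wait() drops the initial
 * reference so the completion fires once every Tx has stopped.
 */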
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}