/*
 * Copyright (C) 2005 - 2015 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/incorrectly installed/not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
};
static struct be_cmd_priv_map cmd_priv_map[] = {
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC

		OPCODE_COMMON_GET_FLOW_CONTROL,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC

		OPCODE_COMMON_SET_FLOW_CONTROL,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC

		OPCODE_ETH_GET_PPORT_STATS,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC

		OPCODE_COMMON_GET_PHY_DETAILS,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
	return wrb->payload.embedded_payload;
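
/* Ring the MCC doorbell: the low bits select the MCC ring and num_posted
 * tells the fw how many new WRBs were queued; bails out early if the
 * adapter is already marked as being in an error state.
 */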
static int be_mcc_notify(struct be_adapter *adapter)
	struct be_queue_info *mccq = &adapter->mcc_obj.q;

	if (be_check_error(adapter, BE_ERROR_ANY))

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
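
/* The wrb tags stash the host address of the request/response header:
 * tag0 carries the lower 32 bits and tag1 the upper 32 bits (see
 * fill_wrb_tags()); rebuild the pointer from the two halves here.
 */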
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
	addr = ((addr << 16) << 16) | tag0;

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	opcode = resp_hdr->opcode;
	subsystem = resp_hdr->subsystem;

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
			adapter->hwmon_info.be_on_die_temp =
				resp->on_die_temperature;
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;
	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	opcode = resp_hdr->opcode;
	subsystem = resp_hdr->subsystem;

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed: status %d-%d\n",
				opcode, subsystem, base_status, addl_status);

	return compl->status;
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log a message that allows the user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	adapter->vlan_prio_bmap = evt->available_priority_bmap;
	adapter->recommended_prio &= ~VLAN_PRIO_MASK;
	adapter->recommended_prio =
		evt->reco_default_priority << VLAN_PRIO_SHIFT;

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;

static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
		adapter->flags &= ~BE_FLAGS_OS2BMC;

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
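
/* Async completions carry an event-code field in their flags word; the
 * helpers below match it against the event codes subscribed to in
 * be_cmd_mccq_ext_create() to pick the right handler.
 */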
static inline bool is_link_state_evt(u32 flags)
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;

static inline bool is_grp5_evt(u32 flags)
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;

static inline bool is_dbg_evt(u32 flags)
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;

static inline bool is_sliport_evt(u32 flags)
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
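
/* Peek at the tail of the MCC CQ and return the entry only if its valid
 * bit is set (i.e. the fw has actually written it); NULL otherwise.
 */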
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);

void be_async_mcc_enable(struct be_adapter *adapter)
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);

void be_async_mcc_disable(struct be_adapter *adapter)
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
int be_process_mcc(struct be_adapter *adapter)
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		be_mcc_compl_use(compl);

	be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
#define mcc_timeout	120000 /* 12s timeout */
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))

		status = be_process_mcc(adapter);

		if (atomic_read(&mcc_obj->q.used) == 0)

	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
		if (be_check_error(adapter, BE_ERROR_ANY))

		ready = ioread32(db);
		if (ready == 0xffffffff)

		ready &= MPU_MAILBOX_DB_RDY_MASK;

			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);

	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;

	status = be_mbox_db_ready_wait(adapter, db);

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);

		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
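
/* The POST stage is read from the SLIPORT semaphore register: a CSR on
 * BE2/BE3, a PCI config space dword on later chips.
 */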
static u16 be_POST_stage_get(struct be_adapter *adapter)
	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;

static int lancer_wait_ready(struct be_adapter *adapter)
#define SLIPORT_READY_TIMEOUT 30
	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))

	return sliport_status ? : -1;
int be_fw_wait_ready(struct be_adapter *adapter)
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);

	/* There's no means to poll POST state on BE2/3 VFs */
	if (BEx_chip(adapter) && be_virtfn(adapter))

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=%#x\n", stage);

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
	return &wrb->payload.sgl[0];
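
/* Stash the host address of the request header in the wrb tags so that
 * be_decode_resp_hdr() can recover it when the completion arrives.
 */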
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;

		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);

		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
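
/* Break the DMA buffer into 4K chunks and fill the cmd's page-address
 * array with their bus addresses, capped at max_pages entries.
 */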
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
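
/* Reserve the next free WRB slot on the MCC queue; returns NULL when all
 * slots are in use (the queue is full).
 */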
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len)

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));

static bool use_mcc(struct be_adapter *adapter)
	return adapter->mcc_obj.q.created;

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return mutex_lock_interruptible(&adapter->mbox_lock);

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
		return mutex_unlock(&adapter->mbox_lock);
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		dest_wrb = wrb_from_mbox(adapter);

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
	struct be_mcc_wrb *dest_wrb;

	status = be_cmd_lock(adapter);

	dest_wrb = be_cmd_copy(adapter, wrb);

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
		status = be_mbox_notify_wait(adapter);

	memcpy(wrb, dest_wrb, sizeof(*wrb));

	be_cmd_unlock(adapter);
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
	if (lancer_chip(adapter))

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = (u8 *)wrb_from_mbox(adapter);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
	if (lancer_chip(adapter))

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = (u8 *)wrb_from_mbox(adapter);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,

	/* Support for EQ_CREATEv2 is available only from SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;

	mutex_unlock(&adapter->mbox_lock);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
	req->type = MAC_ADDRESS_TYPE_NETWORK;
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);

	spin_unlock_bh(&adapter->mcc_lock);
	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);

	mutex_unlock(&adapter->mbox_lock);
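
/* Ring lengths are powers of 2, so fls() yields log2(len) + 1, which is
 * the encoding the hw expects; the maximum encoding (16) is special-cased
 * below.
 */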
static u32 be_encoded_q_len(int q_len)
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,

	/* Subscribe to Link State, Sliport Event and Group 5 Events
	 * (bits 1, 5 and 17 set)
	 */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;

	mutex_unlock(&adapter->mbox_lock);
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;

	mutex_unlock(&adapter->mbox_lock);

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
			txo->db_offset = le32_to_cpu(resp->db_offset);
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;

	spin_unlock_bh(&adapter->mcc_lock);
/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && be_virtfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;

	if (interface_id == -1)

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
/* Get stats is a non-embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (BE2_chip(adapter))
	if (BE3_chip(adapter) || lancer_chip(adapter))

	status = be_mcc_notify(adapter);

	adapter->stats_cmd_sent = true;

	spin_unlock_bh(&adapter->mcc_lock);
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	status = be_mcc_notify(adapter);

	adapter->stats_cmd_sent = true;

	spin_unlock_bh(&adapter->mcc_lock);
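
/* Translate the fw's PHY_LINK_SPEED_* encoding into a link speed in Mbps */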
static int be_mac_to_link_speed(int mac_speed)
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
	case PHY_LINK_SPEED_10MBPS:
	case PHY_LINK_SPEED_100MBPS:
	case PHY_LINK_SPEED_1GBPS:
	case PHY_LINK_SPEED_10GBPS:
	case PHY_LINK_SPEED_20GBPS:
	case PHY_LINK_SPEED_25GBPS:
	case PHY_LINK_SPEED_40GBPS:
/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;

	spin_lock_bh(&adapter->mcc_lock);

		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)

			*link_status = resp->logical_link_status;

	spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	status = be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
		log_offset = sizeof(u32), payload_len;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					     &get_fat_cmd.dma, GFP_ATOMIC);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);

		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       le32_to_cpu(resp->read_log_length));
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");

		log_offset += buf_size;

	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
			  get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));

	spin_unlock_bh(&adapter->mcc_lock);
/* Set the EQ delay interval of an EQ to the specified value */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);

	status = be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
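
/* The MODIFY_EQ_DELAY cmd can carry at most 8 EQ entries per call, so the
 * requested set is issued in batches of up to 8.
 */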
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);

	status = be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
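
/* Wrapper that clamps the requested rx-filter flags to what the interface
 * actually supports before issuing the cmd, warning when bits are dropped.
 */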
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	flags &= be_if_cap_flags(adapter);

	return __be_cmd_rx_filter(adapter, flags, value);
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);

	mutex_unlock(&adapter->mbox_lock);
int be_cmd_reset_function(struct be_adapter *adapter)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
			dev_err(&adapter->pdev->dev,
				"Adapter in non-recoverable error\n");

	if (mutex_lock_interruptible(&adapter->mbox_lock))

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;

	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(rss_hash_opts);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);

	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
			       sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);

		*state = resp->beacon_state;

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
				      u8 page_num, u8 *data)
	struct be_dma_mem cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;

	if (page_num > TR_PAGE_A2)

	cmd.size = sizeof(struct be_cmd_resp_port_type);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_TRANSRECV_DATA,
			       cmd.size, wrb, &cmd);

	req->port = cpu_to_le32(adapter->hba_port_num);
	req->page_num = cpu_to_le32(page_num);
	status = be_mcc_notify_wait(adapter);
		struct be_cmd_resp_port_type *resp = cmd.va;

		memcpy(data, resp->page_data, PAGE_DATA_LEN);

	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				     sizeof(struct lancer_cmd_req_write_object))
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	status = be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(60000)))
		status = -ETIMEDOUT;
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
		*addn_status = resp->additional_status;

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_query_cable_type(struct be_adapter *adapter)
	u8 page_data[PAGE_DATA_LEN];

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
		switch (adapter->phy.interface_type) {
			adapter->phy.cable_type =
				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
		case PHY_TYPE_SFP_PLUS_10GB:
			adapter->phy.cable_type =
				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
			adapter->phy.cable_type = 0;

int be_cmd_query_sfp_info(struct be_adapter *adapter)
	u8 page_data[PAGE_DATA_LEN];

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
		strlcpy(adapter->phy.vendor_name, page_data +
			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
		strlcpy(adapter->phy.vendor_pn,
			page_data + SFP_VENDOR_PN_OFFSET,
			SFP_VENDOR_NAME_LEN - 1);
int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
	struct lancer_cmd_req_delete_object *req;
	struct be_mcc_wrb *wrb;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_DELETE_OBJECT,
			       sizeof(*req), wrb, NULL);

	strlcpy(req->object_name, obj_name, sizeof(req->object_name));

	status = be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			   u32 data_size, u32 data_offset, const char *obj_name,
			   u32 *data_read, u32 *eof, u8 *addn_status)
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_OBJECT,
			       sizeof(struct lancer_cmd_req_read_object), wrb,

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);
		resp = embedded_payload(wrb);

		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
		*addn_status = resp->additional_status;

	spin_unlock_bh(&adapter->mcc_lock);
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 img_offset,
			  u32 buf_size)
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,

	req->params.op_type = cpu_to_le32(flash_type);
	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset);

	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -ETIMEDOUT;
		status = adapter->flash_status;

	spin_unlock_bh(&adapter->mcc_lock);
2531 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2532 u16 img_optype, u32 img_offset, u32 crc_offset)
2534 struct be_cmd_read_flash_crc *req;
2535 struct be_mcc_wrb *wrb;
2538 spin_lock_bh(&adapter->mcc_lock);
2540 wrb = wrb_from_mccq(adapter);
2545 req = embedded_payload(wrb);
2547 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2548 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2551 req->params.op_type = cpu_to_le32(img_optype);
2552 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2553 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2555 req->params.offset = cpu_to_le32(crc_offset);
2557 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2558 req->params.data_buf_size = cpu_to_le32(0x4);
2560 status = be_mcc_notify_wait(adapter);
2562 memcpy(flashed_crc, req->crc, 4);
2565 spin_unlock_bh(&adapter->mcc_lock);
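/*
 * Illustrative sketch (editorial): skipping a reflash when the section
 * already on flash matches the new image. The convention that the CRC is
 * the last 4 bytes of the image follows the FLASHROM_OPER_REPORT usage
 * above; the img/img_size parameters are assumptions for this example.
 */
static bool example_section_needs_flashing(struct be_adapter *adapter,
					   const u8 *img, u32 img_size,
					   u16 img_optype, u32 img_offset)
{
	u8 flashed_crc[4];
	int status;

	status = be_cmd_get_flash_crc(adapter, flashed_crc, img_optype,
				      img_offset, img_size - 4);
	if (status)
		return true;	/* when in doubt, reflash */

	return memcmp(flashed_crc, img + img_size - 4, 4) != 0;
}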
2569 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2570 struct be_dma_mem *nonemb_cmd)
2572 struct be_mcc_wrb *wrb;
2573 struct be_cmd_req_acpi_wol_magic_config *req;
2576 spin_lock_bh(&adapter->mcc_lock);
2578 wrb = wrb_from_mccq(adapter);
2583 req = nonemb_cmd->va;
2585 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2586 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2588 memcpy(req->magic_mac, mac, ETH_ALEN);
2590 status = be_mcc_notify_wait(adapter);
2593 spin_unlock_bh(&adapter->mcc_lock);
2597 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2598 u8 loopback_type, u8 enable)
2600 struct be_mcc_wrb *wrb;
2601 struct be_cmd_req_set_lmode *req;
2604 spin_lock_bh(&adapter->mcc_lock);
2606 wrb = wrb_from_mccq(adapter);
2612 req = embedded_payload(wrb);
2614 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2615 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2618 req->src_port = port_num;
2619 req->dest_port = port_num;
2620 req->loopback_type = loopback_type;
2621 req->loopback_state = enable;
2623 status = be_mcc_notify(adapter);
2627 spin_unlock_bh(&adapter->mcc_lock);
2629 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2630 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
2631 status = -ETIMEDOUT;
2636 spin_unlock_bh(&adapter->mcc_lock);
2640 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2641 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2644 struct be_mcc_wrb *wrb;
2645 struct be_cmd_req_loopback_test *req;
2646 struct be_cmd_resp_loopback_test *resp;
2649 spin_lock_bh(&adapter->mcc_lock);
2651 wrb = wrb_from_mccq(adapter);
2657 req = embedded_payload(wrb);
2659 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2660 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2663 req->hdr.timeout = cpu_to_le32(15);
2664 req->pattern = cpu_to_le64(pattern);
2665 req->src_port = cpu_to_le32(port_num);
2666 req->dest_port = cpu_to_le32(port_num);
2667 req->pkt_size = cpu_to_le32(pkt_size);
2668 req->num_pkts = cpu_to_le32(num_pkts);
2669 req->loopback_type = cpu_to_le32(loopback_type);
2671 status = be_mcc_notify(adapter);
2675 spin_unlock_bh(&adapter->mcc_lock);
2677 wait_for_completion(&adapter->et_cmd_compl);
2678 resp = embedded_payload(wrb);
2679 status = le32_to_cpu(resp->status);
2683 spin_unlock_bh(&adapter->mcc_lock);
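/*
 * Illustrative sketch (editorial): how an ethtool self-test might chain
 * be_cmd_set_loopback() and be_cmd_loopback_test(). The loopback type,
 * packet size/count, the 64-bit test pattern and the restore step are
 * assumptions for this example (the driver's ethtool code restores by
 * selecting a "no loopback" type instead); a zero status from the FW
 * means the test passed.
 */
static int example_run_loopback(struct be_adapter *adapter, u8 port,
				u8 lb_type)
{
	int status;

	status = be_cmd_set_loopback(adapter, port, lb_type, 1);
	if (status)
		return status;

	status = be_cmd_loopback_test(adapter, port, lb_type, 1500, 2,
				      0x5a5a5a5a5a5a5a5aULL);

	be_cmd_set_loopback(adapter, port, lb_type, 0);
	return status;
}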
2687 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2688 u32 byte_cnt, struct be_dma_mem *cmd)
2690 struct be_mcc_wrb *wrb;
2691 struct be_cmd_req_ddrdma_test *req;
2695 spin_lock_bh(&adapter->mcc_lock);
2697 wrb = wrb_from_mccq(adapter);
2703 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2704 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2707 req->pattern = cpu_to_le64(pattern);
2708 req->byte_count = cpu_to_le32(byte_cnt);
2709 for (i = 0; i < byte_cnt; i++) {
2710 req->snd_buff[i] = (u8)(pattern >> (j * 8));
2716 status = be_mcc_notify_wait(adapter);
2719 struct be_cmd_resp_ddrdma_test *resp;
2722 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2729 spin_unlock_bh(&adapter->mcc_lock);
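/*
 * Illustrative sketch (editorial): driving the DDR DMA self-test. The
 * send/receive buffers live inside the request/response structures, so
 * the DMA area is sized from the request; the 4096-byte length and the
 * alternating test patterns mirror the driver's ethtool self-test and
 * are assumptions here.
 */
static int example_ddr_dma_selftest(struct be_adapter *adapter)
{
	u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
	struct be_dma_mem cmd = {0};
	int status, i;

	cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size,
				     &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		status = be_cmd_ddr_dma_test(adapter, pattern[i], 4096, &cmd);
		if (status)
			break;
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}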
2733 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2734 struct be_dma_mem *nonemb_cmd)
2736 struct be_mcc_wrb *wrb;
2737 struct be_cmd_req_seeprom_read *req;
2740 spin_lock_bh(&adapter->mcc_lock);
2742 wrb = wrb_from_mccq(adapter);
2747 req = nonemb_cmd->va;
2749 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2750 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2753 status = be_mcc_notify_wait(adapter);
2756 spin_unlock_bh(&adapter->mcc_lock);
2760 int be_cmd_get_phy_info(struct be_adapter *adapter)
2762 struct be_mcc_wrb *wrb;
2763 struct be_cmd_req_get_phy_info *req;
2764 struct be_dma_mem cmd;
2767 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2768 CMD_SUBSYSTEM_COMMON))
2771 spin_lock_bh(&adapter->mcc_lock);
2773 wrb = wrb_from_mccq(adapter);
2778 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2779 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2782 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2789 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2790 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2793 status = be_mcc_notify_wait(adapter);
2795 struct be_phy_info *resp_phy_info =
2796 cmd.va + sizeof(struct be_cmd_req_hdr);
2798 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2799 adapter->phy.interface_type =
2800 le16_to_cpu(resp_phy_info->interface_type);
2801 adapter->phy.auto_speeds_supported =
2802 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2803 adapter->phy.fixed_speeds_supported =
2804 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2805 adapter->phy.misc_params =
2806 le32_to_cpu(resp_phy_info->misc_params);
2808 if (BE2_chip(adapter)) {
2809 adapter->phy.fixed_speeds_supported =
2810 BE_SUPPORTED_SPEED_10GBPS |
2811 BE_SUPPORTED_SPEED_1GBPS;
2814 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2816 spin_unlock_bh(&adapter->mcc_lock);
2820 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2822 struct be_mcc_wrb *wrb;
2823 struct be_cmd_req_set_qos *req;
2826 spin_lock_bh(&adapter->mcc_lock);
2828 wrb = wrb_from_mccq(adapter);
2834 req = embedded_payload(wrb);
2836 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2837 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2839 req->hdr.domain = domain;
2840 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2841 req->max_bps_nic = cpu_to_le32(bps);
2843 status = be_mcc_notify_wait(adapter);
2846 spin_unlock_bh(&adapter->mcc_lock);
2850 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2852 struct be_mcc_wrb *wrb;
2853 struct be_cmd_req_cntl_attribs *req;
2854 struct be_cmd_resp_cntl_attribs *resp;
2856 int payload_len = max(sizeof(*req), sizeof(*resp));
2857 struct mgmt_controller_attrib *attribs;
2858 struct be_dma_mem attribs_cmd;
2860 if (mutex_lock_interruptible(&adapter->mbox_lock))
2863 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2864 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2865 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2867 &attribs_cmd.dma, GFP_ATOMIC);
2868 if (!attribs_cmd.va) {
2869 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2874 wrb = wrb_from_mbox(adapter);
2879 req = attribs_cmd.va;
2881 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2882 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2885 status = be_mbox_notify_wait(adapter);
2887 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2888 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2892 mutex_unlock(&adapter->mbox_lock);
2894 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
2895 attribs_cmd.va, attribs_cmd.dma);
2900 int be_cmd_req_native_mode(struct be_adapter *adapter)
2902 struct be_mcc_wrb *wrb;
2903 struct be_cmd_req_set_func_cap *req;
2906 if (mutex_lock_interruptible(&adapter->mbox_lock))
2909 wrb = wrb_from_mbox(adapter);
2915 req = embedded_payload(wrb);
2917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2918 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2919 sizeof(*req), wrb, NULL);
2921 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2922 CAPABILITY_BE3_NATIVE_ERX_API);
2923 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2925 status = be_mbox_notify_wait(adapter);
2927 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2929 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2930 CAPABILITY_BE3_NATIVE_ERX_API;
2931 if (!adapter->be3_native)
2932 dev_warn(&adapter->pdev->dev,
2933 "adapter not in advanced mode\n");
2936 mutex_unlock(&adapter->mbox_lock);
2940 /* Get privilege(s) for a function */
2941 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2944 struct be_mcc_wrb *wrb;
2945 struct be_cmd_req_get_fn_privileges *req;
2948 spin_lock_bh(&adapter->mcc_lock);
2950 wrb = wrb_from_mccq(adapter);
2956 req = embedded_payload(wrb);
2958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2959 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2962 req->hdr.domain = domain;
2964 status = be_mcc_notify_wait(adapter);
2966 struct be_cmd_resp_get_fn_privileges *resp =
2967 embedded_payload(wrb);
2969 *privilege = le32_to_cpu(resp->privilege_mask);
2971 /* In UMC mode the FW does not return the right privileges.
2972 * Override with the correct privileges, equivalent to those of a PF.
2974 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2976 *privilege = MAX_PRIVILEGES;
2980 spin_unlock_bh(&adapter->mcc_lock);
2984 /* Set privilege(s) for a function */
2985 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2988 struct be_mcc_wrb *wrb;
2989 struct be_cmd_req_set_fn_privileges *req;
2992 spin_lock_bh(&adapter->mcc_lock);
2994 wrb = wrb_from_mccq(adapter);
3000 req = embedded_payload(wrb);
3001 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3002 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3004 req->hdr.domain = domain;
3005 if (lancer_chip(adapter))
3006 req->privileges_lancer = cpu_to_le32(privileges);
3008 req->privileges = cpu_to_le32(privileges);
3010 status = be_mcc_notify_wait(adapter);
3012 spin_unlock_bh(&adapter->mcc_lock);
3016 /* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
3017 * pmac_id_valid: false => an active pmac_id or a permanent MAC address is requested;
3018 * if a pmac_id is returned, pmac_id_valid is set to true.
3020 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3021 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3024 struct be_mcc_wrb *wrb;
3025 struct be_cmd_req_get_mac_list *req;
3028 struct be_dma_mem get_mac_list_cmd;
3031 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3032 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3033 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3034 get_mac_list_cmd.size,
3035 &get_mac_list_cmd.dma,
3038 if (!get_mac_list_cmd.va) {
3039 dev_err(&adapter->pdev->dev,
3040 "Memory allocation failure during GET_MAC_LIST\n");
3044 spin_lock_bh(&adapter->mcc_lock);
3046 wrb = wrb_from_mccq(adapter);
3052 req = get_mac_list_cmd.va;
3054 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3055 OPCODE_COMMON_GET_MAC_LIST,
3056 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3057 req->hdr.domain = domain;
3058 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3059 if (*pmac_id_valid) {
3060 req->mac_id = cpu_to_le32(*pmac_id);
3061 req->iface_id = cpu_to_le16(if_handle);
3062 req->perm_override = 0;
3064 req->perm_override = 1;
3067 status = be_mcc_notify_wait(adapter);
3069 struct be_cmd_resp_get_mac_list *resp =
3070 get_mac_list_cmd.va;
3072 if (*pmac_id_valid) {
3073 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3078 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3079 /* The MAC list returned could contain one or more active mac_ids
3080 * or one or more true or pseudo permanent MAC addresses.
3081 * If an active mac_id is present, return the first active mac_id found.
3084 for (i = 0; i < mac_count; i++) {
3085 struct get_list_macaddr *mac_entry;
3089 mac_entry = &resp->macaddr_list[i];
3090 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3091 /* mac_id is a 32-bit value whereas a MAC address is 6 bytes */
3094 if (mac_addr_size == sizeof(u32)) {
3095 *pmac_id_valid = true;
3096 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3097 *pmac_id = le32_to_cpu(mac_id);
3101 /* If no active mac_id was found, return the first MAC address */
3102 *pmac_id_valid = false;
3103 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3108 spin_unlock_bh(&adapter->mcc_lock);
3109 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3110 get_mac_list_cmd.va, get_mac_list_cmd.dma);
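/*
 * Illustrative sketch (editorial): the two calling modes of
 * be_cmd_get_mac_from_list() described in the comment above. The trailing
 * domain argument (0 here) is an assumption for this example.
 */
static void example_mac_from_list(struct be_adapter *adapter, u32 if_handle)
{
	bool pmac_id_valid = false;
	u8 mac[ETH_ALEN];
	u32 pmac_id;

	/* Mode 1: pmac_id_valid == false on entry => fetch the active
	 * pmac_id if one exists, else the first permanent MAC address;
	 * on return pmac_id_valid says which of the two we got.
	 */
	be_cmd_get_mac_from_list(adapter, mac, &pmac_id_valid, &pmac_id,
				 if_handle, 0);

	/* Mode 2: pmac_id_valid == true on entry => fetch the MAC address
	 * registered under the supplied pmac_id.
	 */
	if (pmac_id_valid)
		be_cmd_get_mac_from_list(adapter, mac, &pmac_id_valid,
					 &pmac_id, if_handle, 0);
}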
3114 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3115 u8 *mac, u32 if_handle, bool active, u32 domain)
3118 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3120 if (BEx_chip(adapter))
3121 return be_cmd_mac_addr_query(adapter, mac, false,
3122 if_handle, curr_pmac_id);
3124 /* Fetch the MAC address using pmac_id */
3125 return be_cmd_get_mac_from_list(adapter, mac, &active,
3130 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3133 bool pmac_valid = false;
3137 if (BEx_chip(adapter)) {
3138 if (be_physfn(adapter))
3139 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3142 status = be_cmd_mac_addr_query(adapter, mac, false,
3143 adapter->if_handle, 0);
3145 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3146 NULL, adapter->if_handle, 0);
3152 /* Uses synchronous MCCQ */
3153 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3154 u8 mac_count, u32 domain)
3156 struct be_mcc_wrb *wrb;
3157 struct be_cmd_req_set_mac_list *req;
3159 struct be_dma_mem cmd;
3161 memset(&cmd, 0, sizeof(struct be_dma_mem));
3162 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3163 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3168 spin_lock_bh(&adapter->mcc_lock);
3170 wrb = wrb_from_mccq(adapter);
3177 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3178 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3181 req->hdr.domain = domain;
3182 req->mac_count = mac_count;
3184 memcpy(req->mac, mac_array, ETH_ALEN * mac_count);
3186 status = be_mcc_notify_wait(adapter);
3189 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3190 spin_unlock_bh(&adapter->mcc_lock);
3194 /* Wrapper to delete any active MACs and provision the new MAC.
3195 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3196 * current list are active.
3198 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3200 bool active_mac = false;
3201 u8 old_mac[ETH_ALEN];
3205 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3206 &pmac_id, if_id, dom);
3208 if (!status && active_mac)
3209 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3211 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3214 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3215 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
3217 struct be_mcc_wrb *wrb;
3218 struct be_cmd_req_set_hsw_config *req;
3222 spin_lock_bh(&adapter->mcc_lock);
3224 wrb = wrb_from_mccq(adapter);
3230 req = embedded_payload(wrb);
3231 ctxt = &req->context;
3233 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3234 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3237 req->hdr.domain = domain;
3238 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3240 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3241 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3243 if (!BEx_chip(adapter) && hsw_mode) {
3244 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3245 ctxt, adapter->hba_port_num);
3246 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3247 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3251 /* Enable/disable both MAC and VLAN spoof checking */
3252 if (!BEx_chip(adapter) && spoofchk) {
3253 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3255 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3259 be_dws_cpu_to_le(req->context, sizeof(req->context));
3260 status = be_mcc_notify_wait(adapter);
3263 spin_unlock_bh(&adapter->mcc_lock);
3267 /* Get Hyper switch config */
3268 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3269 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3271 struct be_mcc_wrb *wrb;
3272 struct be_cmd_req_get_hsw_config *req;
3277 spin_lock_bh(&adapter->mcc_lock);
3279 wrb = wrb_from_mccq(adapter);
3285 req = embedded_payload(wrb);
3286 ctxt = &req->context;
3288 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3289 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3292 req->hdr.domain = domain;
3293 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3295 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3297 if (!BEx_chip(adapter) && mode) {
3298 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3299 ctxt, adapter->hba_port_num);
3300 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3302 be_dws_cpu_to_le(req->context, sizeof(req->context));
3304 status = be_mcc_notify_wait(adapter);
3306 struct be_cmd_resp_get_hsw_config *resp =
3307 embedded_payload(wrb);
3309 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3310 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3311 pvid, &resp->context);
3313 *pvid = le16_to_cpu(vid);
3315 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3316 port_fwd_type, &resp->context);
3319 AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3320 spoofchk, &resp->context);
3324 spin_unlock_bh(&adapter->mcc_lock);
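/*
 * Illustrative sketch (editorial): setting a VF's port VLAN and reading
 * it back through the hyperswitch config commands above. The vf-to-domain
 * (vf + 1) mapping and the unused mode/spoofchk arguments are assumptions
 * modelled on the per-VF commands elsewhere in this file.
 */
static int example_set_and_verify_pvid(struct be_adapter *adapter, int vf,
				       u16 intf_id, u16 pvid)
{
	u16 cur_pvid = 0;
	int status;

	status = be_cmd_set_hsw_config(adapter, pvid, vf + 1, intf_id, 0, 0);
	if (status)
		return status;

	status = be_cmd_get_hsw_config(adapter, &cur_pvid, vf + 1, intf_id,
				       NULL, NULL);
	if (!status && cur_pvid != pvid)
		status = -EIO;
	return status;
}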
3328 static bool be_is_wol_excluded(struct be_adapter *adapter)
3330 struct pci_dev *pdev = adapter->pdev;
3332 if (be_virtfn(adapter))
3335 switch (pdev->subsystem_device) {
3336 case OC_SUBSYS_DEVICE_ID1:
3337 case OC_SUBSYS_DEVICE_ID2:
3338 case OC_SUBSYS_DEVICE_ID3:
3339 case OC_SUBSYS_DEVICE_ID4:
3346 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3348 struct be_mcc_wrb *wrb;
3349 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3351 struct be_dma_mem cmd;
3353 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3357 if (be_is_wol_excluded(adapter))
3360 if (mutex_lock_interruptible(&adapter->mbox_lock))
3363 memset(&cmd, 0, sizeof(struct be_dma_mem));
3364 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3365 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3368 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3373 wrb = wrb_from_mbox(adapter);
3381 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3382 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3383 sizeof(*req), wrb, &cmd);
3385 req->hdr.version = 1;
3386 req->query_options = BE_GET_WOL_CAP;
3388 status = be_mbox_notify_wait(adapter);
3390 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3392 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3394 adapter->wol_cap = resp->wol_settings;
3395 if (adapter->wol_cap & BE_WOL_CAP)
3396 adapter->wol_en = true;
3399 mutex_unlock(&adapter->mbox_lock);
3401 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3407 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3409 struct be_dma_mem extfat_cmd;
3410 struct be_fat_conf_params *cfgs;
3414 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3415 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3416 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3417 extfat_cmd.size, &extfat_cmd.dma,
3422 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3426 cfgs = (struct be_fat_conf_params *)
3427 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3428 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3429 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3431 for (j = 0; j < num_modes; j++) {
3432 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3433 cfgs->module[i].trace_lvl[j].dbg_lvl =
3438 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3440 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3445 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3447 struct be_dma_mem extfat_cmd;
3448 struct be_fat_conf_params *cfgs;
3452 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3453 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3454 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3455 extfat_cmd.size, &extfat_cmd.dma,
3458 if (!extfat_cmd.va) {
3459 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3464 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3466 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3467 sizeof(struct be_cmd_resp_hdr));
3469 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3470 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3471 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3474 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3480 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3481 struct be_dma_mem *cmd)
3483 struct be_mcc_wrb *wrb;
3484 struct be_cmd_req_get_ext_fat_caps *req;
3487 if (mutex_lock_interruptible(&adapter->mbox_lock))
3490 wrb = wrb_from_mbox(adapter);
3497 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3498 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3499 cmd->size, wrb, cmd);
3500 req->parameter_type = cpu_to_le32(1);
3502 status = be_mbox_notify_wait(adapter);
3504 mutex_unlock(&adapter->mbox_lock);
3508 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3509 struct be_dma_mem *cmd,
3510 struct be_fat_conf_params *configs)
3512 struct be_mcc_wrb *wrb;
3513 struct be_cmd_req_set_ext_fat_caps *req;
3516 spin_lock_bh(&adapter->mcc_lock);
3518 wrb = wrb_from_mccq(adapter);
3525 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3526 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3527 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3528 cmd->size, wrb, cmd);
3530 status = be_mcc_notify_wait(adapter);
3532 spin_unlock_bh(&adapter->mcc_lock);
3536 int be_cmd_query_port_name(struct be_adapter *adapter)
3538 struct be_cmd_req_get_port_name *req;
3539 struct be_mcc_wrb *wrb;
3542 if (mutex_lock_interruptible(&adapter->mbox_lock))
3545 wrb = wrb_from_mbox(adapter);
3546 req = embedded_payload(wrb);
3548 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3549 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3551 if (!BEx_chip(adapter))
3552 req->hdr.version = 1;
3554 status = be_mbox_notify_wait(adapter);
3556 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3558 adapter->port_name = resp->port_name[adapter->hba_port_num];
3560 adapter->port_name = adapter->hba_port_num + '0';
3563 mutex_unlock(&adapter->mbox_lock);
3567 /* Descriptor type */
3573 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3576 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3577 struct be_nic_res_desc *nic;
3580 for (i = 0; i < desc_count; i++) {
3581 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3582 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3583 nic = (struct be_nic_res_desc *)hdr;
3584 if (desc_type == FUNC_DESC ||
3585 (desc_type == VFT_DESC &&
3586 nic->flags & (1 << VFT_SHIFT)))
3590 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3591 hdr = (void *)hdr + hdr->desc_len;
3596 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3598 return be_get_nic_desc(buf, desc_count, VFT_DESC);
3601 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3603 return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3606 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3609 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3610 struct be_pcie_res_desc *pcie;
3613 for (i = 0; i < desc_count; i++) {
3614 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3615 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3616 pcie = (struct be_pcie_res_desc *)hdr;
3617 if (pcie->pf_num == devfn)
3621 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3622 hdr = (void *)hdr + hdr->desc_len;
3627 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3629 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3632 for (i = 0; i < desc_count; i++) {
3633 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3634 return (struct be_port_res_desc *)hdr;
3636 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3637 hdr = (void *)hdr + hdr->desc_len;
3642 static void be_copy_nic_desc(struct be_resources *res,
3643 struct be_nic_res_desc *desc)
3645 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3646 res->max_vlans = le16_to_cpu(desc->vlan_count);
3647 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3648 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3649 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3650 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3651 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3652 res->max_cq_count = le16_to_cpu(desc->cq_count);
3653 res->max_iface_count = le16_to_cpu(desc->iface_count);
3654 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
3655 /* Clear the flags that the driver is not interested in */
3656 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3657 BE_IF_CAP_FLAGS_WANT;
3661 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3663 struct be_mcc_wrb *wrb;
3664 struct be_cmd_req_get_func_config *req;
3666 struct be_dma_mem cmd;
3668 if (mutex_lock_interruptible(&adapter->mbox_lock))
3671 memset(&cmd, 0, sizeof(struct be_dma_mem));
3672 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3673 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3676 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3681 wrb = wrb_from_mbox(adapter);
3689 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3690 OPCODE_COMMON_GET_FUNC_CONFIG,
3691 cmd.size, wrb, &cmd);
3693 if (skyhawk_chip(adapter))
3694 req->hdr.version = 1;
3696 status = be_mbox_notify_wait(adapter);
3698 struct be_cmd_resp_get_func_config *resp = cmd.va;
3699 u32 desc_count = le32_to_cpu(resp->desc_count);
3700 struct be_nic_res_desc *desc;
3702 desc = be_get_func_nic_desc(resp->func_param, desc_count);
3708 adapter->pf_number = desc->pf_num;
3709 be_copy_nic_desc(res, desc);
3712 mutex_unlock(&adapter->mbox_lock);
3714 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3719 /* Will use MBOX only if MCCQ has not been created */
3720 int be_cmd_get_profile_config(struct be_adapter *adapter,
3721 struct be_resources *res, u8 query, u8 domain)
3723 struct be_cmd_resp_get_profile_config *resp;
3724 struct be_cmd_req_get_profile_config *req;
3725 struct be_nic_res_desc *vf_res;
3726 struct be_pcie_res_desc *pcie;
3727 struct be_port_res_desc *port;
3728 struct be_nic_res_desc *nic;
3729 struct be_mcc_wrb wrb = {0};
3730 struct be_dma_mem cmd;
3734 memset(&cmd, 0, sizeof(struct be_dma_mem));
3735 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3736 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3742 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3743 OPCODE_COMMON_GET_PROFILE_CONFIG,
3744 cmd.size, &wrb, &cmd);
3746 req->hdr.domain = domain;
3747 if (!lancer_chip(adapter))
3748 req->hdr.version = 1;
3749 req->type = ACTIVE_PROFILE_TYPE;
3751 /* When the QUERY_MODIFIABLE_FIELDS_TYPE bit is set, the cmd returns the
3752 * descriptors with all bits set to "1" for the fields that can be
3753 * modified using the SET_PROFILE_CONFIG cmd.
3755 if (query == RESOURCE_MODIFIABLE)
3756 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
3758 status = be_cmd_notify_wait(adapter, &wrb);
3763 desc_count = le16_to_cpu(resp->desc_count);
3765 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3768 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3770 port = be_get_port_desc(resp->func_param, desc_count);
3772 adapter->mc_type = port->mc_type;
3774 nic = be_get_func_nic_desc(resp->func_param, desc_count);
3776 be_copy_nic_desc(res, nic);
3778 vf_res = be_get_vft_desc(resp->func_param, desc_count);
3780 res->vf_if_cap_flags = vf_res->cap_flags;
3783 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3788 /* Will use MBOX only if MCCQ has not been created */
3789 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3790 int size, int count, u8 version, u8 domain)
3792 struct be_cmd_req_set_profile_config *req;
3793 struct be_mcc_wrb wrb = {0};
3794 struct be_dma_mem cmd;
3797 memset(&cmd, 0, sizeof(struct be_dma_mem));
3798 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3799 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3805 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3806 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3808 req->hdr.version = version;
3809 req->hdr.domain = domain;
3810 req->desc_count = cpu_to_le32(count);
3811 memcpy(req->desc, desc, size);
3813 status = be_cmd_notify_wait(adapter, &wrb);
3816 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3821 /* Mark all fields invalid */
3822 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3824 memset(nic, 0, sizeof(*nic));
3825 nic->unicast_mac_count = 0xFFFF;
3826 nic->mcc_count = 0xFFFF;
3827 nic->vlan_count = 0xFFFF;
3828 nic->mcast_mac_count = 0xFFFF;
3829 nic->txq_count = 0xFFFF;
3830 nic->rq_count = 0xFFFF;
3831 nic->rssq_count = 0xFFFF;
3832 nic->lro_count = 0xFFFF;
3833 nic->cq_count = 0xFFFF;
3834 nic->toe_conn_count = 0xFFFF;
3835 nic->eq_count = 0xFFFF;
3836 nic->iface_count = 0xFFFF;
3837 nic->link_param = 0xFF;
3838 nic->channel_id_param = cpu_to_le16(0xF000);
3839 nic->acpi_params = 0xFF;
3840 nic->wol_param = 0x0F;
3841 nic->tunnel_iface_count = 0xFFFF;
3842 nic->direct_tenant_iface_count = 0xFFFF;
3843 nic->bw_min = 0xFFFFFFFF;
3844 nic->bw_max = 0xFFFFFFFF;
3847 /* Mark all fields invalid */
3848 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3850 memset(pcie, 0, sizeof(*pcie));
3851 pcie->sriov_state = 0xFF;
3852 pcie->pf_state = 0xFF;
3853 pcie->pf_type = 0xFF;
3854 pcie->num_vfs = 0xFFFF;
3857 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3860 struct be_nic_res_desc nic_desc;
3864 if (BE3_chip(adapter))
3865 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3867 be_reset_nic_desc(&nic_desc);
3868 nic_desc.pf_num = adapter->pf_number;
3869 nic_desc.vf_num = domain;
3870 nic_desc.bw_min = 0;
3871 if (lancer_chip(adapter)) {
3872 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3873 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3874 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3876 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3879 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3880 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3881 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3882 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3883 nic_desc.bw_max = cpu_to_le32(bw_percent);
3886 return be_cmd_set_profile_config(adapter, &nic_desc,
3887 nic_desc.hdr.desc_len,
3888 1, version, domain);
3891 static void be_fill_vf_res_template(struct be_adapter *adapter,
3892 struct be_resources pool_res,
3893 u16 num_vfs, u16 num_vf_qs,
3894 struct be_nic_res_desc *nic_vft)
3896 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
3897 struct be_resources res_mod = {0};
3899 /* Fetch the resource descriptor in which the GET_PROFILE_CONFIG cmd sets
3900 * all bits to '1' for the fields modifiable via the SET_PROFILE_CONFIG cmd.
3902 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
3904 /* If RSS IFACE capability flags are modifiable for a VF, set the
3905 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3906 * more than 1 RSSQ is available for a VF.
3907 * Otherwise, provision only one queue pair per VF.
3909 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3910 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3911 if (num_vf_qs > 1) {
3912 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3913 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3914 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3916 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3917 BE_IF_FLAGS_DEFQ_RSS);
3920 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
3925 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
3926 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
3927 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
3928 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
3931 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3932 * among the PF and its VFs, if the fields are changeable
3934 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3935 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
3938 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3939 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
3942 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3943 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
3946 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3947 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
3951 int be_cmd_set_sriov_config(struct be_adapter *adapter,
3952 struct be_resources pool_res, u16 num_vfs,
3956 struct be_pcie_res_desc pcie;
3957 struct be_nic_res_desc nic_vft;
3960 /* PF PCIE descriptor */
3961 be_reset_pcie_desc(&desc.pcie);
3962 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3963 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3964 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3965 desc.pcie.pf_num = adapter->pdev->devfn;
3966 desc.pcie.sriov_state = num_vfs ? 1 : 0;
3967 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3969 /* VF NIC Template descriptor */
3970 be_reset_nic_desc(&desc.nic_vft);
3971 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3972 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3973 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3974 desc.nic_vft.pf_num = adapter->pdev->devfn;
3975 desc.nic_vft.vf_num = 0;
3977 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
3980 return be_cmd_set_profile_config(adapter, &desc,
3981 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
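/*
 * Illustrative sketch (editorial): committing an SR-IOV layout using the
 * two descriptors built above. RESOURCE_LIMITS as the query type and
 * num_vf_qs (queue pairs per VF) are assumptions taken from the driver's
 * other call sites.
 */
static int example_provision_vfs(struct be_adapter *adapter, u16 num_vfs,
				 u16 num_vf_qs)
{
	struct be_resources pool_res = {0};
	int status;

	/* Query the PF pool limits that the VF template will carve up */
	status = be_cmd_get_profile_config(adapter, &pool_res,
					   RESOURCE_LIMITS, 0);
	if (status)
		return status;

	return be_cmd_set_sriov_config(adapter, pool_res, num_vfs,
				       num_vf_qs);
}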
3984 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3986 struct be_mcc_wrb *wrb;
3987 struct be_cmd_req_manage_iface_filters *req;
3990 if (iface == 0xFFFFFFFF)
3993 spin_lock_bh(&adapter->mcc_lock);
3995 wrb = wrb_from_mccq(adapter);
4000 req = embedded_payload(wrb);
4002 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4003 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4006 req->target_iface_id = cpu_to_le32(iface);
4008 status = be_mcc_notify_wait(adapter);
4010 spin_unlock_bh(&adapter->mcc_lock);
4014 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4016 struct be_port_res_desc port_desc;
4018 memset(&port_desc, 0, sizeof(port_desc));
4019 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4020 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4021 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4022 port_desc.link_num = adapter->hba_port_num;
4024 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4026 port_desc.nv_port = swab16(port);
4028 port_desc.nv_flags = NV_TYPE_DISABLED;
4029 port_desc.nv_port = 0;
4032 return be_cmd_set_profile_config(adapter, &port_desc,
4033 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
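/*
 * Illustrative sketch (editorial): be_cmd_set_vxlan_port() expects the
 * UDP port in network byte order (it is swab16()'d into the descriptor
 * above), and a port of 0 selects the NV_TYPE_DISABLED path. The
 * adapter->vxlan_port field cached here is an assumption for this
 * example.
 */
static int example_toggle_vxlan_offload(struct be_adapter *adapter,
					__be16 udp_port)
{
	int status;

	/* udp_port != 0 enables VXLAN parsing; 0 disables it */
	status = be_cmd_set_vxlan_port(adapter, udp_port);
	if (!status)
		adapter->vxlan_port = udp_port;
	return status;
}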
4036 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4039 struct be_mcc_wrb *wrb;
4040 struct be_cmd_req_get_iface_list *req;
4041 struct be_cmd_resp_get_iface_list *resp;
4044 spin_lock_bh(&adapter->mcc_lock);
4046 wrb = wrb_from_mccq(adapter);
4051 req = embedded_payload(wrb);
4053 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4054 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4056 req->hdr.domain = vf_num + 1;
4058 status = be_mcc_notify_wait(adapter);
4060 resp = (struct be_cmd_resp_get_iface_list *)req;
4061 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4065 spin_unlock_bh(&adapter->mcc_lock);
4069 static int lancer_wait_idle(struct be_adapter *adapter)
4071 #define SLIPORT_IDLE_TIMEOUT 30
4075 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4076 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4077 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4083 if (i == SLIPORT_IDLE_TIMEOUT)
4089 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4093 status = lancer_wait_idle(adapter);
4097 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4102 /* Check whether a dump image is present */
4103 bool dump_present(struct be_adapter *adapter)
4105 u32 sliport_status = 0;
4107 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4108 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4111 int lancer_initiate_dump(struct be_adapter *adapter)
4113 struct device *dev = &adapter->pdev->dev;
4116 if (dump_present(adapter)) {
4117 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4121 /* Issue a firmware reset and request a diagnostic dump */
4122 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4123 PHYSDEV_CONTROL_DD_MASK);
4125 dev_err(dev, "FW reset failed\n");
4129 status = lancer_wait_idle(adapter);
4133 if (!dump_present(adapter)) {
4134 dev_err(dev, "FW dump not generated\n");
4141 int lancer_delete_dump(struct be_adapter *adapter)
4145 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4146 return be_cmd_status(status);
4150 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4152 struct be_mcc_wrb *wrb;
4153 struct be_cmd_enable_disable_vf *req;
4156 if (BEx_chip(adapter))
4159 spin_lock_bh(&adapter->mcc_lock);
4161 wrb = wrb_from_mccq(adapter);
4167 req = embedded_payload(wrb);
4169 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4170 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4173 req->hdr.domain = domain;
4175 status = be_mcc_notify_wait(adapter);
4177 spin_unlock_bh(&adapter->mcc_lock);
4181 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4183 struct be_mcc_wrb *wrb;
4184 struct be_cmd_req_intr_set *req;
4187 if (mutex_lock_interruptible(&adapter->mbox_lock))
4190 wrb = wrb_from_mbox(adapter);
4192 req = embedded_payload(wrb);
4194 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4195 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4198 req->intr_enabled = intr_enable;
4200 status = be_mbox_notify_wait(adapter);
4202 mutex_unlock(&adapter->mbox_lock);
4207 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4209 struct be_cmd_req_get_active_profile *req;
4210 struct be_mcc_wrb *wrb;
4213 if (mutex_lock_interruptible(&adapter->mbox_lock))
4216 wrb = wrb_from_mbox(adapter);
4222 req = embedded_payload(wrb);
4224 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4225 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4228 status = be_mbox_notify_wait(adapter);
4230 struct be_cmd_resp_get_active_profile *resp =
4231 embedded_payload(wrb);
4233 *profile_id = le16_to_cpu(resp->active_profile_id);
4237 mutex_unlock(&adapter->mbox_lock);
4241 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4242 int link_state, u8 domain)
4244 struct be_mcc_wrb *wrb;
4245 struct be_cmd_req_set_ll_link *req;
4248 if (BEx_chip(adapter) || lancer_chip(adapter))
4251 spin_lock_bh(&adapter->mcc_lock);
4253 wrb = wrb_from_mccq(adapter);
4259 req = embedded_payload(wrb);
4261 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4262 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4263 sizeof(*req), wrb, NULL);
4265 req->hdr.version = 1;
4266 req->hdr.domain = domain;
4268 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
4269 req->link_config |= 1;
4271 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4272 req->link_config |= 1 << PLINK_TRACK_SHIFT;
4274 status = be_mcc_notify_wait(adapter);
4276 spin_unlock_bh(&adapter->mcc_lock);
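/*
 * Illustrative sketch (editorial): an ndo_set_vf_link_state hook maps
 * almost directly onto be_cmd_set_logical_link_config(). The bounds
 * check and the vf-to-domain (vf + 1) mapping are assumptions modelled
 * on the other per-VF commands in this file.
 */
static int example_set_vf_link_state(struct net_device *netdev, int vf,
				     int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	return be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
}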
4280 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4281 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4283 struct be_adapter *adapter = netdev_priv(netdev_handle);
4284 struct be_mcc_wrb *wrb;
4285 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4286 struct be_cmd_req_hdr *req;
4287 struct be_cmd_resp_hdr *resp;
4290 spin_lock_bh(&adapter->mcc_lock);
4292 wrb = wrb_from_mccq(adapter);
4297 req = embedded_payload(wrb);
4298 resp = embedded_payload(wrb);
4300 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4301 hdr->opcode, wrb_payload_size, wrb, NULL);
4302 memcpy(req, wrb_payload, wrb_payload_size);
4303 be_dws_cpu_to_le(req, wrb_payload_size);
4305 status = be_mcc_notify_wait(adapter);
4307 *cmd_status = (status & 0xffff);
4310 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4311 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4313 spin_unlock_bh(&adapter->mcc_lock);
4316 EXPORT_SYMBOL(be_roce_mcc_cmd);