2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/pci.h>
38 #include "t4vf_common.h"
39 #include "t4vf_defs.h"
41 #include "../cxgb4/t4_regs.h"
42 #include "../cxgb4/t4_values.h"
43 #include "../cxgb4/t4fw_api.h"
46 * Wait for the device to become ready (signified by our "who am I" register
47 * returning a value other than all 1's). Return an error if it doesn't
50 int t4vf_wait_dev_ready(struct adapter *adapter)
52 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
53 const u32 notready1 = 0xffffffff;
54 const u32 notready2 = 0xeeeeeeee;
57 val = t4_read_reg(adapter, whoami);
58 if (val != notready1 && val != notready2)
61 val = t4_read_reg(adapter, whoami);
62 if (val != notready1 && val != notready2)
69 * Get the reply to a mailbox command and store it in @rpl in big-endian order
70 * (since the firmware data structures are specified in a big-endian layout).
72 static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
75 for ( ; size; size -= 8, mbox_data += 8)
76 *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
80 * Dump contents of mailbox with a leading tag.
82 static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
84 dev_err(adapter->pdev_dev,
85 "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
86 (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
87 (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
88 (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
89 (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
90 (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
91 (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
92 (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
93 (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
97 * t4vf_wr_mbox_core - send a command to FW through the mailbox
98 * @adapter: the adapter
99 * @cmd: the command to write
100 * @size: command length in bytes
101 * @rpl: where to optionally store the reply
102 * @sleep_ok: if true we may sleep while awaiting command completion
104 * Sends the given command to FW through the mailbox and waits for the
105 * FW to execute the command. If @rpl is not %NULL it is used to store
106 * the FW's reply to the command. The command and its optional reply
107 * are of the same length. FW can take up to 500 ms to respond.
108 * @sleep_ok determines whether we may sleep while awaiting the response.
109 * If sleeping is allowed we use progressive backoff otherwise we spin.
111 * The return value is 0 on success or a negative errno on failure. A
112 * failure can happen either because we are not able to execute the
113 * command or FW executes it but signals an error. In the latter case
114 * the return value is the error code indicated by FW (negated).
116 int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
117 void *rpl, bool sleep_ok)
119 static const int delay[] = {
120 1, 1, 3, 5, 10, 10, 20, 50, 100
124 int i, ms, delay_idx;
126 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
128 /* In T6, mailbox size is changed to 128 bytes to avoid
129 * invalidating the entire prefetch buffer.
131 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
132 mbox_data = T4VF_MBDATA_BASE_ADDR;
134 mbox_data = T6VF_MBDATA_BASE_ADDR;
137 * Commands must be multiples of 16 bytes in length and may not be
138 * larger than the size of the Mailbox Data register array.
140 if ((size % 16) != 0 ||
141 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
145 * Loop trying to get ownership of the mailbox. Return an error
146 * if we can't gain ownership.
148 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
149 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
150 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
151 if (v != MBOX_OWNER_DRV)
152 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
155 * Write the command array into the Mailbox Data register array and
156 * transfer ownership of the mailbox to the firmware.
158 * For the VFs, the Mailbox Data "registers" are actually backed by
159 * T4's "MA" interface rather than PL Registers (as is the case for
160 * the PFs). Because these are in different coherency domains, the
161 * write to the VF's PL-register-backed Mailbox Control can race in
162 * front of the writes to the MA-backed VF Mailbox Data "registers".
163 * So we need to do a read-back on at least one byte of the VF Mailbox
164 * Data registers before doing the write to the VF Mailbox Control
167 for (i = 0, p = cmd; i < size; i += 8)
168 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
169 t4_read_reg(adapter, mbox_data); /* flush write */
171 t4_write_reg(adapter, mbox_ctl,
172 MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
173 t4_read_reg(adapter, mbox_ctl); /* flush write */
176 * Spin waiting for firmware to acknowledge processing our command.
181 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
183 ms = delay[delay_idx];
184 if (delay_idx < ARRAY_SIZE(delay) - 1)
191 * If we're the owner, see if this is the reply we wanted.
193 v = t4_read_reg(adapter, mbox_ctl);
194 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
196 * If the Message Valid bit isn't on, revoke ownership
197 * of the mailbox and continue waiting for our reply.
199 if ((v & MBMSGVALID_F) == 0) {
200 t4_write_reg(adapter, mbox_ctl,
201 MBOWNER_V(MBOX_OWNER_NONE));
206 * We now have our reply. Extract the command return
207 * value, copy the reply back to our caller's buffer
208 * (if specified) and revoke ownership of the mailbox.
209 * We return the (negated) firmware command return
210 * code (this depends on FW_SUCCESS == 0).
213 /* return value in low-order little-endian word */
214 v = t4_read_reg(adapter, mbox_data);
215 if (FW_CMD_RETVAL_G(v))
216 dump_mbox(adapter, "FW Error", mbox_data);
219 /* request bit in high-order BE word */
220 WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
221 & FW_CMD_REQUEST_F) == 0);
222 get_mbox_rpl(adapter, rpl, size, mbox_data);
223 WARN_ON((be32_to_cpu(*(__be32 *)rpl)
224 & FW_CMD_REQUEST_F) != 0);
226 t4_write_reg(adapter, mbox_ctl,
227 MBOWNER_V(MBOX_OWNER_NONE));
228 return -FW_CMD_RETVAL_G(v);
233 * We timed out. Return the error ...
235 dump_mbox(adapter, "FW Timeout", mbox_data);
239 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
240 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
241 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
244 * init_link_config - initialize a link's SW state
245 * @lc: structure holding the link state
246 * @caps: link capabilities
248 * Initializes the SW state maintained for each link, including the link's
249 * capabilities and default speed/flow-control/autonegotiation settings.
251 static void init_link_config(struct link_config *lc, unsigned int caps)
253 lc->supported = caps;
254 lc->requested_speed = 0;
256 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
257 if (lc->supported & FW_PORT_CAP_ANEG) {
258 lc->advertising = lc->supported & ADVERT_MASK;
259 lc->autoneg = AUTONEG_ENABLE;
260 lc->requested_fc |= PAUSE_AUTONEG;
263 lc->autoneg = AUTONEG_DISABLE;
268 * t4vf_port_init - initialize port hardware/software state
269 * @adapter: the adapter
270 * @pidx: the adapter port index
272 int t4vf_port_init(struct adapter *adapter, int pidx)
274 struct port_info *pi = adap2pinfo(adapter, pidx);
275 struct fw_vi_cmd vi_cmd, vi_rpl;
276 struct fw_port_cmd port_cmd, port_rpl;
280 * Execute a VI Read command to get our Virtual Interface information
281 * like MAC address, etc.
283 memset(&vi_cmd, 0, sizeof(vi_cmd));
284 vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
287 vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
288 vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
289 v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
293 BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
294 pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
295 t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
298 * If we don't have read access to our port information, we're done
299 * now. Otherwise, execute a PORT Read command to get it ...
301 if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
304 memset(&port_cmd, 0, sizeof(port_cmd));
305 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
308 FW_PORT_CMD_PORTID_V(pi->port_id));
309 port_cmd.action_to_len16 =
310 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
312 v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
316 v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
317 pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
318 FW_PORT_CMD_MDIOADDR_G(v) : -1;
319 pi->port_type = FW_PORT_CMD_PTYPE_G(v);
320 pi->mod_type = FW_PORT_MOD_TYPE_NA;
322 init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
328 * t4vf_fw_reset - issue a reset to FW
329 * @adapter: the adapter
331 * Issues a reset command to FW. For a Physical Function this would
332 * result in the Firmware resetting all of its state. For a Virtual
333 * Function this just resets the state associated with the VF.
335 int t4vf_fw_reset(struct adapter *adapter)
337 struct fw_reset_cmd cmd;
339 memset(&cmd, 0, sizeof(cmd));
340 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
342 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
343 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
347 * t4vf_query_params - query FW or device parameters
348 * @adapter: the adapter
349 * @nparams: the number of parameters
350 * @params: the parameter names
351 * @vals: the parameter values
353 * Reads the values of firmware or device parameters. Up to 7 parameters
354 * can be queried at once.
356 static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
357 const u32 *params, u32 *vals)
360 struct fw_params_cmd cmd, rpl;
361 struct fw_params_param *p;
367 memset(&cmd, 0, sizeof(cmd));
368 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
371 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
372 param[nparams].mnem), 16);
373 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
374 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
375 p->mnem = htonl(*params++);
377 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
379 for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
380 *vals++ = be32_to_cpu(p->val);
385 * t4vf_set_params - sets FW or device parameters
386 * @adapter: the adapter
387 * @nparams: the number of parameters
388 * @params: the parameter names
389 * @vals: the parameter values
391 * Sets the values of firmware or device parameters. Up to 7 parameters
392 * can be specified at once.
394 int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
395 const u32 *params, const u32 *vals)
398 struct fw_params_cmd cmd;
399 struct fw_params_param *p;
405 memset(&cmd, 0, sizeof(cmd));
406 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
409 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
410 param[nparams]), 16);
411 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
412 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
413 p->mnem = cpu_to_be32(*params++);
414 p->val = cpu_to_be32(*vals++);
417 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
421 * t4vf_fl_pkt_align - return the fl packet alignment
422 * @adapter: the adapter
424 * T4 has a single field to specify the packing and padding boundary.
425 * T5 onwards has separate fields for this and hence the alignment for
426 * next packet offset is maximum of these two. And T6 changes the
427 * Ingress Padding Boundary Shift, so it's all a mess and it's best
428 * if we put this in low-level Common Code ...
431 int t4vf_fl_pkt_align(struct adapter *adapter)
433 u32 sge_control, sge_control2;
434 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
436 sge_control = adapter->params.sge.sge_control;
438 /* T4 uses a single control field to specify both the PCIe Padding and
439 * Packing Boundary. T5 introduced the ability to specify these
440 * separately. The actual Ingress Packet Data alignment boundary
441 * within Packed Buffer Mode is the maximum of these two
442 * specifications. (Note that it makes no real practical sense to
443 * have the Pading Boudary be larger than the Packing Boundary but you
444 * could set the chip up that way and, in fact, legacy T4 code would
445 * end doing this because it would initialize the Padding Boundary and
446 * leave the Packing Boundary initialized to 0 (16 bytes).)
447 * Padding Boundary values in T6 starts from 8B,
448 * where as it is 32B for T4 and T5.
450 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
451 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
453 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
455 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
457 fl_align = ingpadboundary;
458 if (!is_t4(adapter->params.chip)) {
459 /* T5 has a different interpretation of one of the PCIe Packing
462 sge_control2 = adapter->params.sge.sge_control2;
463 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
464 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
465 ingpackboundary = 16;
467 ingpackboundary = 1 << (ingpackboundary +
468 INGPACKBOUNDARY_SHIFT_X);
470 fl_align = max(ingpadboundary, ingpackboundary);
476 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
477 * @adapter: the adapter
479 * @qtype: the Ingress or Egress type for @qid
480 * @pbar2_qoffset: BAR2 Queue Offset
481 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
483 * Returns the BAR2 SGE Queue Registers information associated with the
484 * indicated Absolute Queue ID. These are passed back in return value
485 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
486 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
488 * This may return an error which indicates that BAR2 SGE Queue
489 * registers aren't available. If an error is not returned, then the
490 * following values are returned:
492 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
493 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
495 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
496 * require the "Inferred Queue ID" ability may be used. E.g. the
497 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
498 * then these "Inferred Queue ID" register may not be used.
500 int t4vf_bar2_sge_qregs(struct adapter *adapter,
502 enum t4_bar2_qtype qtype,
504 unsigned int *pbar2_qid)
506 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
507 u64 bar2_page_offset, bar2_qoffset;
508 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
510 /* T4 doesn't support BAR2 SGE Queue registers.
512 if (is_t4(adapter->params.chip))
515 /* Get our SGE Page Size parameters.
517 page_shift = adapter->params.sge.sge_vf_hps + 10;
518 page_size = 1 << page_shift;
520 /* Get the right Queues per Page parameters for our Queue.
522 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
523 ? adapter->params.sge.sge_vf_eq_qpp
524 : adapter->params.sge.sge_vf_iq_qpp);
525 qpp_mask = (1 << qpp_shift) - 1;
527 /* Calculate the basics of the BAR2 SGE Queue register area:
528 * o The BAR2 page the Queue registers will be in.
529 * o The BAR2 Queue ID.
530 * o The BAR2 Queue ID Offset into the BAR2 page.
532 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
533 bar2_qid = qid & qpp_mask;
534 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
536 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
537 * hardware will infer the Absolute Queue ID simply from the writes to
538 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
539 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
540 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
541 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
542 * from the BAR2 Page and BAR2 Queue ID.
544 * One important censequence of this is that some BAR2 SGE registers
545 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
546 * there. But other registers synthesize the SGE Queue ID purely
547 * from the writes to the registers -- the Write Combined Doorbell
548 * Buffer is a good example. These BAR2 SGE Registers are only
549 * available for those BAR2 SGE Register areas where the SGE Absolute
550 * Queue ID can be inferred from simple writes.
552 bar2_qoffset = bar2_page_offset;
553 bar2_qinferred = (bar2_qid_offset < page_size);
554 if (bar2_qinferred) {
555 bar2_qoffset += bar2_qid_offset;
559 *pbar2_qoffset = bar2_qoffset;
560 *pbar2_qid = bar2_qid;
565 * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
566 * @adapter: the adapter
568 * Retrieves various core SGE parameters in the form of hardware SGE
569 * register values. The caller is responsible for decoding these as
570 * needed. The SGE parameters are stored in @adapter->params.sge.
572 int t4vf_get_sge_params(struct adapter *adapter)
574 struct sge_params *sge_params = &adapter->params.sge;
575 u32 params[7], vals[7];
578 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
579 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
580 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
581 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
582 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
583 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
584 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
585 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
586 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
587 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
588 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
589 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
590 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
591 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
592 v = t4vf_query_params(adapter, 7, params, vals);
595 sge_params->sge_control = vals[0];
596 sge_params->sge_host_page_size = vals[1];
597 sge_params->sge_fl_buffer_size[0] = vals[2];
598 sge_params->sge_fl_buffer_size[1] = vals[3];
599 sge_params->sge_timer_value_0_and_1 = vals[4];
600 sge_params->sge_timer_value_2_and_3 = vals[5];
601 sge_params->sge_timer_value_4_and_5 = vals[6];
603 /* T4 uses a single control field to specify both the PCIe Padding and
604 * Packing Boundary. T5 introduced the ability to specify these
605 * separately with the Padding Boundary in SGE_CONTROL and and Packing
606 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
607 * SGE_CONTROL in order to determine how ingress packet data will be
608 * laid out in Packed Buffer Mode. Unfortunately, older versions of
609 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
610 * failure grabbing it we throw an error since we can't figure out the
613 if (!is_t4(adapter->params.chip)) {
614 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
615 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
616 v = t4vf_query_params(adapter, 1, params, vals);
617 if (v != FW_SUCCESS) {
618 dev_err(adapter->pdev_dev,
619 "Unable to get SGE Control2; "
620 "probably old firmware.\n");
623 sge_params->sge_control2 = vals[0];
626 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
627 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
628 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
629 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
630 v = t4vf_query_params(adapter, 2, params, vals);
633 sge_params->sge_ingress_rx_threshold = vals[0];
634 sge_params->sge_congestion_control = vals[1];
636 /* For T5 and later we want to use the new BAR2 Doorbells.
637 * Unfortunately, older firmware didn't allow the this register to be
640 if (!is_t4(adapter->params.chip)) {
642 unsigned int pf, s_hps, s_qpp;
644 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
645 FW_PARAMS_PARAM_XYZ_V(
646 SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
647 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
648 FW_PARAMS_PARAM_XYZ_V(
649 SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
650 v = t4vf_query_params(adapter, 2, params, vals);
651 if (v != FW_SUCCESS) {
652 dev_warn(adapter->pdev_dev,
653 "Unable to get VF SGE Queues/Page; "
654 "probably old firmware.\n");
657 sge_params->sge_egress_queues_per_page = vals[0];
658 sge_params->sge_ingress_queues_per_page = vals[1];
660 /* We need the Queues/Page for our VF. This is based on the
661 * PF from which we're instantiated and is indexed in the
662 * register we just read. Do it once here so other code in
663 * the driver can just use it.
665 whoami = t4_read_reg(adapter,
666 T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
667 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
668 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
670 s_hps = (HOSTPAGESIZEPF0_S +
671 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
672 sge_params->sge_vf_hps =
673 ((sge_params->sge_host_page_size >> s_hps)
674 & HOSTPAGESIZEPF0_M);
676 s_qpp = (QUEUESPERPAGEPF0_S +
677 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
678 sge_params->sge_vf_eq_qpp =
679 ((sge_params->sge_egress_queues_per_page >> s_qpp)
680 & QUEUESPERPAGEPF0_M);
681 sge_params->sge_vf_iq_qpp =
682 ((sge_params->sge_ingress_queues_per_page >> s_qpp)
683 & QUEUESPERPAGEPF0_M);
690 * t4vf_get_vpd_params - retrieve device VPD paremeters
691 * @adapter: the adapter
693 * Retrives various device Vital Product Data parameters. The parameters
694 * are stored in @adapter->params.vpd.
696 int t4vf_get_vpd_params(struct adapter *adapter)
698 struct vpd_params *vpd_params = &adapter->params.vpd;
699 u32 params[7], vals[7];
702 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
703 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
704 v = t4vf_query_params(adapter, 1, params, vals);
707 vpd_params->cclk = vals[0];
713 * t4vf_get_dev_params - retrieve device paremeters
714 * @adapter: the adapter
716 * Retrives various device parameters. The parameters are stored in
717 * @adapter->params.dev.
719 int t4vf_get_dev_params(struct adapter *adapter)
721 struct dev_params *dev_params = &adapter->params.dev;
722 u32 params[7], vals[7];
725 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
726 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
727 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
728 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
729 v = t4vf_query_params(adapter, 2, params, vals);
732 dev_params->fwrev = vals[0];
733 dev_params->tprev = vals[1];
739 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
740 * @adapter: the adapter
742 * Retrieves global RSS mode and parameters with which we have to live
743 * and stores them in the @adapter's RSS parameters.
745 int t4vf_get_rss_glb_config(struct adapter *adapter)
747 struct rss_params *rss = &adapter->params.rss;
748 struct fw_rss_glb_config_cmd cmd, rpl;
752 * Execute an RSS Global Configuration read command to retrieve
753 * our RSS configuration.
755 memset(&cmd, 0, sizeof(cmd));
756 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
759 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
760 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
765 * Transate the big-endian RSS Global Configuration into our
766 * cpu-endian format based on the RSS mode. We also do first level
767 * filtering at this point to weed out modes which don't support
770 rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
771 be32_to_cpu(rpl.u.manual.mode_pkd));
773 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
774 u32 word = be32_to_cpu(
775 rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
777 rss->u.basicvirtual.synmapen =
778 ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
779 rss->u.basicvirtual.syn4tupenipv6 =
780 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
781 rss->u.basicvirtual.syn2tupenipv6 =
782 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
783 rss->u.basicvirtual.syn4tupenipv4 =
784 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
785 rss->u.basicvirtual.syn2tupenipv4 =
786 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
788 rss->u.basicvirtual.ofdmapen =
789 ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
791 rss->u.basicvirtual.tnlmapen =
792 ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
793 rss->u.basicvirtual.tnlalllookup =
794 ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
796 rss->u.basicvirtual.hashtoeplitz =
797 ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
799 /* we need at least Tunnel Map Enable to be set */
800 if (!rss->u.basicvirtual.tnlmapen)
806 /* all unknown/unsupported RSS modes result in an error */
814 * t4vf_get_vfres - retrieve VF resource limits
815 * @adapter: the adapter
817 * Retrieves configured resource limits and capabilities for a virtual
818 * function. The results are stored in @adapter->vfres.
820 int t4vf_get_vfres(struct adapter *adapter)
822 struct vf_resources *vfres = &adapter->params.vfres;
823 struct fw_pfvf_cmd cmd, rpl;
828 * Execute PFVF Read command to get VF resource limits; bail out early
829 * with error on command failure.
831 memset(&cmd, 0, sizeof(cmd));
832 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
835 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
836 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
841 * Extract VF resource limits and return success.
843 word = be32_to_cpu(rpl.niqflint_niq);
844 vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
845 vfres->niq = FW_PFVF_CMD_NIQ_G(word);
847 word = be32_to_cpu(rpl.type_to_neq);
848 vfres->neq = FW_PFVF_CMD_NEQ_G(word);
849 vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
851 word = be32_to_cpu(rpl.tc_to_nexactf);
852 vfres->tc = FW_PFVF_CMD_TC_G(word);
853 vfres->nvi = FW_PFVF_CMD_NVI_G(word);
854 vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
856 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
857 vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
858 vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
859 vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
865 * t4vf_read_rss_vi_config - read a VI's RSS configuration
866 * @adapter: the adapter
867 * @viid: Virtual Interface ID
868 * @config: pointer to host-native VI RSS Configuration buffer
870 * Reads the Virtual Interface's RSS configuration information and
871 * translates it into CPU-native format.
873 int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
874 union rss_vi_config *config)
876 struct fw_rss_vi_config_cmd cmd, rpl;
879 memset(&cmd, 0, sizeof(cmd));
880 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
883 FW_RSS_VI_CONFIG_CMD_VIID(viid));
884 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
885 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
889 switch (adapter->params.rss.mode) {
890 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
891 u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
893 config->basicvirtual.ip6fourtupen =
894 ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
895 config->basicvirtual.ip6twotupen =
896 ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
897 config->basicvirtual.ip4fourtupen =
898 ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
899 config->basicvirtual.ip4twotupen =
900 ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
901 config->basicvirtual.udpen =
902 ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
903 config->basicvirtual.defaultq =
904 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
916 * t4vf_write_rss_vi_config - write a VI's RSS configuration
917 * @adapter: the adapter
918 * @viid: Virtual Interface ID
919 * @config: pointer to host-native VI RSS Configuration buffer
921 * Write the Virtual Interface's RSS configuration information
922 * (translating it into firmware-native format before writing).
924 int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
925 union rss_vi_config *config)
927 struct fw_rss_vi_config_cmd cmd, rpl;
929 memset(&cmd, 0, sizeof(cmd));
930 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
933 FW_RSS_VI_CONFIG_CMD_VIID(viid));
934 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
935 switch (adapter->params.rss.mode) {
936 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
939 if (config->basicvirtual.ip6fourtupen)
940 word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
941 if (config->basicvirtual.ip6twotupen)
942 word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
943 if (config->basicvirtual.ip4fourtupen)
944 word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
945 if (config->basicvirtual.ip4twotupen)
946 word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
947 if (config->basicvirtual.udpen)
948 word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
949 word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
950 config->basicvirtual.defaultq);
951 cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
959 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
963 * t4vf_config_rss_range - configure a portion of the RSS mapping table
964 * @adapter: the adapter
965 * @viid: Virtual Interface of RSS Table Slice
966 * @start: starting entry in the table to write
967 * @n: how many table entries to write
968 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
969 * @nrspq: number of values in @rspq
971 * Programs the selected part of the VI's RSS mapping table with the
972 * provided values. If @nrspq < @n the supplied values are used repeatedly
973 * until the full table range is populated.
975 * The caller must ensure the values in @rspq are in the range 0..1023.
977 int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
978 int start, int n, const u16 *rspq, int nrspq)
/* Cursor into the caller's Ingress Queue ID array; it wraps back to @rspq
 * whenever it reaches rsp_end so the supplied values repeat until all @n
 * table entries have been written.
 */
980 const u16 *rsp = rspq;
981 const u16 *rsp_end = rspq+nrspq;
982 struct fw_rss_ind_tbl_cmd cmd;
985 * Initialize firmware command template to write the RSS table.
987 memset(&cmd, 0, sizeof(cmd));
988 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
991 FW_RSS_IND_TBL_CMD_VIID_V(viid));
992 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
995 * Each firmware RSS command can accommodate up to 32 RSS Ingress
996 * Queue Identifiers. These Ingress Queue IDs are packed three to
997 * a 32-bit word as 10-bit values with the upper remaining 2 bits
1001 __be32 *qp = &cmd.iq0_to_iq2;
1002 int nq = min(n, 32);
1006 * Set up the firmware RSS command header to send the next
1007 * "nq" Ingress Queue IDs to the firmware.
1009 cmd.niqid = cpu_to_be16(nq);
1010 cmd.startidx = cpu_to_be16(start);
1013 * "nq" more done for the start of the next loop.
1019 * While there are still Ingress Queue IDs to stuff into the
1020 * current firmware RSS command, retrieve them from the
1021 * Ingress Queue ID array and insert them into the command.
1025 * Grab up to the next 3 Ingress Queue IDs (wrapping
1026 * around the Ingress Queue ID array if necessary) and
1027 * insert them into the firmware RSS command at the
1028 * current 3-tuple position within the command.
1032 int nqbuf = min(3, nq);
/* Unused slots of the 3-tuple are zero-filled before packing. */
1035 qbuf[0] = qbuf[1] = qbuf[2] = 0;
/* Pack the (up to) three 10-bit Ingress Queue IDs into one
 * big-endian 32-bit word of the command.
 */
1042 *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
1043 FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
1044 FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
1048 * Send this portion of the RSS table update to the firmware;
1049 * bail out on any errors.
1051 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1059 * t4vf_alloc_vi - allocate a virtual interface on a port
1060 * @adapter: the adapter
1061 * @port_id: physical port associated with the VI
1063 * Allocate a new Virtual Interface and bind it to the indicated
1064 * physical port. Return the new Virtual Interface Identifier on
1065 * success, or a [negative] error number on failure.
1067 int t4vf_alloc_vi(struct adapter *adapter, int port_id)
1069 struct fw_vi_cmd cmd, rpl;
1073 * Execute a VI command to allocate Virtual Interface and return its
1076 memset(&cmd, 0, sizeof(cmd));
1077 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1081 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1083 cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
1084 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
/* On success the new VI's identifier comes back in the reply's
 * type_viid field.
 */
1088 return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
1092 * t4vf_free_vi -- free a virtual interface
1093 * @adapter: the adapter
1094 * @viid: the virtual interface identifier
1096 * Free a previously allocated Virtual Interface. Return an error on
1099 int t4vf_free_vi(struct adapter *adapter, int viid)
1101 struct fw_vi_cmd cmd;
1104 * Execute a VI command to free the Virtual Interface.
1106 memset(&cmd, 0, sizeof(cmd));
1107 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1110 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
/* The VI to be freed is identified via the command's type_viid field. */
1112 cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
1113 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1117 * t4vf_enable_vi - enable/disable a virtual interface
1118 * @adapter: the adapter
1119 * @viid: the Virtual Interface ID
1120 * @rx_en: 1=enable Rx, 0=disable Rx
1121 * @tx_en: 1=enable Tx, 0=disable Tx
1123 * Enables/disables a virtual interface.
1125 int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
1126 bool rx_en, bool tx_en)
1128 struct fw_vi_enable_cmd cmd;
1130 memset(&cmd, 0, sizeof(cmd));
1131 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1134 FW_VI_ENABLE_CMD_VIID_V(viid));
/* IEN/EEN carry the requested ingress (Rx) and egress (Tx) enables. */
1135 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
1136 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
1138 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1142 * t4vf_identify_port - identify a VI's port by blinking its LED
1143 * @adapter: the adapter
1144 * @viid: the Virtual Interface ID
1145 * @nblinks: how many times to blink LED at 2.5 Hz
1147 * Identifies a VI's port by blinking its LED.
1149 int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
1150 unsigned int nblinks)
1152 struct fw_vi_enable_cmd cmd;
1154 memset(&cmd, 0, sizeof(cmd));
1155 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1158 FW_VI_ENABLE_CMD_VIID_V(viid));
1159 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
/* blinkdur carries the requested blink count from @nblinks. */
1161 cmd.blinkdur = cpu_to_be16(nblinks);
1162 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1166 * t4vf_set_rxmode - set Rx properties of a virtual interface
1167 * @adapter: the adapter
1169 * @mtu: the new MTU or -1 for no change
1170 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
1171 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
1172 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
1173 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
1176 * Sets Rx properties of a virtual interface.
1178 int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
1179 int mtu, int promisc, int all_multi, int bcast, int vlanex,
1182 struct fw_vi_rxmode_cmd cmd;
1184 /* convert to FW values */
/* A "no change" (-1) argument is mapped to the field's all-ones mask
 * value, which tells the firmware to leave that setting untouched.
 */
1186 mtu = FW_VI_RXMODE_CMD_MTU_M;
1188 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
1190 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
1192 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
1194 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
1196 memset(&cmd, 0, sizeof(cmd));
1197 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
1200 FW_VI_RXMODE_CMD_VIID_V(viid));
1201 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1202 cmd.mtu_to_vlanexen =
1203 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
1204 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
1205 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
1206 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
1207 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
1208 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1212 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
1213 * @adapter: the adapter
1214 * @viid: the Virtual Interface Identifier
1215 * @free: if true any existing filters for this VI id are first removed
1216 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1217 * @addr: the MAC address(es)
1218 * @idx: where to store the index of each allocated filter
1219 * @hash: pointer to hash address filter bitmap
1220 * @sleep_ok: call is allowed to sleep
1222 * Allocates an exact-match filter for each of the supplied addresses and
1223 * sets it to the corresponding address. If @idx is not %NULL it should
1224 * have at least @naddr entries, each of which will be set to the index of
1225 * the filter allocated for the corresponding MAC address. If a filter
1226 * could not be allocated for an address its index is set to 0xffff.
1227 * If @hash is not %NULL addresses that fail to allocate an exact filter
1228 * are hashed and update the hash filter bitmap pointed at by @hash.
1230 * Returns a negative error number or the number of filters allocated.
1232 int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1233 unsigned int naddr, const u8 **addr, u16 *idx,
1234 u64 *hash, bool sleep_ok)
1236 int offset, ret = 0;
1237 unsigned nfilters = 0;
1238 unsigned int rem = naddr;
1239 struct fw_vi_mac_cmd cmd, rpl;
1240 unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1242 if (naddr > max_naddr)
/* The addresses are pushed to the firmware in chunks of up to
 * ARRAY_SIZE(cmd.u.exact) entries per FW_VI_MAC_CMD.
 */
1245 for (offset = 0; offset < naddr; /**/) {
1246 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1248 : ARRAY_SIZE(cmd.u.exact));
1249 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1250 u.exact[fw_naddr]), 16);
1251 struct fw_vi_mac_exact *p;
1254 memset(&cmd, 0, sizeof(cmd));
1255 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1258 (free ? FW_CMD_EXEC_F : 0) |
1259 FW_VI_MAC_CMD_VIID_V(viid));
1260 cmd.freemacs_to_len16 =
1261 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
1262 FW_CMD_LEN16_V(len16));
/* Fill one exact-match slot per address; FW_VI_MAC_ADD_MAC asks
 * the firmware to pick a free filter index for each.
 */
1264 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1265 p->valid_to_idx = cpu_to_be16(
1266 FW_VI_MAC_CMD_VALID_F |
1267 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
1268 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
/* -ENOMEM (filter table full) is tolerated here so the addresses
 * that did fit can still be reported; any other error aborts.
 */
1272 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1274 if (ret && ret != -ENOMEM)
1277 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1278 u16 index = FW_VI_MAC_CMD_IDX_G(
1279 be16_to_cpu(p->valid_to_idx));
1286 if (index < max_naddr)
/* Address didn't get an exact filter: fold it into the
 * caller's inexact-match hash bitmap instead.
 */
1289 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1298 * If there were no errors or we merely ran out of room in our MAC
1299 * address arena, return the number of filters actually written.
1301 if (ret == 0 || ret == -ENOMEM)
1307 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
1308 * @adapter: the adapter
1310 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1311 * @addr: the MAC address(es)
1312 * @sleep_ok: call is allowed to sleep
1314 * Frees the exact-match filter for each of the supplied addresses
1316 * Returns a negative error number or the number of filters freed.
1318 int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
1319 unsigned int naddr, const u8 **addr, bool sleep_ok)
1321 int offset, ret = 0;
1322 struct fw_vi_mac_cmd cmd;
1323 unsigned int nfilters = 0;
1324 unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1325 unsigned int rem = naddr;
1327 if (naddr > max_naddr)
/* As in t4vf_alloc_mac_filt(), addresses are processed in chunks of
 * up to ARRAY_SIZE(cmd.u.exact) per firmware command.
 */
1330 for (offset = 0; offset < (int)naddr ; /**/) {
1331 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
1332 rem : ARRAY_SIZE(cmd.u.exact));
1333 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1334 u.exact[fw_naddr]), 16);
1335 struct fw_vi_mac_exact *p;
1338 memset(&cmd, 0, sizeof(cmd));
1339 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1343 FW_VI_MAC_CMD_VIID_V(viid));
1344 cmd.freemacs_to_len16 =
1345 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
1346 FW_CMD_LEN16_V(len16));
/* FW_VI_MAC_MAC_BASED_FREE asks the firmware to locate and free
 * the filter holding each given MAC address.
 */
1348 for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
1349 p->valid_to_idx = cpu_to_be16(
1350 FW_VI_MAC_CMD_VALID_F |
1351 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
1352 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
/* The reply is written back over the command buffer itself (&cmd is
 * passed as the reply pointer) and scanned below.
 */
1355 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
1360 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1361 u16 index = FW_VI_MAC_CMD_IDX_G(
1362 be16_to_cpu(p->valid_to_idx));
1364 if (index < max_naddr)
1378 * t4vf_change_mac - modifies the exact-match filter for a MAC address
1379 * @adapter: the adapter
1380 * @viid: the Virtual Interface ID
1381 * @idx: index of existing filter for old value of MAC address, or -1
1382 * @addr: the new MAC address value
1383 * @persist: if idx < 0, the new MAC allocation should be persistent
1385 * Modifies an exact-match filter and sets it to the new MAC address.
1386 * Note that in general it is not possible to modify the value of a given
1387 * filter so the generic way to modify an address filter is to free the
1388 * one being used by the old address value and allocate a new filter for
1389 * the new address value. @idx can be -1 if the address is a new
1392 * Returns a negative error number or the index of the filter with the new
1395 int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1396 int idx, const u8 *addr, bool persist)
1399 struct fw_vi_mac_cmd cmd, rpl;
1400 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1401 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1403 unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
1406 * If this is a new allocation, determine whether it should be
1407 * persistent (across a "freemacs" operation) or not.
1410 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
1412 memset(&cmd, 0, sizeof(cmd));
1413 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1416 FW_VI_MAC_CMD_VIID_V(viid));
1417 cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1418 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
1419 FW_VI_MAC_CMD_IDX_V(idx));
1420 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* On success the firmware reply carries the filter index actually
 * assigned; an out-of-range index indicates failure.
 */
1422 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1424 p = &rpl.u.exact[0];
1425 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
1426 if (ret >= max_mac_addr)
1433 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
1434 * @adapter: the adapter
1435 * @viid: the Virtual Interface Identifier
1436 * @ucast: whether the hash filter should also match unicast addresses
1437 * @vec: the value to be written to the hash filter
1438 * @sleep_ok: call is allowed to sleep
1440 * Sets the 64-bit inexact-match hash filter for a virtual interface.
1442 int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
1443 bool ucast, u64 vec, bool sleep_ok)
1445 struct fw_vi_mac_cmd cmd;
1446 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
/* NOTE(review): the VIID below is encoded with FW_VI_ENABLE_CMD_VIID_V
 * even though this builds a FW_VI_MAC_CMD; the field layout appears to
 * match, but FW_VI_MAC_CMD_VIID_V would be the consistent macro —
 * confirm against t4fw_api.h.
 */
1449 memset(&cmd, 0, sizeof(cmd));
1450 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1453 FW_VI_ENABLE_CMD_VIID_V(viid));
1454 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
1455 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
1456 FW_CMD_LEN16_V(len16));
1457 cmd.u.hash.hashvec = cpu_to_be64(vec);
1458 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1462 * t4vf_get_port_stats - collect "port" statistics
1463 * @adapter: the adapter
1464 * @pidx: the port index
1465 * @s: the stats structure to fill
1467 * Collect statistics for the "port"'s Virtual Interface.
1469 int t4vf_get_port_stats(struct adapter *adapter, int pidx,
1470 struct t4vf_port_stats *s)
1472 struct port_info *pi = adap2pinfo(adapter, pidx);
1473 struct fw_vi_stats_vf fwstats;
1474 unsigned int rem = VI_VF_NUM_STATS;
/* fwstats is filled piecewise as an array of big-endian 64-bit words. */
1475 __be64 *fwsp = (__be64 *)&fwstats;
1478 * Grab the Virtual Interface statistics a chunk at a time via mailbox
1479 * commands. We could use a Work Request and get all of them at once
1480 * but that's an asynchronous interface which is awkward to use.
1483 unsigned int ix = VI_VF_NUM_STATS - rem;
/* Each mailbox command can fetch at most 6 64-bit statistics. */
1484 unsigned int nstats = min(6U, rem);
1485 struct fw_vi_stats_cmd cmd, rpl;
1486 size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
1487 sizeof(struct fw_vi_stats_ctl));
1488 size_t len16 = DIV_ROUND_UP(len, 16);
1491 memset(&cmd, 0, sizeof(cmd));
1492 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
1493 FW_VI_STATS_CMD_VIID_V(pi->viid) |
1496 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1497 cmd.u.ctl.nstats_ix =
1498 cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
1499 FW_VI_STATS_CMD_NSTATS_V(nstats));
1500 ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
1504 memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
1511 * Translate firmware statistics into host native statistics.
1513 s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
1514 s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
1515 s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
1516 s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
1517 s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
1518 s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
1519 s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
1520 s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
1521 s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
1523 s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
1524 s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
1525 s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
1526 s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
1527 s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
1528 s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
1530 s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
1536 * t4vf_iq_free - free an ingress queue and its free lists
1537 * @adapter: the adapter
1538 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
1539 * @iqid: ingress queue ID
1540 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
1541 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
1543 * Frees an ingress queue and its associated free lists, if any.
1545 int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
1546 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
1548 struct fw_iq_cmd cmd;
1550 memset(&cmd, 0, sizeof(cmd));
1551 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
1554 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
1556 cmd.type_to_iqandstindex =
1557 cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
/* An FL ID of 0xffff tells the firmware there is no attached free list. */
1559 cmd.iqid = cpu_to_be16(iqid);
1560 cmd.fl0id = cpu_to_be16(fl0id);
1561 cmd.fl1id = cpu_to_be16(fl1id);
1562 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1566 * t4vf_eth_eq_free - free an Ethernet egress queue
1567 * @adapter: the adapter
1568 * @eqid: egress queue ID
1570 * Frees an Ethernet egress queue.
1572 int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1574 struct fw_eq_eth_cmd cmd;
1576 memset(&cmd, 0, sizeof(cmd));
1577 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
1580 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
/* Only the EQ ID is needed to identify the queue being freed. */
1582 cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
1583 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1587 * t4vf_handle_fw_rpl - process a firmware reply message
1588 * @adapter: the adapter
1589 * @rpl: start of the firmware message
1591 * Processes a firmware message, such as link state change messages.
1593 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1595 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
1596 u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
/* Dispatch on the firmware opcode; PORT (link/module state change)
 * messages are handled here, anything else is reported as unknown.
 */
1601 * Link/module state change message.
1603 const struct fw_port_cmd *port_cmd =
1604 (const struct fw_port_cmd *)rpl;
1606 int action, port_id, link_ok, speed, fc, pidx;
1609 * Extract various fields from port status change message.
1611 action = FW_PORT_CMD_ACTION_G(
1612 be32_to_cpu(port_cmd->action_to_len16));
1613 if (action != FW_PORT_CMD_ACTION_GET_PORT_INFO) {
1614 dev_err(adapter->pdev_dev,
1615 "Unknown firmware PORT reply action %x\n",
1620 port_id = FW_PORT_CMD_PORTID_G(
1621 be32_to_cpu(port_cmd->op_to_portid));
1623 stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
1624 link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
/* Decode pause-capability and link-speed bits from the status word. */
1627 if (stat & FW_PORT_CMD_RXPAUSE_F)
1629 if (stat & FW_PORT_CMD_TXPAUSE_F)
1631 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
1633 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
1635 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
1637 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
1641 * Scan all of our "ports" (Virtual Interfaces) looking for
1642 * those bound to the physical port which has changed. If
1643 * our recorded state doesn't match the current state,
1644 * signal that change to the OS code.
1646 for_each_port(adapter, pidx) {
1647 struct port_info *pi = adap2pinfo(adapter, pidx);
1648 struct link_config *lc;
1650 if (pi->port_id != port_id)
1655 mod = FW_PORT_CMD_MODTYPE_G(stat);
1656 if (mod != pi->mod_type) {
1658 t4vf_os_portmod_changed(adapter, pidx);
1661 if (link_ok != lc->link_ok || speed != lc->speed ||
1663 /* something changed */
1664 lc->link_ok = link_ok;
1668 be16_to_cpu(port_cmd->u.info.pcap);
1669 t4vf_os_link_changed(adapter, pidx, link_ok);
1676 dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
1684 int t4vf_prep_adapter(struct adapter *adapter)
1687 unsigned int chipid;
1689 /* Wait for the device to become ready before proceeding ...
1691 err = t4vf_wait_dev_ready(adapter);
1695 /* Default port and clock for debugging in case we can't reach
1698 adapter->params.nports = 1;
1699 adapter->params.vfres.pmask = 1;
1700 adapter->params.vpd.cclk = 50000;
1702 adapter->params.chip = 0;
1703 switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
1705 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
1706 adapter->params.arch.sge_fl_db = DBPRIO_F;
1707 adapter->params.arch.mps_tcam_size =
1708 NUM_MPS_CLS_SRAM_L_INSTANCES;
1712 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1713 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
1714 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
1715 adapter->params.arch.mps_tcam_size =
1716 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1720 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1721 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
1722 adapter->params.arch.sge_fl_db = 0;
1723 adapter->params.arch.mps_tcam_size =
1724 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;