/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

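/*
 * The resource tracker records, per slave (VF) and per resource type,
 * which HCA resources a function currently owns.  Each tracked object
 * embeds a struct res_common that carries its id, owner and state, and
 * is linked both into a per-type red-black tree (for lookup by id) and
 * into the owning slave's per-type list (for bulk cleanup).
 */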
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1,
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};

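/*
 * All tracked resources live in one red-black tree per resource type,
 * keyed by res_common.res_id.  The two helpers below implement the
 * usual rbtree lookup/insert walk over that key.
 */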
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

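/*
 * Quota accounting: every function has a hard quota and a guaranteed
 * minimum per resource type.  Guaranteed amounts are set aside in the
 * reserved pool up front; requests beyond a function's guarantee are
 * served from the shared free pool only while that pool still covers
 * what is reserved for the other functions' guarantees.  MAC and VLAN
 * counts are kept per port, everything else is global.
 */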
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}

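/*
 * Build the tracker for the master: a per-slave list head for every
 * resource type, one rbtree per resource type, and the quota /
 * guarantee tables consumed by mlx4_grant_resource().
 */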
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	kfree(priv->mfunc.master.res_tracker.slave_list);
	priv->mfunc.master.res_tracker.slave_list = NULL;
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

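/*
 * QP context fix-ups applied on behalf of a slave: translate the
 * slave's virtual pkey index and GID index into the physical values
 * the device expects.
 */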
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

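/*
 * Enforce the administrator's VST/spoof-check policy on a QP context
 * written by a VF: pick the operational VLAN and QoS settings from the
 * vf_oper state and rewrite the relevant pri_path fields before the
 * context reaches the device.
 */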
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

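/*
 * get_res() looks a resource up, verifies the caller owns it and is
 * not racing another command, then parks it in RES_ANY_BUSY so the
 * resource cannot change state until the matching put_res() restores
 * the saved state.
 */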
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

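/*
 * Sum the HW counters of every counter instance a VF owns on the given
 * port into one mlx4_counter, so the PF can report per-VF statistics.
 */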
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

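/*
 * The *_res_start_move_to() helpers implement the per-type state
 * machines: they validate that the requested transition is legal from
 * the current state, then mark the resource busy with the target state
 * recorded in to_state.  res_end_move() commits the transition and
 * res_abort_move() rolls it back.
 */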
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_qp *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_mpt *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_eq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err;
	struct res_cq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_srq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

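/*
 * Allocation handlers for the virtual ALLOC_RES command: each one
 * charges the slave's quota via mlx4_grant_resource(), performs the
 * real allocation with the corresponding __mlx4_* helper, and
 * registers the result in the tracker, unwinding the earlier steps on
 * any failure.
 */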
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

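/*
 * Dispatcher for the virtual ALLOC_RES command: the low byte of
 * in_modifier selects the resource type, op_modifier selects the
 * RES_OP_* operation, and for MAC/VLAN the next byte carries the port.
 */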
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	if (index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

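/*
 * Derive the number of MTT entries a QP context will consume: the send
 * and receive queue byte sizes are reconstructed from their log sizes
 * and strides, summed, offset by the page offset, and rounded up to
 * whole pages.  QPs attached to an SRQ, or using RSS or XRC, have no
 * receive queue of their own.
 */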
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

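/*
 * SW2HW_MPT on behalf of a VF: move the MPT to hardware ownership, but
 * first reject anything a guest must not do (memory windows, FMR,
 * bind-enabled regions, a PD owned by another function) and verify the
 * referenced MTT range really belongs to this slave.
 */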
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);

		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);

2872 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2873 struct mlx4_vhcr *vhcr,
2874 struct mlx4_cmd_mailbox *inbox,
2875 struct mlx4_cmd_mailbox *outbox,
2876 struct mlx4_cmd_info *cmd)
2879 int qpn = vhcr->in_modifier & 0x7fffff;
2880 struct res_mtt *mtt;
2882 struct mlx4_qp_context *qpc = inbox->buf + 8;
2883 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2884 int mtt_size = qp_get_mtt_size(qpc);
2887 int rcqn = qp_get_rcqn(qpc);
2888 int scqn = qp_get_scqn(qpc);
2889 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2890 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2891 struct res_srq *srq;
2892 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2894 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2898 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2901 qp->local_qpn = local_qpn;
2902 qp->sched_queue = 0;
2904 qp->vlan_control = 0;
2906 qp->pri_path_fl = 0;
2909 qp->qpc_flags = be32_to_cpu(qpc->flags);
2911 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2915 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2919 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2924 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2931 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2936 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2937 update_pkey_index(dev, slave, inbox);
2938 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2941 atomic_inc(&mtt->ref_count);
2943 atomic_inc(&rcq->ref_count);
2945 atomic_inc(&scq->ref_count);
2949 put_res(dev, slave, scqn, RES_CQ);
2952 atomic_inc(&srq->ref_count);
2953 put_res(dev, slave, srqn, RES_SRQ);
2956 put_res(dev, slave, rcqn, RES_CQ);
2957 put_res(dev, slave, mtt_base, RES_MTT);
2958 res_end_move(dev, slave, RES_QP, qpn);
2964 put_res(dev, slave, srqn, RES_SRQ);
2967 put_res(dev, slave, scqn, RES_CQ);
2969 put_res(dev, slave, rcqn, RES_CQ);
2971 put_res(dev, slave, mtt_base, RES_MTT);
2973 res_abort_move(dev, slave, RES_QP, qpn);
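/* MTT sizing for EQs and CQs: an entry is 32 bytes, so a queue with
 * 2^log_size entries occupies 2^(log_size + 5) bytes. Dividing by the
 * 2^page_shift page size gives the number of pages, and hence the
 * number of MTT entries, the queue's buffer requires; a queue that
 * fits within a single page needs just one MTT entry.
 */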
2978 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2980 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2983 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2985 int log_eq_size = eqc->log_eq_size & 0x1f;
2986 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2988 if (log_eq_size + 5 < page_shift)
2991 return 1 << (log_eq_size + 5 - page_shift);
2994 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2996 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2999 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3001 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3002 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3004 if (log_cq_size + 5 < page_shift)
3007 return 1 << (log_cq_size + 5 - page_shift);
3010 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3011 struct mlx4_vhcr *vhcr,
3012 struct mlx4_cmd_mailbox *inbox,
3013 struct mlx4_cmd_mailbox *outbox,
3014 struct mlx4_cmd_info *cmd)
3017 int eqn = vhcr->in_modifier;
3018 int res_id = (slave << 10) | eqn;
3019 struct mlx4_eq_context *eqc = inbox->buf;
3020 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3021 int mtt_size = eq_get_mtt_size(eqc);
3023 struct res_mtt *mtt;
3025 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3028 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3032 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3036 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3040 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3044 atomic_inc(&mtt->ref_count);
3046 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3047 res_end_move(dev, slave, RES_EQ, res_id);
3051 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3053 res_abort_move(dev, slave, RES_EQ, res_id);
3055 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3059 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3060 struct mlx4_vhcr *vhcr,
3061 struct mlx4_cmd_mailbox *inbox,
3062 struct mlx4_cmd_mailbox *outbox,
3063 struct mlx4_cmd_info *cmd)
3066 u8 get = vhcr->op_modifier;
3071 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
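/* Scan the slave's tracked MTT ranges for one that fully contains
 * [start, start + len) and, if found, mark it busy so it cannot be
 * freed while a WRITE_MTT operates on it.
 */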
3076 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3077 int len, struct res_mtt **res)
3079 struct mlx4_priv *priv = mlx4_priv(dev);
3080 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3081 struct res_mtt *mtt;
3084 spin_lock_irq(mlx4_tlock(dev));
3085 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3087 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3089 mtt->com.from_state = mtt->com.state;
3090 mtt->com.state = RES_MTT_BUSY;
3095 spin_unlock_irq(mlx4_tlock(dev));
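/* Sanity-check the QP context a slave passes in a modify-QP command:
 * VFs may not set FPP or rate-limit parameters, their GID indices must
 * fall within the per-slave GID range on IB ports, and MLX proxy
 * special QPs may only be created by VFs with SMI enabled.
 */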
3100 static int verify_qp_parameters(struct mlx4_dev *dev,
3101 struct mlx4_vhcr *vhcr,
3102 struct mlx4_cmd_mailbox *inbox,
3103 enum qp_transition transition, u8 slave)
3107 struct mlx4_qp_context *qp_ctx;
3108 enum mlx4_qp_optpar optpar;
3112 qp_ctx = inbox->buf + 8;
3113 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3114 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3116 if (slave != mlx4_master_func_num(dev)) {
3117 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3118 /* setting QP rate-limit is disallowed for VFs */
3119 if (qp_ctx->rate_limit_params)
3125 case MLX4_QP_ST_XRC:
3127 switch (transition) {
3128 case QP_TRANS_INIT2RTR:
3129 case QP_TRANS_RTR2RTS:
3130 case QP_TRANS_RTS2RTS:
3131 case QP_TRANS_SQD2SQD:
3132 case QP_TRANS_SQD2RTS:
3133 if (slave != mlx4_master_func_num(dev))
3134 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3135 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3136 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3137 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3140 if (qp_ctx->pri_path.mgid_index >= num_gids)
3143 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3144 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3145 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3146 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3149 if (qp_ctx->alt_path.mgid_index >= num_gids)
3158 case MLX4_QP_ST_MLX:
3159 qpn = vhcr->in_modifier & 0x7fffff;
3160 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3161 if (transition == QP_TRANS_INIT2RTR &&
3162 slave != mlx4_master_func_num(dev) &&
3163 mlx4_is_qp_reserved(dev, qpn) &&
3164 !mlx4_vf_smi_enabled(dev, slave, port)) {
3165 /* only enabled VFs may create MLX proxy QPs */
3166 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3167 __func__, slave, port);
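/* The WRITE_MTT inbox holds the starting MTT offset in page_list[0];
 * the page addresses themselves begin at index 2. The low bit of each
 * address (presumably the hardware "present" flag) is masked off here,
 * since the SW write_mtt path sets it itself.
 */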
3179 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3180 struct mlx4_vhcr *vhcr,
3181 struct mlx4_cmd_mailbox *inbox,
3182 struct mlx4_cmd_mailbox *outbox,
3183 struct mlx4_cmd_info *cmd)
3185 struct mlx4_mtt mtt;
3186 __be64 *page_list = inbox->buf;
3187 u64 *pg_list = (u64 *)page_list;
3189 struct res_mtt *rmtt = NULL;
3190 int start = be64_to_cpu(page_list[0]);
3191 int npages = vhcr->in_modifier;
3194 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3198 /* Call the SW implementation of write_mtt:
3199 * - Prepare a dummy mtt struct
3200 * - Translate inbox contents to simple addresses in host endianness */
3201 mtt.offset = 0; /* TBD: offset handling is broken, but it is left
3202 as-is since the offset is not actually used */
3205 for (i = 0; i < npages; ++i)
3206 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3208 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3209 ((u64 *)page_list + 2));
3212 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3217 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3218 struct mlx4_vhcr *vhcr,
3219 struct mlx4_cmd_mailbox *inbox,
3220 struct mlx4_cmd_mailbox *outbox,
3221 struct mlx4_cmd_info *cmd)
3223 int eqn = vhcr->in_modifier;
3224 int res_id = eqn | (slave << 10);
3228 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3232 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3236 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3240 atomic_dec(&eq->mtt->ref_count);
3241 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3242 res_end_move(dev, slave, RES_EQ, res_id);
3243 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3248 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3250 res_abort_move(dev, slave, RES_EQ, res_id);
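/* Forward an event to a slave by injecting the EQE into the EQ the
 * slave registered for this event type. Only the 28-byte EQE body is
 * copied; the in_modifier packs the slave id in the low byte and the
 * EQN in bits 16 and up. For command-completion events the token is
 * patched in first.
 */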
3255 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3257 struct mlx4_priv *priv = mlx4_priv(dev);
3258 struct mlx4_slave_event_eq_info *event_eq;
3259 struct mlx4_cmd_mailbox *mailbox;
3260 u32 in_modifier = 0;
3265 if (!priv->mfunc.master.slave_state)
3268 /* check that the slave id is valid, is not the PF, and is active */
3269 if (slave < 0 || slave > dev->persist->num_vfs ||
3270 slave == dev->caps.function ||
3271 !priv->mfunc.master.slave_state[slave].active)
3274 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3276 /* Create the event only if the slave is registered */
3277 if (event_eq->eqn < 0)
3280 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3281 res_id = (slave << 10) | event_eq->eqn;
3282 err = get_res(dev, slave, res_id, RES_EQ, &req);
3286 if (req->com.from_state != RES_EQ_HW) {
3291 mailbox = mlx4_alloc_cmd_mailbox(dev);
3292 if (IS_ERR(mailbox)) {
3293 err = PTR_ERR(mailbox);
3297 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3299 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3302 memcpy(mailbox->buf, (u8 *) eqe, 28);
3304 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3306 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3307 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3310 put_res(dev, slave, res_id, RES_EQ);
3311 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3312 mlx4_free_cmd_mailbox(dev, mailbox);
3316 put_res(dev, slave, res_id, RES_EQ);
3319 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3323 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3324 struct mlx4_vhcr *vhcr,
3325 struct mlx4_cmd_mailbox *inbox,
3326 struct mlx4_cmd_mailbox *outbox,
3327 struct mlx4_cmd_info *cmd)
3329 int eqn = vhcr->in_modifier;
3330 int res_id = eqn | (slave << 10);
3334 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3338 if (eq->com.from_state != RES_EQ_HW) {
3343 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3346 put_res(dev, slave, res_id, RES_EQ);
3350 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3357 int cqn = vhcr->in_modifier;
3358 struct mlx4_cq_context *cqc = inbox->buf;
3359 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3360 struct res_cq *cq = NULL;
3361 struct res_mtt *mtt;
3363 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3366 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3369 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3372 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3375 atomic_inc(&mtt->ref_count);
3377 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3378 res_end_move(dev, slave, RES_CQ, cqn);
3382 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3384 res_abort_move(dev, slave, RES_CQ, cqn);
3388 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3389 struct mlx4_vhcr *vhcr,
3390 struct mlx4_cmd_mailbox *inbox,
3391 struct mlx4_cmd_mailbox *outbox,
3392 struct mlx4_cmd_info *cmd)
3395 int cqn = vhcr->in_modifier;
3396 struct res_cq *cq = NULL;
3398 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3401 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3404 atomic_dec(&cq->mtt->ref_count);
3405 res_end_move(dev, slave, RES_CQ, cqn);
3409 res_abort_move(dev, slave, RES_CQ, cqn);
3413 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3414 struct mlx4_vhcr *vhcr,
3415 struct mlx4_cmd_mailbox *inbox,
3416 struct mlx4_cmd_mailbox *outbox,
3417 struct mlx4_cmd_info *cmd)
3419 int cqn = vhcr->in_modifier;
3423 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3427 if (cq->com.from_state != RES_CQ_HW)
3430 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3432 put_res(dev, slave, cqn, RES_CQ);
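/* MODIFY_CQ with op_modifier 0 resizes the CQ: drop the reference on
 * the CQ's current MTT range and take one on the range described by
 * the new context, after validating it.
 */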
3437 static int handle_resize(struct mlx4_dev *dev, int slave,
3438 struct mlx4_vhcr *vhcr,
3439 struct mlx4_cmd_mailbox *inbox,
3440 struct mlx4_cmd_mailbox *outbox,
3441 struct mlx4_cmd_info *cmd,
3445 struct res_mtt *orig_mtt;
3446 struct res_mtt *mtt;
3447 struct mlx4_cq_context *cqc = inbox->buf;
3448 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3450 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3454 if (orig_mtt != cq->mtt) {
3459 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3463 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3466 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3469 atomic_dec(&orig_mtt->ref_count);
3470 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3471 atomic_inc(&mtt->ref_count);
3473 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3477 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3479 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3485 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3486 struct mlx4_vhcr *vhcr,
3487 struct mlx4_cmd_mailbox *inbox,
3488 struct mlx4_cmd_mailbox *outbox,
3489 struct mlx4_cmd_info *cmd)
3491 int cqn = vhcr->in_modifier;
3495 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3499 if (cq->com.from_state != RES_CQ_HW)
3502 if (vhcr->op_modifier == 0) {
3503 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3507 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3509 put_res(dev, slave, cqn, RES_CQ);
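/* SRQ buffer size is 2^log_srq_size entries of 2^(log_rq_stride + 4)
 * bytes each (the stride is encoded in 16-byte units), divided by the
 * page size to yield the MTT entry count.
 */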
3514 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3516 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3517 int log_rq_stride = srqc->logstride & 7;
3518 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3520 if (log_srq_size + log_rq_stride + 4 < page_shift)
3523 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3526 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3527 struct mlx4_vhcr *vhcr,
3528 struct mlx4_cmd_mailbox *inbox,
3529 struct mlx4_cmd_mailbox *outbox,
3530 struct mlx4_cmd_info *cmd)
3533 int srqn = vhcr->in_modifier;
3534 struct res_mtt *mtt;
3535 struct res_srq *srq = NULL;
3536 struct mlx4_srq_context *srqc = inbox->buf;
3537 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3539 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3542 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3545 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3548 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3553 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3557 atomic_inc(&mtt->ref_count);
3559 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3560 res_end_move(dev, slave, RES_SRQ, srqn);
3564 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3566 res_abort_move(dev, slave, RES_SRQ, srqn);
3571 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3572 struct mlx4_vhcr *vhcr,
3573 struct mlx4_cmd_mailbox *inbox,
3574 struct mlx4_cmd_mailbox *outbox,
3575 struct mlx4_cmd_info *cmd)
3578 int srqn = vhcr->in_modifier;
3579 struct res_srq *srq = NULL;
3581 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3584 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3587 atomic_dec(&srq->mtt->ref_count);
3589 atomic_dec(&srq->cq->ref_count);
3590 res_end_move(dev, slave, RES_SRQ, srqn);
3595 res_abort_move(dev, slave, RES_SRQ, srqn);
3600 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3601 struct mlx4_vhcr *vhcr,
3602 struct mlx4_cmd_mailbox *inbox,
3603 struct mlx4_cmd_mailbox *outbox,
3604 struct mlx4_cmd_info *cmd)
3607 int srqn = vhcr->in_modifier;
3608 struct res_srq *srq;
3610 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3613 if (srq->com.from_state != RES_SRQ_HW) {
3617 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3619 put_res(dev, slave, srqn, RES_SRQ);
3623 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3624 struct mlx4_vhcr *vhcr,
3625 struct mlx4_cmd_mailbox *inbox,
3626 struct mlx4_cmd_mailbox *outbox,
3627 struct mlx4_cmd_info *cmd)
3630 int srqn = vhcr->in_modifier;
3631 struct res_srq *srq;
3633 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3637 if (srq->com.from_state != RES_SRQ_HW) {
3642 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3644 put_res(dev, slave, srqn, RES_SRQ);
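/* Generic wrapper for QP commands that need no fixups beyond verifying
 * that the QP is in hardware ownership.
 */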
3648 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3649 struct mlx4_vhcr *vhcr,
3650 struct mlx4_cmd_mailbox *inbox,
3651 struct mlx4_cmd_mailbox *outbox,
3652 struct mlx4_cmd_info *cmd)
3655 int qpn = vhcr->in_modifier & 0x7fffff;
3658 err = get_res(dev, slave, qpn, RES_QP, &qp);
3661 if (qp->com.from_state != RES_QP_HW) {
3666 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3668 put_res(dev, slave, qpn, RES_QP);
3672 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3673 struct mlx4_vhcr *vhcr,
3674 struct mlx4_cmd_mailbox *inbox,
3675 struct mlx4_cmd_mailbox *outbox,
3676 struct mlx4_cmd_info *cmd)
3678 struct mlx4_qp_context *context = inbox->buf + 8;
3679 adjust_proxy_tun_qkey(dev, vhcr, context);
3680 update_pkey_index(dev, slave, inbox);
3681 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
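/* Bit 6 of sched_queue selects the port. Rewrite it to the real
 * (possibly remapped) port for this slave, for both the primary path
 * and, when the optpar requests it, the alternate path.
 */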
3684 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3685 struct mlx4_qp_context *qpc,
3686 struct mlx4_cmd_mailbox *inbox)
3688 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3690 int port = mlx4_slave_convert_port(
3691 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3696 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3699 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3700 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3701 qpc->pri_path.sched_queue = pri_sched_queue;
3704 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3705 port = mlx4_slave_convert_port(
3706 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3710 qpc->alt_path.sched_queue =
3711 (qpc->alt_path.sched_queue & ~(1 << 6)) |
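/* For Ethernet (RoCE) QPs, verify that the source-MAC index in the QP
 * context refers to a MAC actually registered to this slave on the
 * port in question.
 */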
3717 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3718 struct mlx4_qp_context *qpc,
3719 struct mlx4_cmd_mailbox *inbox)
3723 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3724 u8 sched = *(u8 *)(inbox->buf + 64);
3727 port = (sched >> 6 & 1) + 1;
3728 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3729 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3730 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3736 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3737 struct mlx4_vhcr *vhcr,
3738 struct mlx4_cmd_mailbox *inbox,
3739 struct mlx4_cmd_mailbox *outbox,
3740 struct mlx4_cmd_info *cmd)
3743 struct mlx4_qp_context *qpc = inbox->buf + 8;
3744 int qpn = vhcr->in_modifier & 0x7fffff;
3746 u8 orig_sched_queue;
3747 __be32 orig_param3 = qpc->param3;
3748 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3749 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3750 u8 orig_pri_path_fl = qpc->pri_path.fl;
3751 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3752 u8 orig_feup = qpc->pri_path.feup;
3754 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3757 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3761 if (roce_verify_mac(dev, slave, qpc, inbox))
3764 update_pkey_index(dev, slave, inbox);
3765 update_gid(dev, inbox, (u8)slave);
3766 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3767 orig_sched_queue = qpc->pri_path.sched_queue;
3769 err = get_res(dev, slave, qpn, RES_QP, &qp);
3772 if (qp->com.from_state != RES_QP_HW) {
3777 err = update_vport_qp_param(dev, inbox, slave, qpn);
3781 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3783 /* if no error, save sched queue value passed in by VF. This is
3784 * essentially the QOS value provided by the VF. This will be useful
3785 * if we allow dynamic changes from VST back to VGT
3788 qp->sched_queue = orig_sched_queue;
3789 qp->param3 = orig_param3;
3790 qp->vlan_control = orig_vlan_control;
3791 qp->fvl_rx = orig_fvl_rx;
3792 qp->pri_path_fl = orig_pri_path_fl;
3793 qp->vlan_index = orig_vlan_index;
3794 qp->feup = orig_feup;
3796 put_res(dev, slave, qpn, RES_QP);
3800 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3801 struct mlx4_vhcr *vhcr,
3802 struct mlx4_cmd_mailbox *inbox,
3803 struct mlx4_cmd_mailbox *outbox,
3804 struct mlx4_cmd_info *cmd)
3807 struct mlx4_qp_context *context = inbox->buf + 8;
3809 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3812 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3816 update_pkey_index(dev, slave, inbox);
3817 update_gid(dev, inbox, (u8)slave);
3818 adjust_proxy_tun_qkey(dev, vhcr, context);
3819 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3822 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3823 struct mlx4_vhcr *vhcr,
3824 struct mlx4_cmd_mailbox *inbox,
3825 struct mlx4_cmd_mailbox *outbox,
3826 struct mlx4_cmd_info *cmd)
3829 struct mlx4_qp_context *context = inbox->buf + 8;
3831 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3834 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3838 update_pkey_index(dev, slave, inbox);
3839 update_gid(dev, inbox, (u8)slave);
3840 adjust_proxy_tun_qkey(dev, vhcr, context);
3841 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3845 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3846 struct mlx4_vhcr *vhcr,
3847 struct mlx4_cmd_mailbox *inbox,
3848 struct mlx4_cmd_mailbox *outbox,
3849 struct mlx4_cmd_info *cmd)
3851 struct mlx4_qp_context *context = inbox->buf + 8;
3852 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3855 adjust_proxy_tun_qkey(dev, vhcr, context);
3856 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3859 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3860 struct mlx4_vhcr *vhcr,
3861 struct mlx4_cmd_mailbox *inbox,
3862 struct mlx4_cmd_mailbox *outbox,
3863 struct mlx4_cmd_info *cmd)
3866 struct mlx4_qp_context *context = inbox->buf + 8;
3868 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3871 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3875 adjust_proxy_tun_qkey(dev, vhcr, context);
3876 update_gid(dev, inbox, (u8)slave);
3877 update_pkey_index(dev, slave, inbox);
3878 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3881 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3882 struct mlx4_vhcr *vhcr,
3883 struct mlx4_cmd_mailbox *inbox,
3884 struct mlx4_cmd_mailbox *outbox,
3885 struct mlx4_cmd_info *cmd)
3888 struct mlx4_qp_context *context = inbox->buf + 8;
3890 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3893 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3897 adjust_proxy_tun_qkey(dev, vhcr, context);
3898 update_gid(dev, inbox, (u8)slave);
3899 update_pkey_index(dev, slave, inbox);
3900 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3903 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3904 struct mlx4_vhcr *vhcr,
3905 struct mlx4_cmd_mailbox *inbox,
3906 struct mlx4_cmd_mailbox *outbox,
3907 struct mlx4_cmd_info *cmd)
3910 int qpn = vhcr->in_modifier & 0x7fffff;
3913 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3916 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3920 atomic_dec(&qp->mtt->ref_count);
3921 atomic_dec(&qp->rcq->ref_count);
3922 atomic_dec(&qp->scq->ref_count);
3924 atomic_dec(&qp->srq->ref_count);
3925 res_end_move(dev, slave, RES_QP, qpn);
3929 res_abort_move(dev, slave, RES_QP, qpn);
3934 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3935 struct res_qp *rqp, u8 *gid)
3937 struct res_gid *res;
3939 list_for_each_entry(res, &rqp->mcg_list, list) {
3940 if (!memcmp(res->gid, gid, 16))
3946 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3947 u8 *gid, enum mlx4_protocol prot,
3948 enum mlx4_steer_type steer, u64 reg_id)
3950 struct res_gid *res;
3953 res = kzalloc(sizeof *res, GFP_KERNEL);
3957 spin_lock_irq(&rqp->mcg_spl);
3958 if (find_gid(dev, slave, rqp, gid)) {
3962 memcpy(res->gid, gid, 16);
3965 res->reg_id = reg_id;
3966 list_add_tail(&res->list, &rqp->mcg_list);
3969 spin_unlock_irq(&rqp->mcg_spl);
3974 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3975 u8 *gid, enum mlx4_protocol prot,
3976 enum mlx4_steer_type steer, u64 *reg_id)
3978 struct res_gid *res;
3981 spin_lock_irq(&rqp->mcg_spl);
3982 res = find_gid(dev, slave, rqp, gid);
3983 if (!res || res->prot != prot || res->steer != steer)
3986 *reg_id = res->reg_id;
3987 list_del(&res->list);
3991 spin_unlock_irq(&rqp->mcg_spl);
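/* Multicast attach dispatches on the steering mode: device-managed
 * flow steering translates the request into a DMFS rule (yielding a
 * reg_id used later for detach), while B0 steering uses the common
 * attach path, with the port taken from byte 5 of the GID.
 */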
3996 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3997 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3998 enum mlx4_steer_type type, u64 *reg_id)
4000 switch (dev->caps.steering_mode) {
4001 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4002 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4005 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4006 block_loopback, prot,
4009 case MLX4_STEERING_MODE_B0:
4010 if (prot == MLX4_PROT_ETH) {
4011 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4016 return mlx4_qp_attach_common(dev, qp, gid,
4017 block_loopback, prot, type);
4023 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4024 u8 gid[16], enum mlx4_protocol prot,
4025 enum mlx4_steer_type type, u64 reg_id)
4027 switch (dev->caps.steering_mode) {
4028 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4029 return mlx4_flow_detach(dev, reg_id);
4030 case MLX4_STEERING_MODE_B0:
4031 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4037 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4038 u8 *gid, enum mlx4_protocol prot)
4042 if (prot != MLX4_PROT_ETH)
4045 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4046 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4047 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4056 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4057 struct mlx4_vhcr *vhcr,
4058 struct mlx4_cmd_mailbox *inbox,
4059 struct mlx4_cmd_mailbox *outbox,
4060 struct mlx4_cmd_info *cmd)
4062 struct mlx4_qp qp; /* dummy for calling attach/detach */
4063 u8 *gid = inbox->buf;
4064 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4069 int attach = vhcr->op_modifier;
4070 int block_loopback = vhcr->in_modifier >> 31;
4071 u8 steer_type_mask = 2;
4072 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4074 qpn = vhcr->in_modifier & 0xffffff;
4075 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4081 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4084 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4087 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4091 err = mlx4_adjust_port(dev, slave, gid, prot);
4095 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4099 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4101 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4104 put_res(dev, slave, qpn, RES_QP);
4108 qp_detach(dev, &qp, gid, prot, type, reg_id);
4110 put_res(dev, slave, qpn, RES_QP);
4115 * MAC validation for Flow Steering rules.
4116 * A VF can attach rules only with a MAC address that is assigned to it.
4118 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4119 struct list_head *rlist)
4121 struct mac_res *res, *tmp;
4124 /* make sure it isn't a multicast or broadcast MAC */
4125 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4126 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4127 list_for_each_entry_safe(res, tmp, rlist, list) {
4128 be_mac = cpu_to_be64(res->mac << 16);
4129 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4132 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4133 eth_header->eth.dst_mac, slave);
4139 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4140 struct _rule_hw *eth_header)
4142 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4143 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4144 struct mlx4_net_trans_rule_hw_eth *eth =
4145 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4146 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4147 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4148 next_rule->rsvd == 0;
4151 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4156 * If the eth header is missing, append an eth header with a MAC address
4157 * assigned to the VF.
4159 static int add_eth_header(struct mlx4_dev *dev, int slave,
4160 struct mlx4_cmd_mailbox *inbox,
4161 struct list_head *rlist, int header_id)
4163 struct mac_res *res, *tmp;
4165 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4166 struct mlx4_net_trans_rule_hw_eth *eth_header;
4167 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4168 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4170 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4172 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4174 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4176 /* Clear a space in the inbox for eth header */
4177 switch (header_id) {
4178 case MLX4_NET_TRANS_RULE_ID_IPV4:
4180 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4181 memmove(ip_header, eth_header,
4182 sizeof(*ip_header) + sizeof(*l4_header));
4184 case MLX4_NET_TRANS_RULE_ID_TCP:
4185 case MLX4_NET_TRANS_RULE_ID_UDP:
4186 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4188 memmove(l4_header, eth_header, sizeof(*l4_header));
4193 list_for_each_entry_safe(res, tmp, rlist, list) {
4194 if (port == res->port) {
4195 be_mac = cpu_to_be64(res->mac << 16);
4200 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4205 memset(eth_header, 0, sizeof(*eth_header));
4206 eth_header->size = sizeof(*eth_header) >> 2;
4207 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4208 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4209 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
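/* Only the path-mask bits below may be set by a slave in UPDATE_QP;
 * any other bit in the masks is rejected before the command reaches
 * the firmware.
 */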
4215 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4216 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4217 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4218 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4219 struct mlx4_vhcr *vhcr,
4220 struct mlx4_cmd_mailbox *inbox,
4221 struct mlx4_cmd_mailbox *outbox,
4222 struct mlx4_cmd_info *cmd_info)
4225 u32 qpn = vhcr->in_modifier & 0xffffff;
4229 u64 pri_addr_path_mask;
4230 struct mlx4_update_qp_context *cmd;
4233 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4235 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4236 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4237 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4240 if ((pri_addr_path_mask &
4241 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4242 !(dev->caps.flags2 &
4243 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4245 "Src check LB for slave %d isn't supported\n",
4250 /* Just change the smac for the QP */
4251 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4253 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4257 port = (rqp->sched_queue >> 6 & 1) + 1;
4259 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4260 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4261 err = mac_find_smac_ix_in_slave(dev, slave, port,
4265 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4271 err = mlx4_cmd(dev, inbox->dma,
4272 vhcr->in_modifier, 0,
4273 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4276 mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4281 put_res(dev, slave, qpn, RES_QP);
4285 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4286 struct mlx4_vhcr *vhcr,
4287 struct mlx4_cmd_mailbox *inbox,
4288 struct mlx4_cmd_mailbox *outbox,
4289 struct mlx4_cmd_info *cmd)
4292 struct mlx4_priv *priv = mlx4_priv(dev);
4293 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4294 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4298 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4299 struct _rule_hw *rule_header;
4302 if (dev->caps.steering_mode !=
4303 MLX4_STEERING_MODE_DEVICE_MANAGED)
4306 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4307 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4308 if (ctrl->port <= 0)
4310 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4311 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4313 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4316 rule_header = (struct _rule_hw *)(ctrl + 1);
4317 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4319 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4320 handle_eth_header_mcast_prio(ctrl, rule_header);
4322 if (slave == dev->caps.function)
4325 switch (header_id) {
4326 case MLX4_NET_TRANS_RULE_ID_ETH:
4327 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4332 case MLX4_NET_TRANS_RULE_ID_IB:
4334 case MLX4_NET_TRANS_RULE_ID_IPV4:
4335 case MLX4_NET_TRANS_RULE_ID_TCP:
4336 case MLX4_NET_TRANS_RULE_ID_UDP:
4337 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4338 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4342 vhcr->in_modifier +=
4343 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4346 pr_err("Corrupted mailbox\n");
4352 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4353 vhcr->in_modifier, 0,
4354 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4359 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4361 mlx4_err(dev, "Failed to add flow steering resources\n");
4363 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4364 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4368 atomic_inc(&rqp->ref_count);
4370 put_res(dev, slave, qpn, RES_QP);
4374 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4375 struct mlx4_vhcr *vhcr,
4376 struct mlx4_cmd_mailbox *inbox,
4377 struct mlx4_cmd_mailbox *outbox,
4378 struct mlx4_cmd_info *cmd)
4382 struct res_fs_rule *rrule;
4384 if (dev->caps.steering_mode !=
4385 MLX4_STEERING_MODE_DEVICE_MANAGED)
4388 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4391 /* Release the rule from busy state before removal */
4392 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4393 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4397 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4399 mlx4_err(dev, "Failed to remove flow steering resources\n");
4403 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4404 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4407 atomic_dec(&rqp->ref_count);
4409 put_res(dev, slave, rrule->qpn, RES_QP);
4414 BUSY_MAX_RETRIES = 10
4417 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4418 struct mlx4_vhcr *vhcr,
4419 struct mlx4_cmd_mailbox *inbox,
4420 struct mlx4_cmd_mailbox *outbox,
4421 struct mlx4_cmd_info *cmd)
4424 int index = vhcr->in_modifier & 0xffff;
4426 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4430 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4431 put_res(dev, slave, index, RES_COUNTER);
4435 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4437 struct res_gid *rgid;
4438 struct res_gid *tmp;
4439 struct mlx4_qp qp; /* dummy for calling attach/detach */
4441 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4442 switch (dev->caps.steering_mode) {
4443 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4444 mlx4_flow_detach(dev, rgid->reg_id);
4446 case MLX4_STEERING_MODE_B0:
4447 qp.qpn = rqp->local_qpn;
4448 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4449 rgid->prot, rgid->steer);
4452 list_del(&rgid->list);
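/* Slave cleanup helpers: mark every resource of the given type that
 * the slave owns as busy, so no command wrapper can race with the
 * teardown. _move_all_busy() returns the number of resources that were
 * already busy; move_all_busy() retries for up to 5 seconds before
 * giving up and reporting the stragglers.
 */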
4457 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4458 enum mlx4_resource type, int print)
4460 struct mlx4_priv *priv = mlx4_priv(dev);
4461 struct mlx4_resource_tracker *tracker =
4462 &priv->mfunc.master.res_tracker;
4463 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4464 struct res_common *r;
4465 struct res_common *tmp;
4469 spin_lock_irq(mlx4_tlock(dev));
4470 list_for_each_entry_safe(r, tmp, rlist, list) {
4471 if (r->owner == slave) {
4473 if (r->state == RES_ANY_BUSY) {
4476 "%s id 0x%llx is busy\n",
4481 r->from_state = r->state;
4482 r->state = RES_ANY_BUSY;
4488 spin_unlock_irq(mlx4_tlock(dev));
4493 static int move_all_busy(struct mlx4_dev *dev, int slave,
4494 enum mlx4_resource type)
4496 unsigned long begin;
4501 busy = _move_all_busy(dev, slave, type, 0);
4502 if (time_after(jiffies, begin + 5 * HZ))
4509 busy = _move_all_busy(dev, slave, type, 1);
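/* Each rem_slave_*() walks the slave's resource list and unwinds every
 * entry through its state machine one step at a time (e.g. RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed), issuing the matching
 * firmware command (2RST_QP, HW2SW_*) where hardware ownership must be
 * revoked, until the resource is fully released.
 */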
4513 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4515 struct mlx4_priv *priv = mlx4_priv(dev);
4516 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4517 struct list_head *qp_list =
4518 &tracker->slave_list[slave].res_list[RES_QP];
4526 err = move_all_busy(dev, slave, RES_QP);
4528 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4531 spin_lock_irq(mlx4_tlock(dev));
4532 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4533 spin_unlock_irq(mlx4_tlock(dev));
4534 if (qp->com.owner == slave) {
4535 qpn = qp->com.res_id;
4536 detach_qp(dev, slave, qp);
4537 state = qp->com.from_state;
4538 while (state != 0) {
4540 case RES_QP_RESERVED:
4541 spin_lock_irq(mlx4_tlock(dev));
4542 rb_erase(&qp->com.node,
4543 &tracker->res_tree[RES_QP]);
4544 list_del(&qp->com.list);
4545 spin_unlock_irq(mlx4_tlock(dev));
4546 if (!valid_reserved(dev, slave, qpn)) {
4547 __mlx4_qp_release_range(dev, qpn, 1);
4548 mlx4_release_resource(dev, slave,
4555 if (!valid_reserved(dev, slave, qpn))
4556 __mlx4_qp_free_icm(dev, qpn);
4557 state = RES_QP_RESERVED;
4561 err = mlx4_cmd(dev, in_param,
4564 MLX4_CMD_TIME_CLASS_A,
4567 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4568 slave, qp->local_qpn);
4569 atomic_dec(&qp->rcq->ref_count);
4570 atomic_dec(&qp->scq->ref_count);
4571 atomic_dec(&qp->mtt->ref_count);
4573 atomic_dec(&qp->srq->ref_count);
4574 state = RES_QP_MAPPED;
4581 spin_lock_irq(mlx4_tlock(dev));
4583 spin_unlock_irq(mlx4_tlock(dev));
4586 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4588 struct mlx4_priv *priv = mlx4_priv(dev);
4589 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4590 struct list_head *srq_list =
4591 &tracker->slave_list[slave].res_list[RES_SRQ];
4592 struct res_srq *srq;
4593 struct res_srq *tmp;
4600 err = move_all_busy(dev, slave, RES_SRQ);
4602 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4605 spin_lock_irq(mlx4_tlock(dev));
4606 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4607 spin_unlock_irq(mlx4_tlock(dev));
4608 if (srq->com.owner == slave) {
4609 srqn = srq->com.res_id;
4610 state = srq->com.from_state;
4611 while (state != 0) {
4613 case RES_SRQ_ALLOCATED:
4614 __mlx4_srq_free_icm(dev, srqn);
4615 spin_lock_irq(mlx4_tlock(dev));
4616 rb_erase(&srq->com.node,
4617 &tracker->res_tree[RES_SRQ]);
4618 list_del(&srq->com.list);
4619 spin_unlock_irq(mlx4_tlock(dev));
4620 mlx4_release_resource(dev, slave,
4628 err = mlx4_cmd(dev, in_param, srqn, 1,
4630 MLX4_CMD_TIME_CLASS_A,
4633 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4636 atomic_dec(&srq->mtt->ref_count);
4638 atomic_dec(&srq->cq->ref_count);
4639 state = RES_SRQ_ALLOCATED;
4647 spin_lock_irq(mlx4_tlock(dev));
4649 spin_unlock_irq(mlx4_tlock(dev));
4652 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4654 struct mlx4_priv *priv = mlx4_priv(dev);
4655 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4656 struct list_head *cq_list =
4657 &tracker->slave_list[slave].res_list[RES_CQ];
4666 err = move_all_busy(dev, slave, RES_CQ);
4668 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4671 spin_lock_irq(mlx4_tlock(dev));
4672 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4673 spin_unlock_irq(mlx4_tlock(dev));
4674 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4675 cqn = cq->com.res_id;
4676 state = cq->com.from_state;
4677 while (state != 0) {
4679 case RES_CQ_ALLOCATED:
4680 __mlx4_cq_free_icm(dev, cqn);
4681 spin_lock_irq(mlx4_tlock(dev));
4682 rb_erase(&cq->com.node,
4683 &tracker->res_tree[RES_CQ]);
4684 list_del(&cq->com.list);
4685 spin_unlock_irq(mlx4_tlock(dev));
4686 mlx4_release_resource(dev, slave,
4694 err = mlx4_cmd(dev, in_param, cqn, 1,
4696 MLX4_CMD_TIME_CLASS_A,
4699 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4701 atomic_dec(&cq->mtt->ref_count);
4702 state = RES_CQ_ALLOCATED;
4710 spin_lock_irq(mlx4_tlock(dev));
4712 spin_unlock_irq(mlx4_tlock(dev));
4715 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4717 struct mlx4_priv *priv = mlx4_priv(dev);
4718 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4719 struct list_head *mpt_list =
4720 &tracker->slave_list[slave].res_list[RES_MPT];
4721 struct res_mpt *mpt;
4722 struct res_mpt *tmp;
4729 err = move_all_busy(dev, slave, RES_MPT);
4731 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4734 spin_lock_irq(mlx4_tlock(dev));
4735 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4736 spin_unlock_irq(mlx4_tlock(dev));
4737 if (mpt->com.owner == slave) {
4738 mptn = mpt->com.res_id;
4739 state = mpt->com.from_state;
4740 while (state != 0) {
4742 case RES_MPT_RESERVED:
4743 __mlx4_mpt_release(dev, mpt->key);
4744 spin_lock_irq(mlx4_tlock(dev));
4745 rb_erase(&mpt->com.node,
4746 &tracker->res_tree[RES_MPT]);
4747 list_del(&mpt->com.list);
4748 spin_unlock_irq(mlx4_tlock(dev));
4749 mlx4_release_resource(dev, slave,
4755 case RES_MPT_MAPPED:
4756 __mlx4_mpt_free_icm(dev, mpt->key);
4757 state = RES_MPT_RESERVED;
4762 err = mlx4_cmd(dev, in_param, mptn, 0,
4764 MLX4_CMD_TIME_CLASS_A,
4767 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4770 atomic_dec(&mpt->mtt->ref_count);
4771 state = RES_MPT_MAPPED;
4778 spin_lock_irq(mlx4_tlock(dev));
4780 spin_unlock_irq(mlx4_tlock(dev));
4783 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4785 struct mlx4_priv *priv = mlx4_priv(dev);
4786 struct mlx4_resource_tracker *tracker =
4787 &priv->mfunc.master.res_tracker;
4788 struct list_head *mtt_list =
4789 &tracker->slave_list[slave].res_list[RES_MTT];
4790 struct res_mtt *mtt;
4791 struct res_mtt *tmp;
4797 err = move_all_busy(dev, slave, RES_MTT);
4799 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4802 spin_lock_irq(mlx4_tlock(dev));
4803 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4804 spin_unlock_irq(mlx4_tlock(dev));
4805 if (mtt->com.owner == slave) {
4806 base = mtt->com.res_id;
4807 state = mtt->com.from_state;
4808 while (state != 0) {
4810 case RES_MTT_ALLOCATED:
4811 __mlx4_free_mtt_range(dev, base,
4813 spin_lock_irq(mlx4_tlock(dev));
4814 rb_erase(&mtt->com.node,
4815 &tracker->res_tree[RES_MTT]);
4816 list_del(&mtt->com.list);
4817 spin_unlock_irq(mlx4_tlock(dev));
4818 mlx4_release_resource(dev, slave, RES_MTT,
4819 1 << mtt->order, 0);
4829 spin_lock_irq(mlx4_tlock(dev));
4831 spin_unlock_irq(mlx4_tlock(dev));
4834 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4836 struct mlx4_priv *priv = mlx4_priv(dev);
4837 struct mlx4_resource_tracker *tracker =
4838 &priv->mfunc.master.res_tracker;
4839 struct list_head *fs_rule_list =
4840 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4841 struct res_fs_rule *fs_rule;
4842 struct res_fs_rule *tmp;
4847 err = move_all_busy(dev, slave, RES_FS_RULE);
4849 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4852 spin_lock_irq(mlx4_tlock(dev));
4853 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4854 spin_unlock_irq(mlx4_tlock(dev));
4855 if (fs_rule->com.owner == slave) {
4856 base = fs_rule->com.res_id;
4857 state = fs_rule->com.from_state;
4858 while (state != 0) {
4860 case RES_FS_RULE_ALLOCATED:
4862 err = mlx4_cmd(dev, base, 0, 0,
4863 MLX4_QP_FLOW_STEERING_DETACH,
4864 MLX4_CMD_TIME_CLASS_A,
4867 spin_lock_irq(mlx4_tlock(dev));
4868 rb_erase(&fs_rule->com.node,
4869 &tracker->res_tree[RES_FS_RULE]);
4870 list_del(&fs_rule->com.list);
4871 spin_unlock_irq(mlx4_tlock(dev));
4881 spin_lock_irq(mlx4_tlock(dev));
4883 spin_unlock_irq(mlx4_tlock(dev));
4886 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4888 struct mlx4_priv *priv = mlx4_priv(dev);
4889 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4890 struct list_head *eq_list =
4891 &tracker->slave_list[slave].res_list[RES_EQ];
4899 err = move_all_busy(dev, slave, RES_EQ);
4901 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4904 spin_lock_irq(mlx4_tlock(dev));
4905 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4906 spin_unlock_irq(mlx4_tlock(dev));
4907 if (eq->com.owner == slave) {
4908 eqn = eq->com.res_id;
4909 state = eq->com.from_state;
4910 while (state != 0) {
4912 case RES_EQ_RESERVED:
4913 spin_lock_irq(mlx4_tlock(dev));
4914 rb_erase(&eq->com.node,
4915 &tracker->res_tree[RES_EQ]);
4916 list_del(&eq->com.list);
4917 spin_unlock_irq(mlx4_tlock(dev));
4923 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4924 1, MLX4_CMD_HW2SW_EQ,
4925 MLX4_CMD_TIME_CLASS_A,
4928 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4929 slave, eqn & 0x3ff);
4930 atomic_dec(&eq->mtt->ref_count);
4931 state = RES_EQ_RESERVED;
4939 spin_lock_irq(mlx4_tlock(dev));
4941 spin_unlock_irq(mlx4_tlock(dev));
4944 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4946 struct mlx4_priv *priv = mlx4_priv(dev);
4947 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4948 struct list_head *counter_list =
4949 &tracker->slave_list[slave].res_list[RES_COUNTER];
4950 struct res_counter *counter;
4951 struct res_counter *tmp;
4955 err = move_all_busy(dev, slave, RES_COUNTER);
4957 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4960 spin_lock_irq(mlx4_tlock(dev));
4961 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4962 if (counter->com.owner == slave) {
4963 index = counter->com.res_id;
4964 rb_erase(&counter->com.node,
4965 &tracker->res_tree[RES_COUNTER]);
4966 list_del(&counter->com.list);
4968 __mlx4_counter_free(dev, index);
4969 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4972 spin_unlock_irq(mlx4_tlock(dev));
4975 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4977 struct mlx4_priv *priv = mlx4_priv(dev);
4978 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4979 struct list_head *xrcdn_list =
4980 &tracker->slave_list[slave].res_list[RES_XRCD];
4981 struct res_xrcdn *xrcd;
4982 struct res_xrcdn *tmp;
4986 err = move_all_busy(dev, slave, RES_XRCD);
4988 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4991 spin_lock_irq(mlx4_tlock(dev));
4992 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4993 if (xrcd->com.owner == slave) {
4994 xrcdn = xrcd->com.res_id;
4995 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4996 list_del(&xrcd->com.list);
4998 __mlx4_xrcd_free(dev, xrcdn);
5001 spin_unlock_irq(mlx4_tlock(dev));
5004 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5006 struct mlx4_priv *priv = mlx4_priv(dev);
5007 mlx4_reset_roce_gids(dev, slave);
5008 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5009 rem_slave_vlans(dev, slave);
5010 rem_slave_macs(dev, slave);
5011 rem_slave_fs_rule(dev, slave);
5012 rem_slave_qps(dev, slave);
5013 rem_slave_srqs(dev, slave);
5014 rem_slave_cqs(dev, slave);
5015 rem_slave_mrs(dev, slave);
5016 rem_slave_eqs(dev, slave);
5017 rem_slave_mtts(dev, slave);
5018 rem_slave_counters(dev, slave);
5019 rem_slave_xrcdns(dev, slave);
5020 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
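/* Deferred work for immediate VST/VGT changes on a VF: issue UPDATE_QP
 * on every eligible QP the slave owns on the port, either forcing the
 * new vlan parameters (VST) or restoring the values the VF originally
 * supplied (VGT), which were saved at INIT2RTR time.
 */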
5023 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5025 struct mlx4_vf_immed_vlan_work *work =
5026 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5027 struct mlx4_cmd_mailbox *mailbox;
5028 struct mlx4_update_qp_context *upd_context;
5029 struct mlx4_dev *dev = &work->priv->dev;
5030 struct mlx4_resource_tracker *tracker =
5031 &work->priv->mfunc.master.res_tracker;
5032 struct list_head *qp_list =
5033 &tracker->slave_list[work->slave].res_list[RES_QP];
5036 u64 qp_path_mask_vlan_ctrl =
5037 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5038 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5039 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5040 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5041 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5042 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5044 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5045 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5046 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5047 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5048 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5049 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5050 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5053 int port, errors = 0;
5056 if (mlx4_is_slave(dev)) {
5057 mlx4_warn(dev, "Trying to update QP in slave %d\n",
5062 mailbox = mlx4_alloc_cmd_mailbox(dev);
5063 if (IS_ERR(mailbox))
5065 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5066 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5067 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5068 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5069 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5070 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5071 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5072 else if (!work->vlan_id)
5073 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5074 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5076 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5077 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5078 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5080 upd_context = mailbox->buf;
5081 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5083 spin_lock_irq(mlx4_tlock(dev));
5084 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5085 spin_unlock_irq(mlx4_tlock(dev));
5086 if (qp->com.owner == work->slave) {
5087 if (qp->com.from_state != RES_QP_HW ||
5088 !qp->sched_queue || /* no INIT2RTR trans yet */
5089 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5090 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5091 spin_lock_irq(mlx4_tlock(dev));
5094 port = (qp->sched_queue >> 6 & 1) + 1;
5095 if (port != work->port) {
5096 spin_lock_irq(mlx4_tlock(dev));
5099 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5100 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5102 upd_context->primary_addr_path_mask =
5103 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5104 if (work->vlan_id == MLX4_VGT) {
5105 upd_context->qp_context.param3 = qp->param3;
5106 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5107 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5108 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5109 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5110 upd_context->qp_context.pri_path.feup = qp->feup;
5111 upd_context->qp_context.pri_path.sched_queue =
5114 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5115 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5116 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5117 upd_context->qp_context.pri_path.fvl_rx =
5118 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5119 upd_context->qp_context.pri_path.fl =
5120 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5121 upd_context->qp_context.pri_path.feup =
5122 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5123 upd_context->qp_context.pri_path.sched_queue =
5124 qp->sched_queue & 0xC7;
5125 upd_context->qp_context.pri_path.sched_queue |=
5126 ((work->qos & 0x7) << 3);
5127 upd_context->qp_mask |=
5129 MLX4_UPD_QP_MASK_QOS_VPP);
5130 upd_context->qp_context.qos_vport =
5134 err = mlx4_cmd(dev, mailbox->dma,
5135 qp->local_qpn & 0xffffff,
5136 0, MLX4_CMD_UPDATE_QP,
5137 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5139 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5140 work->slave, port, qp->local_qpn, err);
5144 spin_lock_irq(mlx4_tlock(dev));
5146 spin_unlock_irq(mlx4_tlock(dev));
5147 mlx4_free_cmd_mailbox(dev, mailbox);
5150 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5151 errors, work->slave, work->port);
5153 /* unregister the previous vlan_id if needed, provided there were no
5154 * errors while updating the QPs
5156 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5157 NO_INDX != work->orig_vlan_ix)
5158 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5159 work->orig_vlan_id);