2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
49 #include "mlx4_stats.h"
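/*
 * Resource tracker overview (PF/master side): every HW resource handed to a
 * slave (QP, CQ, SRQ, MPT, MTT, EQ, MAC, VLAN, counter, XRC domain,
 * flow-steering rule) is recorded per slave so that quotas can be enforced
 * and anything a misbehaving or removed VF left behind can be reclaimed.
 */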
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
56 struct list_head list;
64 struct list_head list;
72 struct list_head list;
87 struct list_head list;
89 enum mlx4_protocol prot;
90 enum mlx4_steer_type steer;
95 RES_QP_BUSY = RES_ANY_BUSY,
97 /* QP number was allocated */
100 /* ICM memory for QP context was mapped */
103 /* QP is in hw ownership */
108 struct res_common com;
113 struct list_head mcg_list;
118 /* saved qp params before VST enforcement in order to restore on VGT */
128 enum res_mtt_states {
129 RES_MTT_BUSY = RES_ANY_BUSY,
133 static inline const char *mtt_states_str(enum res_mtt_states state)
136 case RES_MTT_BUSY: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
143 struct res_common com;
148 enum res_mpt_states {
149 RES_MPT_BUSY = RES_ANY_BUSY,
156 struct res_common com;
162 RES_EQ_BUSY = RES_ANY_BUSY,
168 struct res_common com;
173 RES_CQ_BUSY = RES_ANY_BUSY,
179 struct res_common com;
184 enum res_srq_states {
185 RES_SRQ_BUSY = RES_ANY_BUSY,
191 struct res_common com;
197 enum res_counter_states {
198 RES_COUNTER_BUSY = RES_ANY_BUSY,
199 RES_COUNTER_ALLOCATED,
203 struct res_common com;
207 enum res_xrcdn_states {
208 RES_XRCD_BUSY = RES_ANY_BUSY,
213 struct res_common com;
217 enum res_fs_rule_states {
218 RES_FS_RULE_BUSY = RES_ANY_BUSY,
219 RES_FS_RULE_ALLOCATED,
223 struct res_common com;
225 /* VF DMFS mbox with port flipped */
227 /* > 0 --> apply mirror when getting into HA mode */
228 /* = 0 --> un-apply mirror when getting out of HA mode */
230 struct list_head mirr_list;
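/*
 * Tracked resources live in per-type red-black trees keyed by res_id and in
 * per-slave lists; the two helpers below implement lookup and insert on
 * those trees.
 */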
234 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
236 struct rb_node *node = root->rb_node;
239 struct res_common *res = container_of(node, struct res_common,
242 if (res_id < res->res_id)
243 node = node->rb_left;
244 else if (res_id > res->res_id)
245 node = node->rb_right;
252 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
254 struct rb_node **new = &(root->rb_node), *parent = NULL;
256 /* Figure out where to put new node */
258 struct res_common *this = container_of(*new, struct res_common,
262 if (res->res_id < this->res_id)
263 new = &((*new)->rb_left);
264 else if (res->res_id > this->res_id)
265 new = &((*new)->rb_right);
270 /* Add new node and rebalance tree. */
271 rb_link_node(&res->node, parent, new);
272 rb_insert_color(&res->node, root);
287 static const char *resource_str(enum mlx4_resource rt)
290 case RES_QP: return "RES_QP";
291 case RES_CQ: return "RES_CQ";
292 case RES_SRQ: return "RES_SRQ";
293 case RES_MPT: return "RES_MPT";
294 case RES_MTT: return "RES_MTT";
295 case RES_MAC: return "RES_MAC";
296 case RES_VLAN: return "RES_VLAN";
297 case RES_EQ: return "RES_EQ";
298 case RES_COUNTER: return "RES_COUNTER";
299 case RES_FS_RULE: return "RES_FS_RULE";
300 case RES_XRCD: return "RES_XRCD";
301 default: return "Unknown resource type !!!";
305 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
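/*
 * Quota accounting: each slave has a hard quota and a guaranteed share per
 * resource type. Requests beyond the guaranteed share are served from the
 * shared free pool as long as the reserved portion (the sum of all
 * guarantees) is not touched; mlx4_release_resource() undoes this
 * accounting.
 */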
306 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
307 enum mlx4_resource res_type, int count,
310 struct mlx4_priv *priv = mlx4_priv(dev);
311 struct resource_allocator *res_alloc =
312 &priv->mfunc.master.res_tracker.res_alloc[res_type];
314 int allocated, free, reserved, guaranteed, from_free;
317 if (slave > dev->persist->num_vfs)
320 spin_lock(&res_alloc->alloc_lock);
321 allocated = (port > 0) ?
322 res_alloc->allocated[(port - 1) *
323 (dev->persist->num_vfs + 1) + slave] :
324 res_alloc->allocated[slave];
325 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
327 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
328 res_alloc->res_reserved;
329 guaranteed = res_alloc->guaranteed[slave];
331 if (allocated + count > res_alloc->quota[slave]) {
332 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
333 slave, port, resource_str(res_type), count,
334 allocated, res_alloc->quota[slave]);
338 if (allocated + count <= guaranteed) {
342 /* portion may need to be obtained from free area */
343 if (guaranteed - allocated > 0)
344 from_free = count - (guaranteed - allocated);
348 from_rsvd = count - from_free;
350 if (free - from_free >= reserved)
353 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
354 slave, port, resource_str(res_type), free,
355 from_free, reserved);
359 /* grant the request */
361 res_alloc->allocated[(port - 1) *
362 (dev->persist->num_vfs + 1) + slave] += count;
363 res_alloc->res_port_free[port - 1] -= count;
364 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
366 res_alloc->allocated[slave] += count;
367 res_alloc->res_free -= count;
368 res_alloc->res_reserved -= from_rsvd;
373 spin_unlock(&res_alloc->alloc_lock);
377 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
378 enum mlx4_resource res_type, int count,
381 struct mlx4_priv *priv = mlx4_priv(dev);
382 struct resource_allocator *res_alloc =
383 &priv->mfunc.master.res_tracker.res_alloc[res_type];
384 int allocated, guaranteed, from_rsvd;
386 if (slave > dev->persist->num_vfs)
389 spin_lock(&res_alloc->alloc_lock);
391 allocated = (port > 0) ?
392 res_alloc->allocated[(port - 1) *
393 (dev->persist->num_vfs + 1) + slave] :
394 res_alloc->allocated[slave];
395 guaranteed = res_alloc->guaranteed[slave];
397 if (allocated - count >= guaranteed) {
400 /* portion may need to be returned to reserved area */
401 if (allocated - guaranteed > 0)
402 from_rsvd = count - (allocated - guaranteed);
408 res_alloc->allocated[(port - 1) *
409 (dev->persist->num_vfs + 1) + slave] -= count;
410 res_alloc->res_port_free[port - 1] += count;
411 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
413 res_alloc->allocated[slave] -= count;
414 res_alloc->res_free += count;
415 res_alloc->res_reserved += from_rsvd;
418 spin_unlock(&res_alloc->alloc_lock);
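/*
 * Default per-function quotas: each function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and may allocate up to
 * num_instances / 2 plus its guarantee; the PF additionally keeps the
 * firmware-reserved MTTs for itself.
 */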
422 static inline void initialize_res_quotas(struct mlx4_dev *dev,
423 struct resource_allocator *res_alloc,
424 enum mlx4_resource res_type,
425 int vf, int num_instances)
427 res_alloc->guaranteed[vf] = num_instances /
428 (2 * (dev->persist->num_vfs + 1));
429 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
430 if (vf == mlx4_master_func_num(dev)) {
431 res_alloc->res_free = num_instances;
432 if (res_type == RES_MTT) {
433 /* reserved mtts will be taken out of the PF allocation */
434 res_alloc->res_free += dev->caps.reserved_mtts;
435 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
436 res_alloc->quota[vf] += dev->caps.reserved_mtts;
441 void mlx4_init_quotas(struct mlx4_dev *dev)
443 struct mlx4_priv *priv = mlx4_priv(dev);
446 /* quotas for VFs are initialized in mlx4_slave_cap */
447 if (mlx4_is_slave(dev))
450 if (!mlx4_is_mfunc(dev)) {
451 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
452 mlx4_num_reserved_sqps(dev);
453 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
454 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
455 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
456 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
460 pf = mlx4_master_func_num(dev);
462 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
464 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
466 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
468 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
470 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
473 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
475 /* reduce the sink counter */
476 return (dev->caps.max_counters - 1 -
477 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
478 / MLX4_MAX_PORTS;
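/*
 * Tracker setup at PF init time: per-slave resource lists, per-type
 * rb-trees, and quota/guarantee arrays sized for num_vfs + 1 functions;
 * for MAC and VLAN the allocation counters are additionally kept per port.
 */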
481 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
483 struct mlx4_priv *priv = mlx4_priv(dev);
486 int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
488 priv->mfunc.master.res_tracker.slave_list =
489 kzalloc(dev->num_slaves * sizeof(struct slave_list),
491 if (!priv->mfunc.master.res_tracker.slave_list)
494 for (i = 0 ; i < dev->num_slaves; i++) {
495 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
496 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
497 slave_list[i].res_list[t]);
498 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
501 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
503 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
504 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
506 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
507 struct resource_allocator *res_alloc =
508 &priv->mfunc.master.res_tracker.res_alloc[i];
509 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
510 sizeof(int), GFP_KERNEL);
511 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
512 sizeof(int), GFP_KERNEL);
513 if (i == RES_MAC || i == RES_VLAN)
514 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
515 (dev->persist->num_vfs
517 sizeof(int), GFP_KERNEL);
519 res_alloc->allocated = kzalloc((dev->persist->
521 sizeof(int), GFP_KERNEL);
522 /* Reduce the sink counter */
523 if (i == RES_COUNTER)
524 res_alloc->res_free = dev->caps.max_counters - 1;
526 if (!res_alloc->quota || !res_alloc->guaranteed ||
527 !res_alloc->allocated)
530 spin_lock_init(&res_alloc->alloc_lock);
531 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
532 struct mlx4_active_ports actv_ports =
533 mlx4_get_active_ports(dev, t);
536 initialize_res_quotas(dev, res_alloc, RES_QP,
537 t, dev->caps.num_qps -
538 dev->caps.reserved_qps -
539 mlx4_num_reserved_sqps(dev));
542 initialize_res_quotas(dev, res_alloc, RES_CQ,
543 t, dev->caps.num_cqs -
544 dev->caps.reserved_cqs);
547 initialize_res_quotas(dev, res_alloc, RES_SRQ,
548 t, dev->caps.num_srqs -
549 dev->caps.reserved_srqs);
552 initialize_res_quotas(dev, res_alloc, RES_MPT,
553 t, dev->caps.num_mpts -
554 dev->caps.reserved_mrws);
557 initialize_res_quotas(dev, res_alloc, RES_MTT,
558 t, dev->caps.num_mtts -
559 dev->caps.reserved_mtts);
562 if (t == mlx4_master_func_num(dev)) {
563 int max_vfs_pport = 0;
564 /* Calculate the max vfs per port for both ports */
566 for (j = 0; j < dev->caps.num_ports;
568 struct mlx4_slaves_pport slaves_pport =
569 mlx4_phys_to_slaves_pport(dev, j + 1);
570 unsigned current_slaves =
571 bitmap_weight(slaves_pport.slaves,
572 dev->caps.num_ports) - 1;
573 if (max_vfs_pport < current_slaves)
577 res_alloc->quota[t] =
580 res_alloc->guaranteed[t] = 2;
581 for (j = 0; j < MLX4_MAX_PORTS; j++)
582 res_alloc->res_port_free[j] =
585 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
586 res_alloc->guaranteed[t] = 2;
590 if (t == mlx4_master_func_num(dev)) {
591 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
592 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
593 for (j = 0; j < MLX4_MAX_PORTS; j++)
594 res_alloc->res_port_free[j] =
597 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
598 res_alloc->guaranteed[t] = 0;
602 res_alloc->quota[t] = dev->caps.max_counters;
603 if (t == mlx4_master_func_num(dev))
604 res_alloc->guaranteed[t] =
605 MLX4_PF_COUNTERS_PER_PORT *
607 else if (t <= max_vfs_guarantee_counter)
608 res_alloc->guaranteed[t] =
609 MLX4_VF_COUNTERS_PER_PORT *
612 res_alloc->guaranteed[t] = 0;
613 res_alloc->res_free -= res_alloc->guaranteed[t];
618 if (i == RES_MAC || i == RES_VLAN) {
619 for (j = 0; j < dev->caps.num_ports; j++)
620 if (test_bit(j, actv_ports.ports))
621 res_alloc->res_port_rsvd[j] +=
622 res_alloc->guaranteed[t];
624 res_alloc->res_reserved += res_alloc->guaranteed[t];
628 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
632 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
633 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
634 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
635 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
636 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
637 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
638 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
643 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
644 enum mlx4_res_tracker_free_type type)
646 struct mlx4_priv *priv = mlx4_priv(dev);
649 if (priv->mfunc.master.res_tracker.slave_list) {
650 if (type != RES_TR_FREE_STRUCTS_ONLY) {
651 for (i = 0; i < dev->num_slaves; i++) {
652 if (type == RES_TR_FREE_ALL ||
653 dev->caps.function != i)
654 mlx4_delete_all_resources_for_slave(dev, i);
656 /* free master's vlans */
657 i = dev->caps.function;
658 mlx4_reset_roce_gids(dev, i);
659 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
660 rem_slave_vlans(dev, i);
661 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
664 if (type != RES_TR_FREE_SLAVES_ONLY) {
665 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
666 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
667 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
668 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
669 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
670 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
671 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
673 kfree(priv->mfunc.master.res_tracker.slave_list);
674 priv->mfunc.master.res_tracker.slave_list = NULL;
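/*
 * P_Key index paravirtualization: the index a slave writes into the QP
 * context is virtual; translate it to the physical index through the
 * virt2phys_pkey table before the command reaches the HW.
 */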
679 static void update_pkey_index(struct mlx4_dev *dev, int slave,
680 struct mlx4_cmd_mailbox *inbox)
682 u8 sched = *(u8 *)(inbox->buf + 64);
683 u8 orig_index = *(u8 *)(inbox->buf + 35);
685 struct mlx4_priv *priv = mlx4_priv(dev);
688 port = (sched >> 6 & 1) + 1;
690 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
691 *(u8 *)(inbox->buf + 35) = new_index;
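/*
 * GID index paravirtualization: rewrite the mgid_index in the primary (and,
 * for connected transports, alternate) path so each slave only sees its own
 * slice of the GID table; on Ethernet ports the slave's base GID index is
 * added in.
 */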
694 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
697 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
698 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
699 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
702 if (MLX4_QP_ST_UD == ts) {
703 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
704 if (mlx4_is_eth(dev, port))
705 qp_ctx->pri_path.mgid_index =
706 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
708 qp_ctx->pri_path.mgid_index = slave | 0x80;
710 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
711 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
712 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
713 if (mlx4_is_eth(dev, port)) {
714 qp_ctx->pri_path.mgid_index +=
715 mlx4_get_base_gid_ix(dev, slave, port);
716 qp_ctx->pri_path.mgid_index &= 0x7f;
718 qp_ctx->pri_path.mgid_index = slave & 0x7F;
721 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
722 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
723 if (mlx4_is_eth(dev, port)) {
724 qp_ctx->alt_path.mgid_index +=
725 mlx4_get_base_gid_ix(dev, slave, port);
726 qp_ctx->alt_path.mgid_index &= 0x7f;
728 qp_ctx->alt_path.mgid_index = slave & 0x7F;
734 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
735 int slave, int port);
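/*
 * VST enforcement: when the admin forced a default VLAN/QoS on this VF,
 * stamp the VLAN index, vlan_control filtering, scheduling-queue priority
 * and (when spoof checking is on) the forced source MAC into the QP context
 * the slave submitted, so the slave cannot bypass the administrative
 * settings.
 */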
737 static int update_vport_qp_param(struct mlx4_dev *dev,
738 struct mlx4_cmd_mailbox *inbox,
741 struct mlx4_qp_context *qpc = inbox->buf + 8;
742 struct mlx4_vport_oper_state *vp_oper;
743 struct mlx4_priv *priv;
747 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
748 priv = mlx4_priv(dev);
749 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
750 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
752 err = handle_counter(dev, qpc, slave, port);
756 if (MLX4_VGT != vp_oper->state.default_vlan) {
757 /* the reserved QPs (special, proxy, tunnel)
758 * do not operate over vlans
759 */
760 if (mlx4_is_qp_reserved(dev, qpn))
763 /* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */
764 if (qp_type == MLX4_QP_ST_UD ||
765 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
766 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
767 *(__be32 *)inbox->buf =
768 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
769 MLX4_QP_OPTPAR_VLAN_STRIPPING);
770 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
772 struct mlx4_update_qp_params params = {.flags = 0};
774 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms);
780 /* preserve IF_COUNTER flag */
781 qpc->pri_path.vlan_control &=
782 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
783 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
784 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
785 qpc->pri_path.vlan_control |=
786 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
787 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
788 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
789 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
790 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
791 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
792 } else if (0 != vp_oper->state.default_vlan) {
793 qpc->pri_path.vlan_control |=
794 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
795 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
796 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
797 } else { /* priority tagged */
798 qpc->pri_path.vlan_control |=
799 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
800 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
803 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
804 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
805 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
806 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
807 qpc->pri_path.sched_queue &= 0xC7;
808 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
809 qpc->qos_vport = vp_oper->state.qos_vport;
811 if (vp_oper->state.spoofchk) {
812 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
813 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
819 static int mpt_mask(struct mlx4_dev *dev)
821 return dev->caps.num_mpts - 1;
824 static void *find_res(struct mlx4_dev *dev, u64 res_id,
825 enum mlx4_resource type)
827 struct mlx4_priv *priv = mlx4_priv(dev);
829 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
830 res_id);
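/*
 * get_res()/put_res(): take a tracked resource for the duration of a
 * command by marking it RES_ANY_BUSY (only if it is owned by the calling
 * slave and not already busy), and restore its previous state afterwards.
 */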
833 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
834 enum mlx4_resource type,
837 struct res_common *r;
840 spin_lock_irq(mlx4_tlock(dev));
841 r = find_res(dev, res_id, type);
847 if (r->state == RES_ANY_BUSY) {
852 if (r->owner != slave) {
857 r->from_state = r->state;
858 r->state = RES_ANY_BUSY;
861 *((struct res_common **)res) = r;
864 spin_unlock_irq(mlx4_tlock(dev));
868 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
869 enum mlx4_resource type,
870 u64 res_id, int *slave)
873 struct res_common *r;
879 spin_lock(mlx4_tlock(dev));
881 r = find_res(dev, id, type);
886 spin_unlock(mlx4_tlock(dev));
891 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
892 enum mlx4_resource type)
894 struct res_common *r;
896 spin_lock_irq(mlx4_tlock(dev));
897 r = find_res(dev, res_id, type);
899 r->state = r->from_state;
900 spin_unlock_irq(mlx4_tlock(dev));
903 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
904 u64 in_param, u64 *out_param, int port);
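/*
 * Counter handling for QP transitions: if the counter index in the QPC is
 * already a counter owned by this slave, just record the port it is bound
 * to; otherwise look for one of the slave's counters on that port and, if
 * none exists, try to allocate a new one (counter_idx starts out as the
 * sink counter index).
 */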
906 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
909 struct res_common *r;
910 struct res_counter *counter;
913 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) {
921 counter = container_of(r, struct res_counter, com);
923 counter->port = port;
926 spin_unlock_irq(mlx4_tlock(dev));
930 static int handle_unexisting_counter(struct mlx4_dev *dev,
931 struct mlx4_qp_context *qpc, u8 slave,
934 struct mlx4_priv *priv = mlx4_priv(dev);
935 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
936 struct res_common *tmp;
937 struct res_counter *counter;
938 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
941 spin_lock_irq(mlx4_tlock(dev));
942 list_for_each_entry(tmp,
943 &tracker->slave_list[slave].res_list[RES_COUNTER],
945 counter = container_of(tmp, struct res_counter, com);
946 if (port == counter->port) {
947 qpc->pri_path.counter_index = counter->com.res_id;
948 spin_unlock_irq(mlx4_tlock(dev));
952 spin_unlock_irq(mlx4_tlock(dev));
954 /* No existing counter, need to allocate a new counter */
955 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
957 if (err == -ENOENT) {
959 } else if (err && err != -ENOSPC) {
960 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
961 __func__, slave, err);
963 qpc->pri_path.counter_index = counter_idx;
964 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
965 __func__, slave, qpc->pri_path.counter_index);
972 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
975 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
976 return handle_existing_counter(dev, slave, port,
977 qpc->pri_path.counter_index);
979 return handle_unexisting_counter(dev, qpc, slave, port);
982 static struct res_common *alloc_qp_tr(int id)
986 ret = kzalloc(sizeof *ret, GFP_KERNEL);
990 ret->com.res_id = id;
991 ret->com.state = RES_QP_RESERVED;
993 INIT_LIST_HEAD(&ret->mcg_list);
994 spin_lock_init(&ret->mcg_spl);
995 atomic_set(&ret->ref_count, 0);
1000 static struct res_common *alloc_mtt_tr(int id, int order)
1002 struct res_mtt *ret;
1004 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1008 ret->com.res_id = id;
1010 ret->com.state = RES_MTT_ALLOCATED;
1011 atomic_set(&ret->ref_count, 0);
1016 static struct res_common *alloc_mpt_tr(int id, int key)
1018 struct res_mpt *ret;
1020 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1024 ret->com.res_id = id;
1025 ret->com.state = RES_MPT_RESERVED;
1031 static struct res_common *alloc_eq_tr(int id)
1035 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1039 ret->com.res_id = id;
1040 ret->com.state = RES_EQ_RESERVED;
1045 static struct res_common *alloc_cq_tr(int id)
1049 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1053 ret->com.res_id = id;
1054 ret->com.state = RES_CQ_ALLOCATED;
1055 atomic_set(&ret->ref_count, 0);
1060 static struct res_common *alloc_srq_tr(int id)
1062 struct res_srq *ret;
1064 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1068 ret->com.res_id = id;
1069 ret->com.state = RES_SRQ_ALLOCATED;
1070 atomic_set(&ret->ref_count, 0);
1075 static struct res_common *alloc_counter_tr(int id, int port)
1077 struct res_counter *ret;
1079 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1083 ret->com.res_id = id;
1084 ret->com.state = RES_COUNTER_ALLOCATED;
1090 static struct res_common *alloc_xrcdn_tr(int id)
1092 struct res_xrcdn *ret;
1094 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1098 ret->com.res_id = id;
1099 ret->com.state = RES_XRCD_ALLOCATED;
1104 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1106 struct res_fs_rule *ret;
1108 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1112 ret->com.res_id = id;
1113 ret->com.state = RES_FS_RULE_ALLOCATED;
1118 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1121 struct res_common *ret;
1125 ret = alloc_qp_tr(id);
1128 ret = alloc_mpt_tr(id, extra);
1131 ret = alloc_mtt_tr(id, extra);
1134 ret = alloc_eq_tr(id);
1137 ret = alloc_cq_tr(id);
1140 ret = alloc_srq_tr(id);
1143 pr_err("implementation missing\n");
1146 ret = alloc_counter_tr(id, extra);
1149 ret = alloc_xrcdn_tr(id);
1152 ret = alloc_fs_rule_tr(id, extra);
1163 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1164 struct mlx4_counter *data)
1166 struct mlx4_priv *priv = mlx4_priv(dev);
1167 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1168 struct res_common *tmp;
1169 struct res_counter *counter;
1173 memset(data, 0, sizeof(*data));
1175 counters_arr = kmalloc_array(dev->caps.max_counters,
1176 sizeof(*counters_arr), GFP_KERNEL);
1180 spin_lock_irq(mlx4_tlock(dev));
1181 list_for_each_entry(tmp,
1182 &tracker->slave_list[slave].res_list[RES_COUNTER],
1184 counter = container_of(tmp, struct res_counter, com);
1185 if (counter->port == port) {
1186 counters_arr[i] = (int)tmp->res_id;
1190 spin_unlock_irq(mlx4_tlock(dev));
1191 counters_arr[i] = -1;
1195 while (counters_arr[i] != -1) {
1196 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1199 memset(data, 0, sizeof(*data));
1206 kfree(counters_arr);
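/*
 * add_res_range()/rem_res_range(): register or unregister a contiguous
 * range of resource IDs for a slave, inserting each entry into the per-type
 * rb-tree and the slave's list, and rolling the whole range back if any ID
 * is already tracked or insertion fails.
 */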
1210 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1211 enum mlx4_resource type, int extra)
1215 struct mlx4_priv *priv = mlx4_priv(dev);
1216 struct res_common **res_arr;
1217 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1218 struct rb_root *root = &tracker->res_tree[type];
1220 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1224 for (i = 0; i < count; ++i) {
1225 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1227 for (--i; i >= 0; --i)
1235 spin_lock_irq(mlx4_tlock(dev));
1236 for (i = 0; i < count; ++i) {
1237 if (find_res(dev, base + i, type)) {
1241 err = res_tracker_insert(root, res_arr[i]);
1244 list_add_tail(&res_arr[i]->list,
1245 &tracker->slave_list[slave].res_list[type]);
1247 spin_unlock_irq(mlx4_tlock(dev));
1253 for (--i; i >= 0; --i) {
1254 rb_erase(&res_arr[i]->node, root);
1255 list_del_init(&res_arr[i]->list);
1258 spin_unlock_irq(mlx4_tlock(dev));
1260 for (i = 0; i < count; ++i)
1268 static int remove_qp_ok(struct res_qp *res)
1270 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1271 !list_empty(&res->mcg_list)) {
1272 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1273 res->com.state, atomic_read(&res->ref_count));
1275 } else if (res->com.state != RES_QP_RESERVED) {
1282 static int remove_mtt_ok(struct res_mtt *res, int order)
1284 if (res->com.state == RES_MTT_BUSY ||
1285 atomic_read(&res->ref_count)) {
1286 pr_devel("%s-%d: state %s, ref_count %d\n",
1288 mtt_states_str(res->com.state),
1289 atomic_read(&res->ref_count));
1291 } else if (res->com.state != RES_MTT_ALLOCATED)
1293 else if (res->order != order)
1299 static int remove_mpt_ok(struct res_mpt *res)
1301 if (res->com.state == RES_MPT_BUSY)
1303 else if (res->com.state != RES_MPT_RESERVED)
1309 static int remove_eq_ok(struct res_eq *res)
1311 if (res->com.state == RES_EQ_BUSY)
1313 else if (res->com.state != RES_EQ_RESERVED)
1319 static int remove_counter_ok(struct res_counter *res)
1321 if (res->com.state == RES_COUNTER_BUSY)
1323 else if (res->com.state != RES_COUNTER_ALLOCATED)
1329 static int remove_xrcdn_ok(struct res_xrcdn *res)
1331 if (res->com.state == RES_XRCD_BUSY)
1333 else if (res->com.state != RES_XRCD_ALLOCATED)
1339 static int remove_fs_rule_ok(struct res_fs_rule *res)
1341 if (res->com.state == RES_FS_RULE_BUSY)
1343 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1349 static int remove_cq_ok(struct res_cq *res)
1351 if (res->com.state == RES_CQ_BUSY)
1353 else if (res->com.state != RES_CQ_ALLOCATED)
1359 static int remove_srq_ok(struct res_srq *res)
1361 if (res->com.state == RES_SRQ_BUSY)
1363 else if (res->com.state != RES_SRQ_ALLOCATED)
1369 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1373 return remove_qp_ok((struct res_qp *)res);
1375 return remove_cq_ok((struct res_cq *)res);
1377 return remove_srq_ok((struct res_srq *)res);
1379 return remove_mpt_ok((struct res_mpt *)res);
1381 return remove_mtt_ok((struct res_mtt *)res, extra);
1385 return remove_eq_ok((struct res_eq *)res);
1387 return remove_counter_ok((struct res_counter *)res);
1389 return remove_xrcdn_ok((struct res_xrcdn *)res);
1391 return remove_fs_rule_ok((struct res_fs_rule *)res);
1397 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1398 enum mlx4_resource type, int extra)
1402 struct mlx4_priv *priv = mlx4_priv(dev);
1403 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1404 struct res_common *r;
1406 spin_lock_irq(mlx4_tlock(dev));
1407 for (i = base; i < base + count; ++i) {
1408 r = res_tracker_lookup(&tracker->res_tree[type], i);
1413 if (r->owner != slave) {
1417 err = remove_ok(r, type, extra);
1422 for (i = base; i < base + count; ++i) {
1423 r = res_tracker_lookup(&tracker->res_tree[type], i);
1424 rb_erase(&r->node, &tracker->res_tree[type]);
1431 spin_unlock_irq(mlx4_tlock(dev));
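/*
 * State-machine helpers: *_res_start_move_to() atomically validates that a
 * resource may move to the requested state, remembers the original state
 * and marks the entry busy; res_end_move() commits the transition and
 * res_abort_move() rolls it back if the FW command failed.
 */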
1436 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1437 enum res_qp_states state, struct res_qp **qp,
1440 struct mlx4_priv *priv = mlx4_priv(dev);
1441 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1445 spin_lock_irq(mlx4_tlock(dev));
1446 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1449 else if (r->com.owner != slave)
1454 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1455 __func__, r->com.res_id);
1459 case RES_QP_RESERVED:
1460 if (r->com.state == RES_QP_MAPPED && !alloc)
1463 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1468 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1469 r->com.state == RES_QP_HW)
1472 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1480 if (r->com.state != RES_QP_MAPPED)
1488 r->com.from_state = r->com.state;
1489 r->com.to_state = state;
1490 r->com.state = RES_QP_BUSY;
1496 spin_unlock_irq(mlx4_tlock(dev));
1501 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1502 enum res_mpt_states state, struct res_mpt **mpt)
1504 struct mlx4_priv *priv = mlx4_priv(dev);
1505 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1509 spin_lock_irq(mlx4_tlock(dev));
1510 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1513 else if (r->com.owner != slave)
1521 case RES_MPT_RESERVED:
1522 if (r->com.state != RES_MPT_MAPPED)
1526 case RES_MPT_MAPPED:
1527 if (r->com.state != RES_MPT_RESERVED &&
1528 r->com.state != RES_MPT_HW)
1533 if (r->com.state != RES_MPT_MAPPED)
1541 r->com.from_state = r->com.state;
1542 r->com.to_state = state;
1543 r->com.state = RES_MPT_BUSY;
1549 spin_unlock_irq(mlx4_tlock(dev));
1554 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1555 enum res_eq_states state, struct res_eq **eq)
1557 struct mlx4_priv *priv = mlx4_priv(dev);
1558 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1562 spin_lock_irq(mlx4_tlock(dev));
1563 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1566 else if (r->com.owner != slave)
1574 case RES_EQ_RESERVED:
1575 if (r->com.state != RES_EQ_HW)
1580 if (r->com.state != RES_EQ_RESERVED)
1589 r->com.from_state = r->com.state;
1590 r->com.to_state = state;
1591 r->com.state = RES_EQ_BUSY;
1597 spin_unlock_irq(mlx4_tlock(dev));
1602 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1603 enum res_cq_states state, struct res_cq **cq)
1605 struct mlx4_priv *priv = mlx4_priv(dev);
1606 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1610 spin_lock_irq(mlx4_tlock(dev));
1611 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1614 } else if (r->com.owner != slave) {
1616 } else if (state == RES_CQ_ALLOCATED) {
1617 if (r->com.state != RES_CQ_HW)
1619 else if (atomic_read(&r->ref_count))
1623 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1630 r->com.from_state = r->com.state;
1631 r->com.to_state = state;
1632 r->com.state = RES_CQ_BUSY;
1637 spin_unlock_irq(mlx4_tlock(dev));
1642 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1643 enum res_srq_states state, struct res_srq **srq)
1645 struct mlx4_priv *priv = mlx4_priv(dev);
1646 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1650 spin_lock_irq(mlx4_tlock(dev));
1651 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1654 } else if (r->com.owner != slave) {
1656 } else if (state == RES_SRQ_ALLOCATED) {
1657 if (r->com.state != RES_SRQ_HW)
1659 else if (atomic_read(&r->ref_count))
1661 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1666 r->com.from_state = r->com.state;
1667 r->com.to_state = state;
1668 r->com.state = RES_SRQ_BUSY;
1673 spin_unlock_irq(mlx4_tlock(dev));
1678 static void res_abort_move(struct mlx4_dev *dev, int slave,
1679 enum mlx4_resource type, int id)
1681 struct mlx4_priv *priv = mlx4_priv(dev);
1682 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1683 struct res_common *r;
1685 spin_lock_irq(mlx4_tlock(dev));
1686 r = res_tracker_lookup(&tracker->res_tree[type], id);
1687 if (r && (r->owner == slave))
1688 r->state = r->from_state;
1689 spin_unlock_irq(mlx4_tlock(dev));
1692 static void res_end_move(struct mlx4_dev *dev, int slave,
1693 enum mlx4_resource type, int id)
1695 struct mlx4_priv *priv = mlx4_priv(dev);
1696 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1697 struct res_common *r;
1699 spin_lock_irq(mlx4_tlock(dev));
1700 r = res_tracker_lookup(&tracker->res_tree[type], id);
1701 if (r && (r->owner == slave))
1702 r->state = r->to_state;
1703 spin_unlock_irq(mlx4_tlock(dev));
1706 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1708 return mlx4_is_qp_reserved(dev, qpn) &&
1709 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1712 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1714 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
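/*
 * Per-type ALLOC_RES handlers: grant quota for the slave, perform the real
 * allocation with the corresponding __mlx4_* helper, then register the
 * result in the tracker; the failure paths release the quota and free
 * whatever was allocated so far.
 */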
1717 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718 u64 in_param, u64 *out_param)
1728 case RES_OP_RESERVE:
1729 count = get_param_l(&in_param) & 0xffffff;
1730 /* Turn off all unsupported QP allocation flags that the
1731 * slave tries to set.
1732 */
1733 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1734 align = get_param_h(&in_param);
1735 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1739 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1741 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1745 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1747 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1748 __mlx4_qp_release_range(dev, base, count);
1751 set_param_l(out_param, base);
1753 case RES_OP_MAP_ICM:
1754 qpn = get_param_l(&in_param) & 0x7fffff;
1755 if (valid_reserved(dev, slave, qpn)) {
1756 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1761 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1766 if (!fw_reserved(dev, qpn)) {
1767 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1769 res_abort_move(dev, slave, RES_QP, qpn);
1774 res_end_move(dev, slave, RES_QP, qpn);
1784 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1785 u64 in_param, u64 *out_param)
1791 if (op != RES_OP_RESERVE_AND_MAP)
1794 order = get_param_l(&in_param);
1796 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1800 base = __mlx4_alloc_mtt_range(dev, order);
1802 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1806 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1808 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1809 __mlx4_free_mtt_range(dev, base, order);
1811 set_param_l(out_param, base);
1817 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1818 u64 in_param, u64 *out_param)
1823 struct res_mpt *mpt;
1826 case RES_OP_RESERVE:
1827 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1831 index = __mlx4_mpt_reserve(dev);
1833 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1836 id = index & mpt_mask(dev);
1838 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1840 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1841 __mlx4_mpt_release(dev, index);
1844 set_param_l(out_param, index);
1846 case RES_OP_MAP_ICM:
1847 index = get_param_l(&in_param);
1848 id = index & mpt_mask(dev);
1849 err = mr_res_start_move_to(dev, slave, id,
1850 RES_MPT_MAPPED, &mpt);
1854 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1856 res_abort_move(dev, slave, RES_MPT, id);
1860 res_end_move(dev, slave, RES_MPT, id);
1866 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1867 u64 in_param, u64 *out_param)
1873 case RES_OP_RESERVE_AND_MAP:
1874 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1878 err = __mlx4_cq_alloc_icm(dev, &cqn);
1880 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1884 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1886 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1887 __mlx4_cq_free_icm(dev, cqn);
1891 set_param_l(out_param, cqn);
1901 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1902 u64 in_param, u64 *out_param)
1908 case RES_OP_RESERVE_AND_MAP:
1909 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1913 err = __mlx4_srq_alloc_icm(dev, &srqn);
1915 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1919 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1921 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1922 __mlx4_srq_free_icm(dev, srqn);
1926 set_param_l(out_param, srqn);
1936 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1937 u8 smac_index, u64 *mac)
1939 struct mlx4_priv *priv = mlx4_priv(dev);
1940 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1941 struct list_head *mac_list =
1942 &tracker->slave_list[slave].res_list[RES_MAC];
1943 struct mac_res *res, *tmp;
1945 list_for_each_entry_safe(res, tmp, mac_list, list) {
1946 if (res->smac_index == smac_index && res->port == (u8) port) {
1954 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1956 struct mlx4_priv *priv = mlx4_priv(dev);
1957 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1958 struct list_head *mac_list =
1959 &tracker->slave_list[slave].res_list[RES_MAC];
1960 struct mac_res *res, *tmp;
1962 list_for_each_entry_safe(res, tmp, mac_list, list) {
1963 if (res->mac == mac && res->port == (u8) port) {
1964 /* mac found. update ref count */
1970 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1972 res = kzalloc(sizeof *res, GFP_KERNEL);
1974 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1978 res->port = (u8) port;
1979 res->smac_index = smac_index;
1981 list_add_tail(&res->list,
1982 &tracker->slave_list[slave].res_list[RES_MAC]);
1986 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1989 struct mlx4_priv *priv = mlx4_priv(dev);
1990 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1991 struct list_head *mac_list =
1992 &tracker->slave_list[slave].res_list[RES_MAC];
1993 struct mac_res *res, *tmp;
1995 list_for_each_entry_safe(res, tmp, mac_list, list) {
1996 if (res->mac == mac && res->port == (u8) port) {
1997 if (!--res->ref_count) {
1998 list_del(&res->list);
1999 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2007 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2009 struct mlx4_priv *priv = mlx4_priv(dev);
2010 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2011 struct list_head *mac_list =
2012 &tracker->slave_list[slave].res_list[RES_MAC];
2013 struct mac_res *res, *tmp;
2016 list_for_each_entry_safe(res, tmp, mac_list, list) {
2017 list_del(&res->list);
2018 /* drop the MAC reference once for each time the slave referenced it */
2019 for (i = 0; i < res->ref_count; i++)
2020 __mlx4_unregister_mac(dev, res->port, res->mac);
2021 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2026 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2027 u64 in_param, u64 *out_param, int in_port)
2034 if (op != RES_OP_RESERVE_AND_MAP)
2037 port = !in_port ? get_param_l(out_param) : in_port;
2038 port = mlx4_slave_convert_port(
2045 err = __mlx4_register_mac(dev, port, mac);
2048 set_param_l(out_param, err);
2053 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2055 __mlx4_unregister_mac(dev, port, mac);
2060 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2061 int port, int vlan_index)
2063 struct mlx4_priv *priv = mlx4_priv(dev);
2064 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2065 struct list_head *vlan_list =
2066 &tracker->slave_list[slave].res_list[RES_VLAN];
2067 struct vlan_res *res, *tmp;
2069 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2070 if (res->vlan == vlan && res->port == (u8) port) {
2071 /* vlan found. update ref count */
2077 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2079 res = kzalloc(sizeof(*res), GFP_KERNEL);
2081 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2085 res->port = (u8) port;
2086 res->vlan_index = vlan_index;
2088 list_add_tail(&res->list,
2089 &tracker->slave_list[slave].res_list[RES_VLAN]);
2094 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2097 struct mlx4_priv *priv = mlx4_priv(dev);
2098 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2099 struct list_head *vlan_list =
2100 &tracker->slave_list[slave].res_list[RES_VLAN];
2101 struct vlan_res *res, *tmp;
2103 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2104 if (res->vlan == vlan && res->port == (u8) port) {
2105 if (!--res->ref_count) {
2106 list_del(&res->list);
2107 mlx4_release_resource(dev, slave, RES_VLAN,
2116 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2118 struct mlx4_priv *priv = mlx4_priv(dev);
2119 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2120 struct list_head *vlan_list =
2121 &tracker->slave_list[slave].res_list[RES_VLAN];
2122 struct vlan_res *res, *tmp;
2125 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2126 list_del(&res->list);
2127 /* drop the VLAN reference once for each time the slave referenced it */
2128 for (i = 0; i < res->ref_count; i++)
2129 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2130 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2135 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2136 u64 in_param, u64 *out_param, int in_port)
2138 struct mlx4_priv *priv = mlx4_priv(dev);
2139 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2145 port = !in_port ? get_param_l(out_param) : in_port;
2147 if (!port || op != RES_OP_RESERVE_AND_MAP)
2150 port = mlx4_slave_convert_port(
2155 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2156 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2157 slave_state[slave].old_vlan_api = true;
2161 vlan = (u16) in_param;
2163 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2165 set_param_l(out_param, (u32) vlan_index);
2166 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2168 __mlx4_unregister_vlan(dev, port, vlan);
2173 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2174 u64 in_param, u64 *out_param, int port)
2179 if (op != RES_OP_RESERVE)
2182 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2186 err = __mlx4_counter_alloc(dev, &index);
2188 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2192 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2194 __mlx4_counter_free(dev, index);
2195 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2197 set_param_l(out_param, index);
2203 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2204 u64 in_param, u64 *out_param)
2209 if (op != RES_OP_RESERVE)
2212 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2216 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2218 __mlx4_xrcd_free(dev, xrcdn);
2220 set_param_l(out_param, xrcdn);
2225 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2226 struct mlx4_vhcr *vhcr,
2227 struct mlx4_cmd_mailbox *inbox,
2228 struct mlx4_cmd_mailbox *outbox,
2229 struct mlx4_cmd_info *cmd)
2232 int alop = vhcr->op_modifier;
2234 switch (vhcr->in_modifier & 0xFF) {
2236 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2237 vhcr->in_param, &vhcr->out_param);
2241 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2242 vhcr->in_param, &vhcr->out_param);
2246 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2247 vhcr->in_param, &vhcr->out_param);
2251 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2252 vhcr->in_param, &vhcr->out_param);
2256 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2257 vhcr->in_param, &vhcr->out_param);
2261 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2262 vhcr->in_param, &vhcr->out_param,
2263 (vhcr->in_modifier >> 8) & 0xFF);
2267 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2268 vhcr->in_param, &vhcr->out_param,
2269 (vhcr->in_modifier >> 8) & 0xFF);
2273 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2274 vhcr->in_param, &vhcr->out_param, 0);
2278 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2279 vhcr->in_param, &vhcr->out_param);
2290 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2299 case RES_OP_RESERVE:
2300 base = get_param_l(&in_param) & 0x7fffff;
2301 count = get_param_h(&in_param);
2302 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2305 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2306 __mlx4_qp_release_range(dev, base, count);
2308 case RES_OP_MAP_ICM:
2309 qpn = get_param_l(&in_param) & 0x7fffff;
2310 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2315 if (!fw_reserved(dev, qpn))
2316 __mlx4_qp_free_icm(dev, qpn);
2318 res_end_move(dev, slave, RES_QP, qpn);
2320 if (valid_reserved(dev, slave, qpn))
2321 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2330 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2331 u64 in_param, u64 *out_param)
2337 if (op != RES_OP_RESERVE_AND_MAP)
2340 base = get_param_l(&in_param);
2341 order = get_param_h(&in_param);
2342 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2344 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2345 __mlx4_free_mtt_range(dev, base, order);
2350 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2356 struct res_mpt *mpt;
2359 case RES_OP_RESERVE:
2360 index = get_param_l(&in_param);
2361 id = index & mpt_mask(dev);
2362 err = get_res(dev, slave, id, RES_MPT, &mpt);
2366 put_res(dev, slave, id, RES_MPT);
2368 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2371 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2372 __mlx4_mpt_release(dev, index);
2374 case RES_OP_MAP_ICM:
2375 index = get_param_l(&in_param);
2376 id = index & mpt_mask(dev);
2377 err = mr_res_start_move_to(dev, slave, id,
2378 RES_MPT_RESERVED, &mpt);
2382 __mlx4_mpt_free_icm(dev, mpt->key);
2383 res_end_move(dev, slave, RES_MPT, id);
2393 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2394 u64 in_param, u64 *out_param)
2400 case RES_OP_RESERVE_AND_MAP:
2401 cqn = get_param_l(&in_param);
2402 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2406 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2407 __mlx4_cq_free_icm(dev, cqn);
2418 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2419 u64 in_param, u64 *out_param)
2425 case RES_OP_RESERVE_AND_MAP:
2426 srqn = get_param_l(&in_param);
2427 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2431 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2432 __mlx4_srq_free_icm(dev, srqn);
2443 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2444 u64 in_param, u64 *out_param, int in_port)
2450 case RES_OP_RESERVE_AND_MAP:
2451 port = !in_port ? get_param_l(out_param) : in_port;
2452 port = mlx4_slave_convert_port(
2457 mac_del_from_slave(dev, slave, in_param, port);
2458 __mlx4_unregister_mac(dev, port, in_param);
2469 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2470 u64 in_param, u64 *out_param, int port)
2472 struct mlx4_priv *priv = mlx4_priv(dev);
2473 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2476 port = mlx4_slave_convert_port(
2482 case RES_OP_RESERVE_AND_MAP:
2483 if (slave_state[slave].old_vlan_api)
2487 vlan_del_from_slave(dev, slave, in_param, port);
2488 __mlx4_unregister_vlan(dev, port, in_param);
2498 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2499 u64 in_param, u64 *out_param)
2504 if (op != RES_OP_RESERVE)
2507 index = get_param_l(&in_param);
2508 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2511 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2515 __mlx4_counter_free(dev, index);
2516 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2521 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2522 u64 in_param, u64 *out_param)
2527 if (op != RES_OP_RESERVE)
2530 xrcdn = get_param_l(&in_param);
2531 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2535 __mlx4_xrcd_free(dev, xrcdn);
2540 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2541 struct mlx4_vhcr *vhcr,
2542 struct mlx4_cmd_mailbox *inbox,
2543 struct mlx4_cmd_mailbox *outbox,
2544 struct mlx4_cmd_info *cmd)
2547 int alop = vhcr->op_modifier;
2549 switch (vhcr->in_modifier & 0xFF) {
2551 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2556 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2557 vhcr->in_param, &vhcr->out_param);
2561 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2566 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2567 vhcr->in_param, &vhcr->out_param);
2571 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2572 vhcr->in_param, &vhcr->out_param);
2576 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2577 vhcr->in_param, &vhcr->out_param,
2578 (vhcr->in_modifier >> 8) & 0xFF);
2582 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2583 vhcr->in_param, &vhcr->out_param,
2584 (vhcr->in_modifier >> 8) & 0xFF);
2588 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2589 vhcr->in_param, &vhcr->out_param);
2593 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2594 vhcr->in_param, &vhcr->out_param);
2602 /* ugly but other choices are uglier */
2603 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2605 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2608 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2610 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2613 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2615 return be32_to_cpu(mpt->mtt_sz);
2618 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2620 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2623 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2625 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2628 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2630 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2633 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2635 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2638 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2640 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2643 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2645 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
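/*
 * qp_get_mtt_size(): number of MTT entries the QP buffers need. The SQ takes
 * 2^(log_sq_size + log_sq_stride + 4) bytes, the RQ is counted the same way
 * unless the QP uses an SRQ, RSS or XRC, and the total (including the page
 * offset) is rounded up to a power-of-two number of pages.
 */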
2648 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2650 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2651 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2652 int log_sq_stride = qpc->sq_size_stride & 7;
2653 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2654 int log_rq_stride = qpc->rq_size_stride & 7;
2655 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2656 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2657 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2658 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2663 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2665 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2666 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2667 total_mem = sq_size + rq_size;
2669 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2675 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2676 int size, struct res_mtt *mtt)
2678 int res_start = mtt->com.res_id;
2679 int res_size = (1 << mtt->order);
2681 if (start < res_start || start + size > res_start + res_size)
2682 return -EPERM;
2684 return 0;
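/*
 * SW2HW_MPT on behalf of a slave: verify the MPT is a memory region owned
 * by this slave (no memory windows, no FMR/bind enable for VFs), that the
 * PD bits encode the correct slave, and that the referenced MTT range
 * really belongs to the slave, before passing the command to FW.
 */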
2686 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2687 struct mlx4_vhcr *vhcr,
2688 struct mlx4_cmd_mailbox *inbox,
2689 struct mlx4_cmd_mailbox *outbox,
2690 struct mlx4_cmd_info *cmd)
2693 int index = vhcr->in_modifier;
2694 struct res_mtt *mtt;
2695 struct res_mpt *mpt;
2696 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2702 id = index & mpt_mask(dev);
2703 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2707 /* Disable memory windows for VFs. */
2708 if (!mr_is_region(inbox->buf)) {
2713 /* Make sure that the PD bits related to the slave id are zeros. */
2714 pd = mr_get_pd(inbox->buf);
2715 pd_slave = (pd >> 17) & 0x7f;
2716 if (pd_slave != 0 && --pd_slave != slave) {
2721 if (mr_is_fmr(inbox->buf)) {
2722 /* FMR and Bind Enable are forbidden in slave devices. */
2723 if (mr_is_bind_enabled(inbox->buf)) {
2727 /* FMR and Memory Windows are also forbidden. */
2728 if (!mr_is_region(inbox->buf)) {
2734 phys = mr_phys_mpt(inbox->buf);
2736 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2740 err = check_mtt_range(dev, slave, mtt_base,
2741 mr_get_mtt_size(inbox->buf), mtt);
2748 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2753 atomic_inc(&mtt->ref_count);
2754 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2757 res_end_move(dev, slave, RES_MPT, id);
2762 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2764 res_abort_move(dev, slave, RES_MPT, id);
2769 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2770 struct mlx4_vhcr *vhcr,
2771 struct mlx4_cmd_mailbox *inbox,
2772 struct mlx4_cmd_mailbox *outbox,
2773 struct mlx4_cmd_info *cmd)
2776 int index = vhcr->in_modifier;
2777 struct res_mpt *mpt;
2780 id = index & mpt_mask(dev);
2781 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2785 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2790 atomic_dec(&mpt->mtt->ref_count);
2792 res_end_move(dev, slave, RES_MPT, id);
2796 res_abort_move(dev, slave, RES_MPT, id);
2801 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2802 struct mlx4_vhcr *vhcr,
2803 struct mlx4_cmd_mailbox *inbox,
2804 struct mlx4_cmd_mailbox *outbox,
2805 struct mlx4_cmd_info *cmd)
2808 int index = vhcr->in_modifier;
2809 struct res_mpt *mpt;
2812 id = index & mpt_mask(dev);
2813 err = get_res(dev, slave, id, RES_MPT, &mpt);
2817 if (mpt->com.from_state == RES_MPT_MAPPED) {
2818 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2819 * that, the VF must read the MPT. But since the MPT entry memory is not
2820 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2821 * entry contents. To guarantee that the MPT cannot be changed, the driver
2822 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2823 * ownership following the change. The change here allows the VF to
2824 * perform QUERY_MPT also when the entry is in SW ownership.
2826 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2827 &mlx4_priv(dev)->mr_table.dmpt_table,
2830 if (NULL == mpt_entry || NULL == outbox->buf) {
2835 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2838 } else if (mpt->com.from_state == RES_MPT_HW) {
2839 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2847 put_res(dev, slave, id, RES_MPT);
2851 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2853 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2856 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2858 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2861 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2863 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2866 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2867 struct mlx4_qp_context *context)
2869 u32 qpn = vhcr->in_modifier & 0xffffff;
2872 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2875 /* adjust qkey in qp context */
2876 context->qkey = cpu_to_be32(qkey);
2879 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2880 struct mlx4_qp_context *qpc,
2881 struct mlx4_cmd_mailbox *inbox);
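/*
 * RST2INIT wrapper: moves the QP to RES_QP_HW and takes a reference on
 * everything its context points at (the MTT range, the receive and send
 * CQs and, when bit 24 of the srqn field is set, the SRQ).  The local QPN
 * and context flags are recorded in the tracker entry, and the
 * proxy/tunnel qkey and the slave's pkey index are patched into the
 * mailbox before the command is issued to firmware.
 */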
2883 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2884 struct mlx4_vhcr *vhcr,
2885 struct mlx4_cmd_mailbox *inbox,
2886 struct mlx4_cmd_mailbox *outbox,
2887 struct mlx4_cmd_info *cmd)
2890 int qpn = vhcr->in_modifier & 0x7fffff;
2891 struct res_mtt *mtt;
2893 struct mlx4_qp_context *qpc = inbox->buf + 8;
2894 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2895 int mtt_size = qp_get_mtt_size(qpc);
2898 int rcqn = qp_get_rcqn(qpc);
2899 int scqn = qp_get_scqn(qpc);
2900 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2901 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2902 struct res_srq *srq;
2903 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2905 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2909 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2912 qp->local_qpn = local_qpn;
2913 qp->sched_queue = 0;
2915 qp->vlan_control = 0;
2917 qp->pri_path_fl = 0;
2920 qp->qpc_flags = be32_to_cpu(qpc->flags);
2922 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2926 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2930 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2935 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2942 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2947 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2948 update_pkey_index(dev, slave, inbox);
2949 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2952 atomic_inc(&mtt->ref_count);
2954 atomic_inc(&rcq->ref_count);
2956 atomic_inc(&scq->ref_count);
2960 put_res(dev, slave, scqn, RES_CQ);
2963 atomic_inc(&srq->ref_count);
2964 put_res(dev, slave, srqn, RES_SRQ);
2967 put_res(dev, slave, rcqn, RES_CQ);
2968 put_res(dev, slave, mtt_base, RES_MTT);
2969 res_end_move(dev, slave, RES_QP, qpn);
2975 put_res(dev, slave, srqn, RES_SRQ);
2978 put_res(dev, slave, scqn, RES_CQ);
2980 put_res(dev, slave, rcqn, RES_CQ);
2982 put_res(dev, slave, mtt_base, RES_MTT);
2984 res_abort_move(dev, slave, RES_QP, qpn);
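/*
 * EQ/CQ MTT sizing helpers: an EQE/CQE is 32 bytes, so the queue buffer
 * spans 2^(log_size + 5) bytes.  With page_shift = log_page_size + 12,
 * the number of MTT entries (pages) required is
 * 1 << (log_size + 5 - page_shift), and presumably a single entry when
 * the whole buffer fits inside one page.
 */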
2989 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2991 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2994 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2996 int log_eq_size = eqc->log_eq_size & 0x1f;
2997 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2999 if (log_eq_size + 5 < page_shift)
3002 return 1 << (log_eq_size + 5 - page_shift);
3005 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3007 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3010 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3012 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3013 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3015 if (log_cq_size + 5 < page_shift)
3018 return 1 << (log_cq_size + 5 - page_shift);
3021 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3022 struct mlx4_vhcr *vhcr,
3023 struct mlx4_cmd_mailbox *inbox,
3024 struct mlx4_cmd_mailbox *outbox,
3025 struct mlx4_cmd_info *cmd)
3028 int eqn = vhcr->in_modifier;
3029 int res_id = (slave << 10) | eqn;
3030 struct mlx4_eq_context *eqc = inbox->buf;
3031 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3032 int mtt_size = eq_get_mtt_size(eqc);
3034 struct res_mtt *mtt;
3036 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3039 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3043 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3047 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3051 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3055 atomic_inc(&mtt->ref_count);
3057 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3058 res_end_move(dev, slave, RES_EQ, res_id);
3062 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3064 res_abort_move(dev, slave, RES_EQ, res_id);
3066 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3070 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3071 struct mlx4_vhcr *vhcr,
3072 struct mlx4_cmd_mailbox *inbox,
3073 struct mlx4_cmd_mailbox *outbox,
3074 struct mlx4_cmd_info *cmd)
3077 u8 get = vhcr->op_modifier;
3082 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3087 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3088 int len, struct res_mtt **res)
3090 struct mlx4_priv *priv = mlx4_priv(dev);
3091 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3092 struct res_mtt *mtt;
3095 spin_lock_irq(mlx4_tlock(dev));
3096 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3098 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3100 mtt->com.from_state = mtt->com.state;
3101 mtt->com.state = RES_MTT_BUSY;
3106 spin_unlock_irq(mlx4_tlock(dev));
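/*
 * verify_qp_parameters: sanity checks applied before a slave QP transition
 * is forwarded to firmware.  VFs may not set QP rate limiting and have the
 * FPP bit cleared; when a primary or alternate address path is being set,
 * its mgid_index is bounded by the slave's GID count on that port (looked
 * up via mlx4_get_slave_num_gids() for non-IB ports); and MLX (proxy
 * special) QPs may only be taken through INIT2RTR by VFs for which SMI is
 * enabled on that port.
 */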
3111 static int verify_qp_parameters(struct mlx4_dev *dev,
3112 struct mlx4_vhcr *vhcr,
3113 struct mlx4_cmd_mailbox *inbox,
3114 enum qp_transition transition, u8 slave)
3118 struct mlx4_qp_context *qp_ctx;
3119 enum mlx4_qp_optpar optpar;
3123 qp_ctx = inbox->buf + 8;
3124 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3125 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3127 if (slave != mlx4_master_func_num(dev)) {
3128 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3129 /* setting QP rate-limit is disallowed for VFs */
3130 if (qp_ctx->rate_limit_params)
3136 case MLX4_QP_ST_XRC:
3138 switch (transition) {
3139 case QP_TRANS_INIT2RTR:
3140 case QP_TRANS_RTR2RTS:
3141 case QP_TRANS_RTS2RTS:
3142 case QP_TRANS_SQD2SQD:
3143 case QP_TRANS_SQD2RTS:
3144 if (slave != mlx4_master_func_num(dev)) {
3145 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3146 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3147 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3148 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3151 if (qp_ctx->pri_path.mgid_index >= num_gids)
3154 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3155 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3156 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3157 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3160 if (qp_ctx->alt_path.mgid_index >= num_gids)
3170 case MLX4_QP_ST_MLX:
3171 qpn = vhcr->in_modifier & 0x7fffff;
3172 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3173 if (transition == QP_TRANS_INIT2RTR &&
3174 slave != mlx4_master_func_num(dev) &&
3175 mlx4_is_qp_reserved(dev, qpn) &&
3176 !mlx4_vf_smi_enabled(dev, slave, port)) {
3177 /* only enabled VFs may create MLX proxy QPs */
3178 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3179 __func__, slave, port);
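/*
 * WRITE_MTT wrapper: the PF writes the MTT entries on behalf of the slave.
 * The target range must fall inside an MTT allocation owned by the slave
 * (get_containing_mtt() marks it busy), the inbox page list is converted
 * to host endianness with the low (present) bit cleared, and
 * __mlx4_write_mtt() is called with a dummy mtt struct describing the
 * range starting at page_list[0].
 */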
3191 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3192 struct mlx4_vhcr *vhcr,
3193 struct mlx4_cmd_mailbox *inbox,
3194 struct mlx4_cmd_mailbox *outbox,
3195 struct mlx4_cmd_info *cmd)
3197 struct mlx4_mtt mtt;
3198 __be64 *page_list = inbox->buf;
3199 u64 *pg_list = (u64 *)page_list;
3201 struct res_mtt *rmtt = NULL;
3202 int start = be64_to_cpu(page_list[0]);
3203 int npages = vhcr->in_modifier;
3206 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3210 /* Call the SW implementation of write_mtt:
3211 * - Prepare a dummy mtt struct
3212 * - Translate inbox contents to simple addresses in host endianness */
3213 mtt.offset = 0; /* TBD: this is broken, but it is left unhandled since
3214 the offset is not really used */
3217 for (i = 0; i < npages; ++i)
3218 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3220 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3221 ((u64 *)page_list + 2));
3224 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3229 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3230 struct mlx4_vhcr *vhcr,
3231 struct mlx4_cmd_mailbox *inbox,
3232 struct mlx4_cmd_mailbox *outbox,
3233 struct mlx4_cmd_info *cmd)
3235 int eqn = vhcr->in_modifier;
3236 int res_id = eqn | (slave << 10);
3240 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3244 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3248 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3252 atomic_dec(&eq->mtt->ref_count);
3253 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3254 res_end_move(dev, slave, RES_EQ, res_id);
3255 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3260 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3262 res_abort_move(dev, slave, RES_EQ, res_id);
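/*
 * mlx4_GEN_EQE: injects an event into a slave's event queue.  The slave
 * must be valid, not the PF, active, and registered for this event type
 * (event_eq->eqn >= 0), and the EQ itself must be in RES_EQ_HW.  The
 * 28-byte EQE is copied into a command mailbox (the token is patched for
 * command-completion events) and delivered via the GEN_EQE command, with
 * the slave number in the low byte of the in_modifier and the EQN in
 * bits 16 and up.
 */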
3267 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3269 struct mlx4_priv *priv = mlx4_priv(dev);
3270 struct mlx4_slave_event_eq_info *event_eq;
3271 struct mlx4_cmd_mailbox *mailbox;
3272 u32 in_modifier = 0;
3277 if (!priv->mfunc.master.slave_state)
3280 /* check that the slave is valid, is not the PF, and is active */
3281 if (slave < 0 || slave > dev->persist->num_vfs ||
3282 slave == dev->caps.function ||
3283 !priv->mfunc.master.slave_state[slave].active)
3286 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3288 /* Create the event only if the slave is registered */
3289 if (event_eq->eqn < 0)
3292 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3293 res_id = (slave << 10) | event_eq->eqn;
3294 err = get_res(dev, slave, res_id, RES_EQ, &req);
3298 if (req->com.from_state != RES_EQ_HW) {
3303 mailbox = mlx4_alloc_cmd_mailbox(dev);
3304 if (IS_ERR(mailbox)) {
3305 err = PTR_ERR(mailbox);
3309 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3311 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3314 memcpy(mailbox->buf, (u8 *) eqe, 28);
3316 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3318 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3319 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3322 put_res(dev, slave, res_id, RES_EQ);
3323 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3324 mlx4_free_cmd_mailbox(dev, mailbox);
3328 put_res(dev, slave, res_id, RES_EQ);
3331 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3335 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3336 struct mlx4_vhcr *vhcr,
3337 struct mlx4_cmd_mailbox *inbox,
3338 struct mlx4_cmd_mailbox *outbox,
3339 struct mlx4_cmd_info *cmd)
3341 int eqn = vhcr->in_modifier;
3342 int res_id = eqn | (slave << 10);
3346 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3350 if (eq->com.from_state != RES_EQ_HW) {
3355 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3358 put_res(dev, slave, res_id, RES_EQ);
3362 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3363 struct mlx4_vhcr *vhcr,
3364 struct mlx4_cmd_mailbox *inbox,
3365 struct mlx4_cmd_mailbox *outbox,
3366 struct mlx4_cmd_info *cmd)
3369 int cqn = vhcr->in_modifier;
3370 struct mlx4_cq_context *cqc = inbox->buf;
3371 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3372 struct res_cq *cq = NULL;
3373 struct res_mtt *mtt;
3375 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3378 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3381 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3384 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3387 atomic_inc(&mtt->ref_count);
3389 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3390 res_end_move(dev, slave, RES_CQ, cqn);
3394 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3396 res_abort_move(dev, slave, RES_CQ, cqn);
3400 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3401 struct mlx4_vhcr *vhcr,
3402 struct mlx4_cmd_mailbox *inbox,
3403 struct mlx4_cmd_mailbox *outbox,
3404 struct mlx4_cmd_info *cmd)
3407 int cqn = vhcr->in_modifier;
3408 struct res_cq *cq = NULL;
3410 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3413 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416 atomic_dec(&cq->mtt->ref_count);
3417 res_end_move(dev, slave, RES_CQ, cqn);
3421 res_abort_move(dev, slave, RES_CQ, cqn);
3425 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3426 struct mlx4_vhcr *vhcr,
3427 struct mlx4_cmd_mailbox *inbox,
3428 struct mlx4_cmd_mailbox *outbox,
3429 struct mlx4_cmd_info *cmd)
3431 int cqn = vhcr->in_modifier;
3435 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3439 if (cq->com.from_state != RES_CQ_HW)
3442 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3444 put_res(dev, slave, cqn, RES_CQ);
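/*
 * handle_resize: MODIFY_CQ with op_modifier 0 moves a CQ onto a new MTT
 * range.  The CQ's current MTT is looked up and must match cq->mtt, the
 * new range taken from the mailbox is validated against the slave's
 * allocation, and on success the MTT reference moves from the old range
 * to the new one.
 */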
3449 static int handle_resize(struct mlx4_dev *dev, int slave,
3450 struct mlx4_vhcr *vhcr,
3451 struct mlx4_cmd_mailbox *inbox,
3452 struct mlx4_cmd_mailbox *outbox,
3453 struct mlx4_cmd_info *cmd,
3457 struct res_mtt *orig_mtt;
3458 struct res_mtt *mtt;
3459 struct mlx4_cq_context *cqc = inbox->buf;
3460 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3462 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3466 if (orig_mtt != cq->mtt) {
3471 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3475 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3478 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3481 atomic_dec(&orig_mtt->ref_count);
3482 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3483 atomic_inc(&mtt->ref_count);
3485 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3489 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3491 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3497 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3498 struct mlx4_vhcr *vhcr,
3499 struct mlx4_cmd_mailbox *inbox,
3500 struct mlx4_cmd_mailbox *outbox,
3501 struct mlx4_cmd_info *cmd)
3503 int cqn = vhcr->in_modifier;
3507 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3511 if (cq->com.from_state != RES_CQ_HW)
3514 if (vhcr->op_modifier == 0) {
3515 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3519 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3521 put_res(dev, slave, cqn, RES_CQ);
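/*
 * SRQ MTT sizing: with a WQE stride of 16 << log_rq_stride bytes, the SRQ
 * buffer occupies 2^(log_srq_size + log_rq_stride + 4) bytes; dividing by
 * the page size (page_shift = log_page_size + 12) yields the number of
 * MTT entries, presumably a single entry when the buffer fits in one page.
 */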
3526 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3528 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3529 int log_rq_stride = srqc->logstride & 7;
3530 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3532 if (log_srq_size + log_rq_stride + 4 < page_shift)
3535 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3538 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3539 struct mlx4_vhcr *vhcr,
3540 struct mlx4_cmd_mailbox *inbox,
3541 struct mlx4_cmd_mailbox *outbox,
3542 struct mlx4_cmd_info *cmd)
3545 int srqn = vhcr->in_modifier;
3546 struct res_mtt *mtt;
3547 struct res_srq *srq = NULL;
3548 struct mlx4_srq_context *srqc = inbox->buf;
3549 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3551 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3554 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3557 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3560 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3565 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3569 atomic_inc(&mtt->ref_count);
3571 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3572 res_end_move(dev, slave, RES_SRQ, srqn);
3576 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3578 res_abort_move(dev, slave, RES_SRQ, srqn);
3583 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3584 struct mlx4_vhcr *vhcr,
3585 struct mlx4_cmd_mailbox *inbox,
3586 struct mlx4_cmd_mailbox *outbox,
3587 struct mlx4_cmd_info *cmd)
3590 int srqn = vhcr->in_modifier;
3591 struct res_srq *srq = NULL;
3593 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3596 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3599 atomic_dec(&srq->mtt->ref_count);
3601 atomic_dec(&srq->cq->ref_count);
3602 res_end_move(dev, slave, RES_SRQ, srqn);
3607 res_abort_move(dev, slave, RES_SRQ, srqn);
3612 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3613 struct mlx4_vhcr *vhcr,
3614 struct mlx4_cmd_mailbox *inbox,
3615 struct mlx4_cmd_mailbox *outbox,
3616 struct mlx4_cmd_info *cmd)
3619 int srqn = vhcr->in_modifier;
3620 struct res_srq *srq;
3622 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3625 if (srq->com.from_state != RES_SRQ_HW) {
3629 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3631 put_res(dev, slave, srqn, RES_SRQ);
3635 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3636 struct mlx4_vhcr *vhcr,
3637 struct mlx4_cmd_mailbox *inbox,
3638 struct mlx4_cmd_mailbox *outbox,
3639 struct mlx4_cmd_info *cmd)
3642 int srqn = vhcr->in_modifier;
3643 struct res_srq *srq;
3645 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3649 if (srq->com.from_state != RES_SRQ_HW) {
3654 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3656 put_res(dev, slave, srqn, RES_SRQ);
3660 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3661 struct mlx4_vhcr *vhcr,
3662 struct mlx4_cmd_mailbox *inbox,
3663 struct mlx4_cmd_mailbox *outbox,
3664 struct mlx4_cmd_info *cmd)
3667 int qpn = vhcr->in_modifier & 0x7fffff;
3670 err = get_res(dev, slave, qpn, RES_QP, &qp);
3673 if (qp->com.from_state != RES_QP_HW) {
3678 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3680 put_res(dev, slave, qpn, RES_QP);
3684 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3685 struct mlx4_vhcr *vhcr,
3686 struct mlx4_cmd_mailbox *inbox,
3687 struct mlx4_cmd_mailbox *outbox,
3688 struct mlx4_cmd_info *cmd)
3690 struct mlx4_qp_context *context = inbox->buf + 8;
3691 adjust_proxy_tun_qkey(dev, vhcr, context);
3692 update_pkey_index(dev, slave, inbox);
3693 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3696 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3697 struct mlx4_qp_context *qpc,
3698 struct mlx4_cmd_mailbox *inbox)
3700 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3702 int port = mlx4_slave_convert_port(
3703 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3708 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3711 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3712 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3713 qpc->pri_path.sched_queue = pri_sched_queue;
3716 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3717 port = mlx4_slave_convert_port(
3718 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3722 qpc->alt_path.sched_queue =
3723 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3729 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3730 struct mlx4_qp_context *qpc,
3731 struct mlx4_cmd_mailbox *inbox)
3735 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3736 u8 sched = *(u8 *)(inbox->buf + 64);
3739 port = (sched >> 6 & 1) + 1;
3740 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3741 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3742 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3748 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3749 struct mlx4_vhcr *vhcr,
3750 struct mlx4_cmd_mailbox *inbox,
3751 struct mlx4_cmd_mailbox *outbox,
3752 struct mlx4_cmd_info *cmd)
3755 struct mlx4_qp_context *qpc = inbox->buf + 8;
3756 int qpn = vhcr->in_modifier & 0x7fffff;
3758 u8 orig_sched_queue;
3759 __be32 orig_param3 = qpc->param3;
3760 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3761 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3762 u8 orig_pri_path_fl = qpc->pri_path.fl;
3763 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3764 u8 orig_feup = qpc->pri_path.feup;
3766 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3769 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3773 if (roce_verify_mac(dev, slave, qpc, inbox))
3776 update_pkey_index(dev, slave, inbox);
3777 update_gid(dev, inbox, (u8)slave);
3778 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3779 orig_sched_queue = qpc->pri_path.sched_queue;
3781 err = get_res(dev, slave, qpn, RES_QP, &qp);
3784 if (qp->com.from_state != RES_QP_HW) {
3789 err = update_vport_qp_param(dev, inbox, slave, qpn);
3793 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3795 /* if no error, save sched queue value passed in by VF. This is
3796 * essentially the QOS value provided by the VF. This will be useful
3797 * if we allow dynamic changes from VST back to VGT
3800 qp->sched_queue = orig_sched_queue;
3801 qp->param3 = orig_param3;
3802 qp->vlan_control = orig_vlan_control;
3803 qp->fvl_rx = orig_fvl_rx;
3804 qp->pri_path_fl = orig_pri_path_fl;
3805 qp->vlan_index = orig_vlan_index;
3806 qp->feup = orig_feup;
3808 put_res(dev, slave, qpn, RES_QP);
3812 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3813 struct mlx4_vhcr *vhcr,
3814 struct mlx4_cmd_mailbox *inbox,
3815 struct mlx4_cmd_mailbox *outbox,
3816 struct mlx4_cmd_info *cmd)
3819 struct mlx4_qp_context *context = inbox->buf + 8;
3821 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3824 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3828 update_pkey_index(dev, slave, inbox);
3829 update_gid(dev, inbox, (u8)slave);
3830 adjust_proxy_tun_qkey(dev, vhcr, context);
3831 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3834 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3835 struct mlx4_vhcr *vhcr,
3836 struct mlx4_cmd_mailbox *inbox,
3837 struct mlx4_cmd_mailbox *outbox,
3838 struct mlx4_cmd_info *cmd)
3841 struct mlx4_qp_context *context = inbox->buf + 8;
3843 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3846 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3850 update_pkey_index(dev, slave, inbox);
3851 update_gid(dev, inbox, (u8)slave);
3852 adjust_proxy_tun_qkey(dev, vhcr, context);
3853 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3857 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3858 struct mlx4_vhcr *vhcr,
3859 struct mlx4_cmd_mailbox *inbox,
3860 struct mlx4_cmd_mailbox *outbox,
3861 struct mlx4_cmd_info *cmd)
3863 struct mlx4_qp_context *context = inbox->buf + 8;
3864 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3867 adjust_proxy_tun_qkey(dev, vhcr, context);
3868 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3871 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3872 struct mlx4_vhcr *vhcr,
3873 struct mlx4_cmd_mailbox *inbox,
3874 struct mlx4_cmd_mailbox *outbox,
3875 struct mlx4_cmd_info *cmd)
3878 struct mlx4_qp_context *context = inbox->buf + 8;
3880 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3883 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3887 adjust_proxy_tun_qkey(dev, vhcr, context);
3888 update_gid(dev, inbox, (u8)slave);
3889 update_pkey_index(dev, slave, inbox);
3890 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3893 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3894 struct mlx4_vhcr *vhcr,
3895 struct mlx4_cmd_mailbox *inbox,
3896 struct mlx4_cmd_mailbox *outbox,
3897 struct mlx4_cmd_info *cmd)
3900 struct mlx4_qp_context *context = inbox->buf + 8;
3902 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3905 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3909 adjust_proxy_tun_qkey(dev, vhcr, context);
3910 update_gid(dev, inbox, (u8)slave);
3911 update_pkey_index(dev, slave, inbox);
3912 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3915 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3916 struct mlx4_vhcr *vhcr,
3917 struct mlx4_cmd_mailbox *inbox,
3918 struct mlx4_cmd_mailbox *outbox,
3919 struct mlx4_cmd_info *cmd)
3922 int qpn = vhcr->in_modifier & 0x7fffff;
3925 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3928 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3932 atomic_dec(&qp->mtt->ref_count);
3933 atomic_dec(&qp->rcq->ref_count);
3934 atomic_dec(&qp->scq->ref_count);
3936 atomic_dec(&qp->srq->ref_count);
3937 res_end_move(dev, slave, RES_QP, qpn);
3941 res_abort_move(dev, slave, RES_QP, qpn);
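/*
 * Multicast attachment bookkeeping: every GID a slave attaches to a QP is
 * recorded on the QP's mcg_list (under the mcg_spl spinlock) together with
 * its protocol, steering type and registration id, so duplicate attaches
 * are refused, detaches can be validated, and detach_qp() can tear down
 * whatever is left when the slave's resources are cleaned up.
 */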
3946 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3947 struct res_qp *rqp, u8 *gid)
3949 struct res_gid *res;
3951 list_for_each_entry(res, &rqp->mcg_list, list) {
3952 if (!memcmp(res->gid, gid, 16))
3958 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3959 u8 *gid, enum mlx4_protocol prot,
3960 enum mlx4_steer_type steer, u64 reg_id)
3962 struct res_gid *res;
3965 res = kzalloc(sizeof *res, GFP_KERNEL);
3969 spin_lock_irq(&rqp->mcg_spl);
3970 if (find_gid(dev, slave, rqp, gid)) {
3974 memcpy(res->gid, gid, 16);
3977 res->reg_id = reg_id;
3978 list_add_tail(&res->list, &rqp->mcg_list);
3981 spin_unlock_irq(&rqp->mcg_spl);
3986 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3987 u8 *gid, enum mlx4_protocol prot,
3988 enum mlx4_steer_type steer, u64 *reg_id)
3990 struct res_gid *res;
3993 spin_lock_irq(&rqp->mcg_spl);
3994 res = find_gid(dev, slave, rqp, gid);
3995 if (!res || res->prot != prot || res->steer != steer)
3998 *reg_id = res->reg_id;
3999 list_del(&res->list);
4003 spin_unlock_irq(&rqp->mcg_spl);
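/*
 * qp_attach()/qp_detach(): dispatch on the steering mode.  For
 * device-managed flow steering, byte 5 of the GID carries the port, which
 * is translated for the slave before mlx4_trans_to_dmfs_attach() is used;
 * for B0 steering, Ethernet attaches likewise get the port translated and
 * then go through mlx4_qp_attach_common()/mlx4_qp_detach_common().
 */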
4008 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4009 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4010 enum mlx4_steer_type type, u64 *reg_id)
4012 switch (dev->caps.steering_mode) {
4013 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4014 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4017 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4018 block_loopback, prot,
4021 case MLX4_STEERING_MODE_B0:
4022 if (prot == MLX4_PROT_ETH) {
4023 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4028 return mlx4_qp_attach_common(dev, qp, gid,
4029 block_loopback, prot, type);
4035 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4036 u8 gid[16], enum mlx4_protocol prot,
4037 enum mlx4_steer_type type, u64 reg_id)
4039 switch (dev->caps.steering_mode) {
4040 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4041 return mlx4_flow_detach(dev, reg_id);
4042 case MLX4_STEERING_MODE_B0:
4043 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4049 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4050 u8 *gid, enum mlx4_protocol prot)
4054 if (prot != MLX4_PROT_ETH)
4057 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4058 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4059 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4068 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4069 struct mlx4_vhcr *vhcr,
4070 struct mlx4_cmd_mailbox *inbox,
4071 struct mlx4_cmd_mailbox *outbox,
4072 struct mlx4_cmd_info *cmd)
4074 struct mlx4_qp qp; /* dummy for calling attach/detach */
4075 u8 *gid = inbox->buf;
4076 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4081 int attach = vhcr->op_modifier;
4082 int block_loopback = vhcr->in_modifier >> 31;
4083 u8 steer_type_mask = 2;
4084 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4086 qpn = vhcr->in_modifier & 0xffffff;
4087 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4093 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4096 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4099 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4103 err = mlx4_adjust_port(dev, slave, gid, prot);
4107 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4111 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4113 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4116 put_res(dev, slave, qpn, RES_QP);
4120 qp_detach(dev, &qp, gid, prot, type, reg_id);
4122 put_res(dev, slave, qpn, RES_QP);
4127 * MAC validation for Flow Steering rules.
4128 * VF can attach rules only with a mac address which is assigned to it.
4130 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4131 struct list_head *rlist)
4133 struct mac_res *res, *tmp;
4136 /* make sure it isn't a multicast or broadcast MAC */
4137 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4138 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4139 list_for_each_entry_safe(res, tmp, rlist, list) {
4140 be_mac = cpu_to_be64(res->mac << 16);
4141 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4144 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4145 eth_header->eth.dst_mac, slave);
4151 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4152 struct _rule_hw *eth_header)
4154 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4155 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4156 struct mlx4_net_trans_rule_hw_eth *eth =
4157 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4158 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4159 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4160 next_rule->rsvd == 0;
4163 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4168 * In case of missing eth header, append eth header with a MAC address
4169 * assigned to the VF.
4171 static int add_eth_header(struct mlx4_dev *dev, int slave,
4172 struct mlx4_cmd_mailbox *inbox,
4173 struct list_head *rlist, int header_id)
4175 struct mac_res *res, *tmp;
4177 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4178 struct mlx4_net_trans_rule_hw_eth *eth_header;
4179 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4180 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4182 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4184 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4186 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4188 /* Clear a space in the inbox for eth header */
4189 switch (header_id) {
4190 case MLX4_NET_TRANS_RULE_ID_IPV4:
4192 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4193 memmove(ip_header, eth_header,
4194 sizeof(*ip_header) + sizeof(*l4_header));
4196 case MLX4_NET_TRANS_RULE_ID_TCP:
4197 case MLX4_NET_TRANS_RULE_ID_UDP:
4198 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4200 memmove(l4_header, eth_header, sizeof(*l4_header));
4205 list_for_each_entry_safe(res, tmp, rlist, list) {
4206 if (port == res->port) {
4207 be_mac = cpu_to_be64(res->mac << 16);
4212 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4217 memset(eth_header, 0, sizeof(*eth_header));
4218 eth_header->size = sizeof(*eth_header) >> 2;
4219 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4220 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4221 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4227 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4228 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4229 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4230 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4231 struct mlx4_vhcr *vhcr,
4232 struct mlx4_cmd_mailbox *inbox,
4233 struct mlx4_cmd_mailbox *outbox,
4234 struct mlx4_cmd_info *cmd_info)
4237 u32 qpn = vhcr->in_modifier & 0xffffff;
4241 u64 pri_addr_path_mask;
4242 struct mlx4_update_qp_context *cmd;
4245 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4247 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4248 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4249 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4252 if ((pri_addr_path_mask &
4253 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4254 !(dev->caps.flags2 &
4255 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4257 "Src check LB for slave %d isn't supported\n",
4262 /* Just change the smac for the QP */
4263 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4265 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4269 port = (rqp->sched_queue >> 6 & 1) + 1;
4271 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4272 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4273 err = mac_find_smac_ix_in_slave(dev, slave, port,
4277 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4283 err = mlx4_cmd(dev, inbox->dma,
4284 vhcr->in_modifier, 0,
4285 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4288 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4293 put_res(dev, slave, qpn, RES_QP);
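/*
 * qp_attach_mbox_size: total length of a flow steering attach mailbox,
 * i.e. the control segment plus the sum of the rule headers that follow
 * (each header's size field counts 32-bit words).  The result is used to
 * save a copy of the mailbox so the rule can be mirrored onto the other
 * port when the device is bonded.
 */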
4297 static u32 qp_attach_mbox_size(void *mbox)
4299 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4300 struct _rule_hw *rule_header;
4302 rule_header = (struct _rule_hw *)(mbox + size);
4304 while (rule_header->size) {
4305 size += rule_header->size * sizeof(u32);
4311 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
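/*
 * QP_FLOW_STEERING_ATTACH wrapper (device-managed steering only): the port
 * in the control segment is translated for the slave and the target QP must
 * belong to it.  For VFs, an Ethernet header in the rule is validated
 * against the VF's registered MACs, and bare L3/L4 rules get an Ethernet
 * header synthesized via add_eth_header().  On success the new registration
 * id is tracked as a RES_FS_RULE, the attach mailbox is saved (with the
 * port flipped) for mirroring in bonded mode, and the QP's reference count
 * is incremented.
 */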
4313 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4314 struct mlx4_vhcr *vhcr,
4315 struct mlx4_cmd_mailbox *inbox,
4316 struct mlx4_cmd_mailbox *outbox,
4317 struct mlx4_cmd_info *cmd)
4320 struct mlx4_priv *priv = mlx4_priv(dev);
4321 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4322 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4326 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4327 struct _rule_hw *rule_header;
4329 struct res_fs_rule *rrule;
4332 if (dev->caps.steering_mode !=
4333 MLX4_STEERING_MODE_DEVICE_MANAGED)
4336 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4337 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4341 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4342 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4344 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4347 rule_header = (struct _rule_hw *)(ctrl + 1);
4348 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4350 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4351 handle_eth_header_mcast_prio(ctrl, rule_header);
4353 if (slave == dev->caps.function)
4356 switch (header_id) {
4357 case MLX4_NET_TRANS_RULE_ID_ETH:
4358 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4363 case MLX4_NET_TRANS_RULE_ID_IB:
4365 case MLX4_NET_TRANS_RULE_ID_IPV4:
4366 case MLX4_NET_TRANS_RULE_ID_TCP:
4367 case MLX4_NET_TRANS_RULE_ID_UDP:
4368 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4369 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4373 vhcr->in_modifier +=
4374 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4377 pr_err("Corrupted mailbox\n");
4383 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4384 vhcr->in_modifier, 0,
4385 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4391 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4393 mlx4_err(dev, "Fail to add flow steering resources\n");
4397 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4401 mbox_size = qp_attach_mbox_size(inbox->buf);
4402 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4403 if (!rrule->mirr_mbox) {
4407 rrule->mirr_mbox_size = mbox_size;
4408 rrule->mirr_rule_id = 0;
4409 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4411 /* set different port */
4412 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4413 if (ctrl->port == 1)
4418 if (mlx4_is_bonded(dev))
4419 mlx4_do_mirror_rule(dev, rrule);
4421 atomic_inc(&rqp->ref_count);
4424 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4426 /* detach rule on error */
4428 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4429 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4432 put_res(dev, slave, qpn, RES_QP);
4436 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4440 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4442 mlx4_err(dev, "Failed to remove flow steering resources\n");
4446 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4447 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4451 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4452 struct mlx4_vhcr *vhcr,
4453 struct mlx4_cmd_mailbox *inbox,
4454 struct mlx4_cmd_mailbox *outbox,
4455 struct mlx4_cmd_info *cmd)
4459 struct res_fs_rule *rrule;
4462 if (dev->caps.steering_mode !=
4463 MLX4_STEERING_MODE_DEVICE_MANAGED)
4466 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4470 if (!rrule->mirr_mbox) {
4471 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4472 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4475 mirr_reg_id = rrule->mirr_rule_id;
4476 kfree(rrule->mirr_mbox);
4478 /* Release the rule from busy state before removal */
4479 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4480 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4484 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4485 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4487 mlx4_err(dev, "Failed to get resource of mirror rule\n");
4489 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4490 mlx4_undo_mirror_rule(dev, rrule);
4493 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4495 mlx4_err(dev, "Failed to remove flow steering resources\n");
4499 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4500 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4503 atomic_dec(&rqp->ref_count);
4505 put_res(dev, slave, rrule->qpn, RES_QP);
4510 BUSY_MAX_RETRIES = 10
4513 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4514 struct mlx4_vhcr *vhcr,
4515 struct mlx4_cmd_mailbox *inbox,
4516 struct mlx4_cmd_mailbox *outbox,
4517 struct mlx4_cmd_info *cmd)
4520 int index = vhcr->in_modifier & 0xffff;
4522 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4526 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4527 put_res(dev, slave, index, RES_COUNTER);
4531 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4533 struct res_gid *rgid;
4534 struct res_gid *tmp;
4535 struct mlx4_qp qp; /* dummy for calling attach/detach */
4537 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4538 switch (dev->caps.steering_mode) {
4539 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4540 mlx4_flow_detach(dev, rgid->reg_id);
4542 case MLX4_STEERING_MODE_B0:
4543 qp.qpn = rqp->local_qpn;
4544 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4545 rgid->prot, rgid->steer);
4548 list_del(&rgid->list);
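/*
 * Slave cleanup: _move_all_busy() walks the slave's resource list of the
 * given type and claims every idle entry by setting it to RES_ANY_BUSY,
 * returning how many entries could not be claimed; move_all_busy() retries
 * this for roughly five seconds before logging whatever is still busy.
 * The rem_slave_*() functions below then walk each resource's state
 * machine downwards, issuing the matching HW2SW/2RST commands and freeing
 * ICM and ranges along the way.
 */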
4553 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4554 enum mlx4_resource type, int print)
4556 struct mlx4_priv *priv = mlx4_priv(dev);
4557 struct mlx4_resource_tracker *tracker =
4558 &priv->mfunc.master.res_tracker;
4559 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4560 struct res_common *r;
4561 struct res_common *tmp;
4565 spin_lock_irq(mlx4_tlock(dev));
4566 list_for_each_entry_safe(r, tmp, rlist, list) {
4567 if (r->owner == slave) {
4569 if (r->state == RES_ANY_BUSY) {
4572 "%s id 0x%llx is busy\n",
4577 r->from_state = r->state;
4578 r->state = RES_ANY_BUSY;
4584 spin_unlock_irq(mlx4_tlock(dev));
4589 static int move_all_busy(struct mlx4_dev *dev, int slave,
4590 enum mlx4_resource type)
4592 unsigned long begin;
4597 busy = _move_all_busy(dev, slave, type, 0);
4598 if (time_after(jiffies, begin + 5 * HZ))
4605 busy = _move_all_busy(dev, slave, type, 1);
4609 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4611 struct mlx4_priv *priv = mlx4_priv(dev);
4612 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4613 struct list_head *qp_list =
4614 &tracker->slave_list[slave].res_list[RES_QP];
4622 err = move_all_busy(dev, slave, RES_QP);
4624 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4627 spin_lock_irq(mlx4_tlock(dev));
4628 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4629 spin_unlock_irq(mlx4_tlock(dev));
4630 if (qp->com.owner == slave) {
4631 qpn = qp->com.res_id;
4632 detach_qp(dev, slave, qp);
4633 state = qp->com.from_state;
4634 while (state != 0) {
4636 case RES_QP_RESERVED:
4637 spin_lock_irq(mlx4_tlock(dev));
4638 rb_erase(&qp->com.node,
4639 &tracker->res_tree[RES_QP]);
4640 list_del(&qp->com.list);
4641 spin_unlock_irq(mlx4_tlock(dev));
4642 if (!valid_reserved(dev, slave, qpn)) {
4643 __mlx4_qp_release_range(dev, qpn, 1);
4644 mlx4_release_resource(dev, slave,
4651 if (!valid_reserved(dev, slave, qpn))
4652 __mlx4_qp_free_icm(dev, qpn);
4653 state = RES_QP_RESERVED;
4657 err = mlx4_cmd(dev, in_param,
4660 MLX4_CMD_TIME_CLASS_A,
4663 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4664 slave, qp->local_qpn);
4665 atomic_dec(&qp->rcq->ref_count);
4666 atomic_dec(&qp->scq->ref_count);
4667 atomic_dec(&qp->mtt->ref_count);
4669 atomic_dec(&qp->srq->ref_count);
4670 state = RES_QP_MAPPED;
4677 spin_lock_irq(mlx4_tlock(dev));
4679 spin_unlock_irq(mlx4_tlock(dev));
4682 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4684 struct mlx4_priv *priv = mlx4_priv(dev);
4685 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4686 struct list_head *srq_list =
4687 &tracker->slave_list[slave].res_list[RES_SRQ];
4688 struct res_srq *srq;
4689 struct res_srq *tmp;
4696 err = move_all_busy(dev, slave, RES_SRQ);
4698 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4701 spin_lock_irq(mlx4_tlock(dev));
4702 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4703 spin_unlock_irq(mlx4_tlock(dev));
4704 if (srq->com.owner == slave) {
4705 srqn = srq->com.res_id;
4706 state = srq->com.from_state;
4707 while (state != 0) {
4709 case RES_SRQ_ALLOCATED:
4710 __mlx4_srq_free_icm(dev, srqn);
4711 spin_lock_irq(mlx4_tlock(dev));
4712 rb_erase(&srq->com.node,
4713 &tracker->res_tree[RES_SRQ]);
4714 list_del(&srq->com.list);
4715 spin_unlock_irq(mlx4_tlock(dev));
4716 mlx4_release_resource(dev, slave,
4724 err = mlx4_cmd(dev, in_param, srqn, 1,
4726 MLX4_CMD_TIME_CLASS_A,
4729 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4732 atomic_dec(&srq->mtt->ref_count);
4734 atomic_dec(&srq->cq->ref_count);
4735 state = RES_SRQ_ALLOCATED;
4743 spin_lock_irq(mlx4_tlock(dev));
4745 spin_unlock_irq(mlx4_tlock(dev));
4748 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4750 struct mlx4_priv *priv = mlx4_priv(dev);
4751 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4752 struct list_head *cq_list =
4753 &tracker->slave_list[slave].res_list[RES_CQ];
4762 err = move_all_busy(dev, slave, RES_CQ);
4764 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4767 spin_lock_irq(mlx4_tlock(dev));
4768 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4769 spin_unlock_irq(mlx4_tlock(dev));
4770 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4771 cqn = cq->com.res_id;
4772 state = cq->com.from_state;
4773 while (state != 0) {
4775 case RES_CQ_ALLOCATED:
4776 __mlx4_cq_free_icm(dev, cqn);
4777 spin_lock_irq(mlx4_tlock(dev));
4778 rb_erase(&cq->com.node,
4779 &tracker->res_tree[RES_CQ]);
4780 list_del(&cq->com.list);
4781 spin_unlock_irq(mlx4_tlock(dev));
4782 mlx4_release_resource(dev, slave,
4790 err = mlx4_cmd(dev, in_param, cqn, 1,
4792 MLX4_CMD_TIME_CLASS_A,
4795 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4797 atomic_dec(&cq->mtt->ref_count);
4798 state = RES_CQ_ALLOCATED;
4806 spin_lock_irq(mlx4_tlock(dev));
4808 spin_unlock_irq(mlx4_tlock(dev));
4811 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4813 struct mlx4_priv *priv = mlx4_priv(dev);
4814 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4815 struct list_head *mpt_list =
4816 &tracker->slave_list[slave].res_list[RES_MPT];
4817 struct res_mpt *mpt;
4818 struct res_mpt *tmp;
4825 err = move_all_busy(dev, slave, RES_MPT);
4827 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4830 spin_lock_irq(mlx4_tlock(dev));
4831 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4832 spin_unlock_irq(mlx4_tlock(dev));
4833 if (mpt->com.owner == slave) {
4834 mptn = mpt->com.res_id;
4835 state = mpt->com.from_state;
4836 while (state != 0) {
4838 case RES_MPT_RESERVED:
4839 __mlx4_mpt_release(dev, mpt->key);
4840 spin_lock_irq(mlx4_tlock(dev));
4841 rb_erase(&mpt->com.node,
4842 &tracker->res_tree[RES_MPT]);
4843 list_del(&mpt->com.list);
4844 spin_unlock_irq(mlx4_tlock(dev));
4845 mlx4_release_resource(dev, slave,
4851 case RES_MPT_MAPPED:
4852 __mlx4_mpt_free_icm(dev, mpt->key);
4853 state = RES_MPT_RESERVED;
4858 err = mlx4_cmd(dev, in_param, mptn, 0,
4860 MLX4_CMD_TIME_CLASS_A,
4863 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4866 atomic_dec(&mpt->mtt->ref_count);
4867 state = RES_MPT_MAPPED;
4874 spin_lock_irq(mlx4_tlock(dev));
4876 spin_unlock_irq(mlx4_tlock(dev));
4879 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4881 struct mlx4_priv *priv = mlx4_priv(dev);
4882 struct mlx4_resource_tracker *tracker =
4883 &priv->mfunc.master.res_tracker;
4884 struct list_head *mtt_list =
4885 &tracker->slave_list[slave].res_list[RES_MTT];
4886 struct res_mtt *mtt;
4887 struct res_mtt *tmp;
4893 err = move_all_busy(dev, slave, RES_MTT);
4895 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4898 spin_lock_irq(mlx4_tlock(dev));
4899 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4900 spin_unlock_irq(mlx4_tlock(dev));
4901 if (mtt->com.owner == slave) {
4902 base = mtt->com.res_id;
4903 state = mtt->com.from_state;
4904 while (state != 0) {
4906 case RES_MTT_ALLOCATED:
4907 __mlx4_free_mtt_range(dev, base,
4909 spin_lock_irq(mlx4_tlock(dev));
4910 rb_erase(&mtt->com.node,
4911 &tracker->res_tree[RES_MTT]);
4912 list_del(&mtt->com.list);
4913 spin_unlock_irq(mlx4_tlock(dev));
4914 mlx4_release_resource(dev, slave, RES_MTT,
4915 1 << mtt->order, 0);
4925 spin_lock_irq(mlx4_tlock(dev));
4927 spin_unlock_irq(mlx4_tlock(dev));
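/*
 * mlx4_do_mirror_rule: in bonded (HA) mode a flow steering rule is mirrored
 * to the other port by replaying its saved attach mailbox (whose port field
 * was flipped when the original rule was attached).  The new registration
 * id becomes a RES_FS_RULE owned by the same slave and is remembered in
 * mirr_rule_id; the mirror itself carries no mailbox copy, which is how it
 * is distinguished from an original rule elsewhere in this file.
 */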
4930 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4932 struct mlx4_cmd_mailbox *mailbox;
4934 struct res_fs_rule *mirr_rule;
4937 mailbox = mlx4_alloc_cmd_mailbox(dev);
4938 if (IS_ERR(mailbox))
4939 return PTR_ERR(mailbox);
4941 if (!fs_rule->mirr_mbox) {
4942 mlx4_err(dev, "rule mirroring mailbox is null\n");
4945 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4946 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4947 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4949 mlx4_free_cmd_mailbox(dev, mailbox);
4954 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4958 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4962 fs_rule->mirr_rule_id = reg_id;
4963 mirr_rule->mirr_rule_id = 0;
4964 mirr_rule->mirr_mbox_size = 0;
4965 mirr_rule->mirr_mbox = NULL;
4966 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4970 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4972 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4973 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4978 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4980 struct mlx4_priv *priv = mlx4_priv(dev);
4981 struct mlx4_resource_tracker *tracker =
4982 &priv->mfunc.master.res_tracker;
4983 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4985 struct res_fs_rule *fs_rule;
4987 LIST_HEAD(mirr_list);
4989 for (p = rb_first(root); p; p = rb_next(p)) {
4990 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
4991 if ((bond && fs_rule->mirr_mbox_size) ||
4992 (!bond && !fs_rule->mirr_mbox_size))
4993 list_add_tail(&fs_rule->mirr_list, &mirr_list);
4996 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
4998 err += mlx4_do_mirror_rule(dev, fs_rule);
5000 err += mlx4_undo_mirror_rule(dev, fs_rule);
5005 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5007 return mlx4_mirror_fs_rules(dev, true);
5010 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5012 return mlx4_mirror_fs_rules(dev, false);
5015 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5017 struct mlx4_priv *priv = mlx4_priv(dev);
5018 struct mlx4_resource_tracker *tracker =
5019 &priv->mfunc.master.res_tracker;
5020 struct list_head *fs_rule_list =
5021 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5022 struct res_fs_rule *fs_rule;
5023 struct res_fs_rule *tmp;
5028 err = move_all_busy(dev, slave, RES_FS_RULE);
5030 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5033 spin_lock_irq(mlx4_tlock(dev));
5034 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5035 spin_unlock_irq(mlx4_tlock(dev));
5036 if (fs_rule->com.owner == slave) {
5037 base = fs_rule->com.res_id;
5038 state = fs_rule->com.from_state;
5039 while (state != 0) {
5041 case RES_FS_RULE_ALLOCATED:
5043 err = mlx4_cmd(dev, base, 0, 0,
5044 MLX4_QP_FLOW_STEERING_DETACH,
5045 MLX4_CMD_TIME_CLASS_A,
5048 spin_lock_irq(mlx4_tlock(dev));
5049 rb_erase(&fs_rule->com.node,
5050 &tracker->res_tree[RES_FS_RULE]);
5051 list_del(&fs_rule->com.list);
5052 spin_unlock_irq(mlx4_tlock(dev));
5062 spin_lock_irq(mlx4_tlock(dev));
5064 spin_unlock_irq(mlx4_tlock(dev));
5067 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5069 struct mlx4_priv *priv = mlx4_priv(dev);
5070 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5071 struct list_head *eq_list =
5072 &tracker->slave_list[slave].res_list[RES_EQ];
5080 err = move_all_busy(dev, slave, RES_EQ);
5082 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5085 spin_lock_irq(mlx4_tlock(dev));
5086 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5087 spin_unlock_irq(mlx4_tlock(dev));
5088 if (eq->com.owner == slave) {
5089 eqn = eq->com.res_id;
5090 state = eq->com.from_state;
5091 while (state != 0) {
5093 case RES_EQ_RESERVED:
5094 spin_lock_irq(mlx4_tlock(dev));
5095 rb_erase(&eq->com.node,
5096 &tracker->res_tree[RES_EQ]);
5097 list_del(&eq->com.list);
5098 spin_unlock_irq(mlx4_tlock(dev));
5104 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5105 1, MLX4_CMD_HW2SW_EQ,
5106 MLX4_CMD_TIME_CLASS_A,
5109 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5110 slave, eqn & 0x3ff);
5111 atomic_dec(&eq->mtt->ref_count);
5112 state = RES_EQ_RESERVED;
5120 spin_lock_irq(mlx4_tlock(dev));
5122 spin_unlock_irq(mlx4_tlock(dev));
5125 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5127 struct mlx4_priv *priv = mlx4_priv(dev);
5128 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5129 struct list_head *counter_list =
5130 &tracker->slave_list[slave].res_list[RES_COUNTER];
5131 struct res_counter *counter;
5132 struct res_counter *tmp;
5134 int *counters_arr = NULL;
5137 err = move_all_busy(dev, slave, RES_COUNTER);
5139 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5142 counters_arr = kmalloc_array(dev->caps.max_counters,
5143 sizeof(*counters_arr), GFP_KERNEL);
5150 spin_lock_irq(mlx4_tlock(dev));
5151 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5152 if (counter->com.owner == slave) {
5153 counters_arr[i++] = counter->com.res_id;
5154 rb_erase(&counter->com.node,
5155 &tracker->res_tree[RES_COUNTER]);
5156 list_del(&counter->com.list);
5160 spin_unlock_irq(mlx4_tlock(dev));
5163 __mlx4_counter_free(dev, counters_arr[j++]);
5164 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5168 kfree(counters_arr);
5171 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5173 struct mlx4_priv *priv = mlx4_priv(dev);
5174 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5175 struct list_head *xrcdn_list =
5176 &tracker->slave_list[slave].res_list[RES_XRCD];
5177 struct res_xrcdn *xrcd;
5178 struct res_xrcdn *tmp;
5182 err = move_all_busy(dev, slave, RES_XRCD);
5184 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5187 spin_lock_irq(mlx4_tlock(dev));
5188 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5189 if (xrcd->com.owner == slave) {
5190 xrcdn = xrcd->com.res_id;
5191 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5192 list_del(&xrcd->com.list);
5194 __mlx4_xrcd_free(dev, xrcdn);
5197 spin_unlock_irq(mlx4_tlock(dev));
5200 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5202 struct mlx4_priv *priv = mlx4_priv(dev);
5203 mlx4_reset_roce_gids(dev, slave);
5204 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5205 rem_slave_vlans(dev, slave);
5206 rem_slave_macs(dev, slave);
5207 rem_slave_fs_rule(dev, slave);
5208 rem_slave_qps(dev, slave);
5209 rem_slave_srqs(dev, slave);
5210 rem_slave_cqs(dev, slave);
5211 rem_slave_mrs(dev, slave);
5212 rem_slave_eqs(dev, slave);
5213 rem_slave_mtts(dev, slave);
5214 rem_slave_counters(dev, slave);
5215 rem_slave_xrcdns(dev, slave);
5216 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
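/*
 * mlx4_vf_immed_vlan_work_handler: deferred work that enforces a VST
 * vlan/QoS change (or a return to VGT) on a VF's Ethernet QPs.  Every QP
 * of the slave that is in RES_QP_HW, has been through INIT2RTR, is not a
 * reserved or RSS QP and sits on the affected port is updated through
 * UPDATE_QP: for MLX4_VGT the values saved at INIT2RTR time are restored,
 * otherwise the forced vlan index, vlan control bits and the QoS bits of
 * sched_queue are programmed.  Failures are counted and reported, and the
 * previously registered vlan is unregistered only when no errors occurred
 * and an original vlan index exists.
 */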
5219 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5221 struct mlx4_vf_immed_vlan_work *work =
5222 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5223 struct mlx4_cmd_mailbox *mailbox;
5224 struct mlx4_update_qp_context *upd_context;
5225 struct mlx4_dev *dev = &work->priv->dev;
5226 struct mlx4_resource_tracker *tracker =
5227 &work->priv->mfunc.master.res_tracker;
5228 struct list_head *qp_list =
5229 &tracker->slave_list[work->slave].res_list[RES_QP];
5232 u64 qp_path_mask_vlan_ctrl =
5233 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5234 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5235 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5236 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5237 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5238 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5240 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5241 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5242 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5243 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5244 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5245 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5246 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5249 int port, errors = 0;
5252 if (mlx4_is_slave(dev)) {
5253 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5258 mailbox = mlx4_alloc_cmd_mailbox(dev);
5259 if (IS_ERR(mailbox))
5261 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5262 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5263 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5264 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5265 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5266 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5267 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5268 else if (!work->vlan_id)
5269 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5270 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5272 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5273 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5274 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5276 upd_context = mailbox->buf;
5277 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5279 spin_lock_irq(mlx4_tlock(dev));
5280 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5281 spin_unlock_irq(mlx4_tlock(dev));
5282 if (qp->com.owner == work->slave) {
5283 if (qp->com.from_state != RES_QP_HW ||
5284 !qp->sched_queue || /* no INIT2RTR trans yet */
5285 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5286 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5287 spin_lock_irq(mlx4_tlock(dev));
5290 port = (qp->sched_queue >> 6 & 1) + 1;
5291 if (port != work->port) {
5292 spin_lock_irq(mlx4_tlock(dev));
5295 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5296 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5298 upd_context->primary_addr_path_mask =
5299 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5300 if (work->vlan_id == MLX4_VGT) {
5301 upd_context->qp_context.param3 = qp->param3;
5302 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5303 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5304 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5305 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5306 upd_context->qp_context.pri_path.feup = qp->feup;
5307 upd_context->qp_context.pri_path.sched_queue =
5310 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5311 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5312 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5313 upd_context->qp_context.pri_path.fvl_rx =
5314 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5315 upd_context->qp_context.pri_path.fl =
5316 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5317 upd_context->qp_context.pri_path.feup =
5318 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5319 upd_context->qp_context.pri_path.sched_queue =
5320 qp->sched_queue & 0xC7;
5321 upd_context->qp_context.pri_path.sched_queue |=
5322 ((work->qos & 0x7) << 3);
5323 upd_context->qp_mask |=
5325 MLX4_UPD_QP_MASK_QOS_VPP);
5326 upd_context->qp_context.qos_vport =
5330 err = mlx4_cmd(dev, mailbox->dma,
5331 qp->local_qpn & 0xffffff,
5332 0, MLX4_CMD_UPDATE_QP,
5333 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5335 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5336 work->slave, port, qp->local_qpn, err);
5340 spin_lock_irq(mlx4_tlock(dev));
5342 spin_unlock_irq(mlx4_tlock(dev));
5343 mlx4_free_cmd_mailbox(dev, mailbox);
5346 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5347 errors, work->slave, work->port);
5349 /* unregister previous vlan_id if needed and we had no errors
5350 * while updating the QPs
5352 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5353 NO_INDX != work->orig_vlan_ix)
5354 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5355 work->orig_vlan_id);