/* drivers/net/ethernet/mellanox/mlx4/resource_tracker.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
        return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
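
/*
 * Illustrative sketch (editorial, not driver code): res_tracker_lookup()
 * and res_tracker_insert() implement a plain rbtree keyed by res_id, and
 * callers pair them under the tracker lock, e.g.:
 *
 *      spin_lock_irq(mlx4_tlock(dev));
 *      if (!res_tracker_lookup(root, res->res_id))
 *              err = res_tracker_insert(root, res); // -EEXIST on duplicate id
 *      spin_unlock_irq(mlx4_tlock(dev));
 *
 * All callers below follow this pattern; see add_res_range().
 */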

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
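
/*
 * Worked example (illustrative numbers only): with quota = 60,
 * guaranteed = 10 and allocated = 8, a request for count = 5 splits as
 * from_rsvd = guaranteed - allocated = 2 and from_free = 3; the grant
 * succeeds only if the shared free pool can cover from_free without
 * dipping below the space still held back for other functions'
 * guarantees, i.e. free - from_free >= reserved.
 */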

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
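
/*
 * Worked example (illustrative numbers only): with num_instances = 120
 * and num_vfs = 3, each function gets guaranteed = 120 / (2 * 4) = 15
 * and quota = 120 / 2 + 15 = 75. The four guarantees together reserve
 * half the pool (4 * 15 = 60), while the other half is shared
 * first-come first-served up to each function's quota.
 */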

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
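
/*
 * Editorial note: the hard-coded offsets above index into the command
 * mailbox holding the QP context; byte 35 carries the slave's pkey
 * index and bit 6 of the sched_queue byte selects the physical port
 * ((sched >> 6 & 1) + 1 maps 0/1 to port 1/2), so the function simply
 * rewrites the virtual pkey index to the physical one via
 * priv->virt2phys_pkey.
 */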

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
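
/*
 * Illustrative sketch (editorial, not driver code): get_res()/put_res()
 * form a busy-marking critical section around FW commands, e.g.:
 *
 *      err = get_res(dev, slave, cqn, RES_CQ, &cq);  // marks RES_ANY_BUSY
 *      if (err)
 *              return err;
 *      ... issue the FW command using cq ...
 *      put_res(dev, slave, cqn, RES_CQ);             // restores from_state
 *
 * Ownership is enforced here (-EPERM when the resource belongs to a
 * different slave), so callers do not re-check r->owner themselves.
 */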

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* unwind the entries already linked into the tree and the slave
         * list; i is the array index here, not the resource id
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}
1149
1150 static int remove_counter_ok(struct res_counter *res)
1151 {
1152         if (res->com.state == RES_COUNTER_BUSY)
1153                 return -EBUSY;
1154         else if (res->com.state != RES_COUNTER_ALLOCATED)
1155                 return -EPERM;
1156
1157         return 0;
1158 }
1159
1160 static int remove_xrcdn_ok(struct res_xrcdn *res)
1161 {
1162         if (res->com.state == RES_XRCD_BUSY)
1163                 return -EBUSY;
1164         else if (res->com.state != RES_XRCD_ALLOCATED)
1165                 return -EPERM;
1166
1167         return 0;
1168 }
1169
1170 static int remove_fs_rule_ok(struct res_fs_rule *res)
1171 {
1172         if (res->com.state == RES_FS_RULE_BUSY)
1173                 return -EBUSY;
1174         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1175                 return -EPERM;
1176
1177         return 0;
1178 }
1179
1180 static int remove_cq_ok(struct res_cq *res)
1181 {
1182         if (res->com.state == RES_CQ_BUSY)
1183                 return -EBUSY;
1184         else if (res->com.state != RES_CQ_ALLOCATED)
1185                 return -EPERM;
1186
1187         return 0;
1188 }
1189
1190 static int remove_srq_ok(struct res_srq *res)
1191 {
1192         if (res->com.state == RES_SRQ_BUSY)
1193                 return -EBUSY;
1194         else if (res->com.state != RES_SRQ_ALLOCATED)
1195                 return -EPERM;
1196
1197         return 0;
1198 }
1199
1200 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1201 {
1202         switch (type) {
1203         case RES_QP:
1204                 return remove_qp_ok((struct res_qp *)res);
1205         case RES_CQ:
1206                 return remove_cq_ok((struct res_cq *)res);
1207         case RES_SRQ:
1208                 return remove_srq_ok((struct res_srq *)res);
1209         case RES_MPT:
1210                 return remove_mpt_ok((struct res_mpt *)res);
1211         case RES_MTT:
1212                 return remove_mtt_ok((struct res_mtt *)res, extra);
1213         case RES_MAC:
1214                 return -ENOSYS;
1215         case RES_EQ:
1216                 return remove_eq_ok((struct res_eq *)res);
1217         case RES_COUNTER:
1218                 return remove_counter_ok((struct res_counter *)res);
1219         case RES_XRCD:
1220                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1221         case RES_FS_RULE:
1222                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1223         default:
1224                 return -EINVAL;
1225         }
1226 }
1227
1228 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1229                          enum mlx4_resource type, int extra)
1230 {
1231         u64 i;
1232         int err;
1233         struct mlx4_priv *priv = mlx4_priv(dev);
1234         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1235         struct res_common *r;
1236
1237         spin_lock_irq(mlx4_tlock(dev));
1238         for (i = base; i < base + count; ++i) {
1239                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1240                 if (!r) {
1241                         err = -ENOENT;
1242                         goto out;
1243                 }
1244                 if (r->owner != slave) {
1245                         err = -EPERM;
1246                         goto out;
1247                 }
1248                 err = remove_ok(r, type, extra);
1249                 if (err)
1250                         goto out;
1251         }
1252
1253         for (i = base; i < base + count; ++i) {
1254                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1255                 rb_erase(&r->node, &tracker->res_tree[type]);
1256                 list_del(&r->list);
1257                 kfree(r);
1258         }
1259         err = 0;
1260
1261 out:
1262         spin_unlock_irq(mlx4_tlock(dev));
1263
1264         return err;
1265 }
1266
1267 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1268                                 enum res_qp_states state, struct res_qp **qp,
1269                                 int alloc)
1270 {
1271         struct mlx4_priv *priv = mlx4_priv(dev);
1272         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1273         struct res_qp *r;
1274         int err = 0;
1275
1276         spin_lock_irq(mlx4_tlock(dev));
1277         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1278         if (!r)
1279                 err = -ENOENT;
1280         else if (r->com.owner != slave)
1281                 err = -EPERM;
1282         else {
1283                 switch (state) {
1284                 case RES_QP_BUSY:
1285                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1286                                  __func__, r->com.res_id);
1287                         err = -EBUSY;
1288                         break;
1289
1290                 case RES_QP_RESERVED:
1291                         if (r->com.state == RES_QP_MAPPED && !alloc)
1292                                 break;
1293
1294                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1295                         err = -EINVAL;
1296                         break;
1297
1298                 case RES_QP_MAPPED:
1299                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1300                             r->com.state == RES_QP_HW)
1301                                 break;
1302                         else {
1303                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1304                                           r->com.res_id);
1305                                 err = -EINVAL;
1306                         }
1307
1308                         break;
1309
1310                 case RES_QP_HW:
1311                         if (r->com.state != RES_QP_MAPPED)
1312                                 err = -EINVAL;
1313                         break;
1314                 default:
1315                         err = -EINVAL;
1316                 }
1317
1318                 if (!err) {
1319                         r->com.from_state = r->com.state;
1320                         r->com.to_state = state;
1321                         r->com.state = RES_QP_BUSY;
1322                         if (qp)
1323                                 *qp = r;
1324                 }
1325         }
1326
1327         spin_unlock_irq(mlx4_tlock(dev));
1328
1329         return err;
1330 }
1331
1332 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1333                                 enum res_mpt_states state, struct res_mpt **mpt)
1334 {
1335         struct mlx4_priv *priv = mlx4_priv(dev);
1336         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1337         struct res_mpt *r;
1338         int err = 0;
1339
1340         spin_lock_irq(mlx4_tlock(dev));
1341         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1342         if (!r)
1343                 err = -ENOENT;
1344         else if (r->com.owner != slave)
1345                 err = -EPERM;
1346         else {
1347                 switch (state) {
1348                 case RES_MPT_BUSY:
1349                         err = -EINVAL;
1350                         break;
1351
1352                 case RES_MPT_RESERVED:
1353                         if (r->com.state != RES_MPT_MAPPED)
1354                                 err = -EINVAL;
1355                         break;
1356
1357                 case RES_MPT_MAPPED:
1358                         if (r->com.state != RES_MPT_RESERVED &&
1359                             r->com.state != RES_MPT_HW)
1360                                 err = -EINVAL;
1361                         break;
1362
1363                 case RES_MPT_HW:
1364                         if (r->com.state != RES_MPT_MAPPED)
1365                                 err = -EINVAL;
1366                         break;
1367                 default:
1368                         err = -EINVAL;
1369                 }
1370
1371                 if (!err) {
1372                         r->com.from_state = r->com.state;
1373                         r->com.to_state = state;
1374                         r->com.state = RES_MPT_BUSY;
1375                         if (mpt)
1376                                 *mpt = r;
1377                 }
1378         }
1379
1380         spin_unlock_irq(mlx4_tlock(dev));
1381
1382         return err;
1383 }
1384
1385 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1386                                 enum res_eq_states state, struct res_eq **eq)
1387 {
1388         struct mlx4_priv *priv = mlx4_priv(dev);
1389         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1390         struct res_eq *r;
1391         int err = 0;
1392
1393         spin_lock_irq(mlx4_tlock(dev));
1394         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1395         if (!r)
1396                 err = -ENOENT;
1397         else if (r->com.owner != slave)
1398                 err = -EPERM;
1399         else {
1400                 switch (state) {
1401                 case RES_EQ_BUSY:
1402                         err = -EINVAL;
1403                         break;
1404
1405                 case RES_EQ_RESERVED:
1406                         if (r->com.state != RES_EQ_HW)
1407                                 err = -EINVAL;
1408                         break;
1409
1410                 case RES_EQ_HW:
1411                         if (r->com.state != RES_EQ_RESERVED)
1412                                 err = -EINVAL;
1413                         break;
1414
1415                 default:
1416                         err = -EINVAL;
1417                 }
1418
1419                 if (!err) {
1420                         r->com.from_state = r->com.state;
1421                         r->com.to_state = state;
1422                         r->com.state = RES_EQ_BUSY;
1423                         if (eq)
1424                                 *eq = r;
1425                 }
1426         }
1427
1428         spin_unlock_irq(mlx4_tlock(dev));
1429
1430         return err;
1431 }
1432
1433 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1434                                 enum res_cq_states state, struct res_cq **cq)
1435 {
1436         struct mlx4_priv *priv = mlx4_priv(dev);
1437         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1438         struct res_cq *r;
1439         int err;
1440
1441         spin_lock_irq(mlx4_tlock(dev));
1442         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1443         if (!r) {
1444                 err = -ENOENT;
1445         } else if (r->com.owner != slave) {
1446                 err = -EPERM;
1447         } else if (state == RES_CQ_ALLOCATED) {
1448                 if (r->com.state != RES_CQ_HW)
1449                         err = -EINVAL;
1450                 else if (atomic_read(&r->ref_count))
1451                         err = -EBUSY;
1452                 else
1453                         err = 0;
1454         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1455                 err = -EINVAL;
1456         } else {
1457                 err = 0;
1458         }
1459
1460         if (!err) {
1461                 r->com.from_state = r->com.state;
1462                 r->com.to_state = state;
1463                 r->com.state = RES_CQ_BUSY;
1464                 if (cq)
1465                         *cq = r;
1466         }
1467
1468         spin_unlock_irq(mlx4_tlock(dev));
1469
1470         return err;
1471 }
1472
1473 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1474                                  enum res_srq_states state, struct res_srq **srq)
1475 {
1476         struct mlx4_priv *priv = mlx4_priv(dev);
1477         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1478         struct res_srq *r;
1479         int err = 0;
1480
1481         spin_lock_irq(mlx4_tlock(dev));
1482         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1483         if (!r) {
1484                 err = -ENOENT;
1485         } else if (r->com.owner != slave) {
1486                 err = -EPERM;
1487         } else if (state == RES_SRQ_ALLOCATED) {
1488                 if (r->com.state != RES_SRQ_HW)
1489                         err = -EINVAL;
1490                 else if (atomic_read(&r->ref_count))
1491                         err = -EBUSY;
1492         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1493                 err = -EINVAL;
1494         }
1495
1496         if (!err) {
1497                 r->com.from_state = r->com.state;
1498                 r->com.to_state = state;
1499                 r->com.state = RES_SRQ_BUSY;
1500                 if (srq)
1501                         *srq = r;
1502         }
1503
1504         spin_unlock_irq(mlx4_tlock(dev));
1505
1506         return err;
1507 }
1508
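/*
 * Commit or undo a move started by one of the *_res_start_move_to()
 * helpers: abort restores from_state, end installs to_state.  The owner
 * is re-checked because the tracker lock was dropped in between.
 */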
1509 static void res_abort_move(struct mlx4_dev *dev, int slave,
1510                            enum mlx4_resource type, int id)
1511 {
1512         struct mlx4_priv *priv = mlx4_priv(dev);
1513         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1514         struct res_common *r;
1515
1516         spin_lock_irq(mlx4_tlock(dev));
1517         r = res_tracker_lookup(&tracker->res_tree[type], id);
1518         if (r && (r->owner == slave))
1519                 r->state = r->from_state;
1520         spin_unlock_irq(mlx4_tlock(dev));
1521 }
1522
1523 static void res_end_move(struct mlx4_dev *dev, int slave,
1524                          enum mlx4_resource type, int id)
1525 {
1526         struct mlx4_priv *priv = mlx4_priv(dev);
1527         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1528         struct res_common *r;
1529
1530         spin_lock_irq(mlx4_tlock(dev));
1531         r = res_tracker_lookup(&tracker->res_tree[type], id);
1532         if (r && (r->owner == slave))
1533                 r->state = r->to_state;
1534         spin_unlock_irq(mlx4_tlock(dev));
1535 }
1536
1537 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1538 {
1539         return mlx4_is_qp_reserved(dev, qpn) &&
1540                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1541 }
1542
1543 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1544 {
1545         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1546 }
1547
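/*
 * The *_alloc_res() wrappers below share one shape: charge the slave's
 * quota with mlx4_grant_resource(), allocate from the PF with the
 * corresponding __mlx4_* helper, then record ownership with
 * add_res_range(), unwinding the earlier steps on any failure.  For
 * RES_OP_RESERVE, the QP in_param packs the count in bits 0-23 and the
 * allocation flags in bits 24-31 of the low dword, with the alignment
 * in the high dword.
 */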
1548 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1549                         u64 in_param, u64 *out_param)
1550 {
1551         int err;
1552         int count;
1553         int align;
1554         int base;
1555         int qpn;
1556         u8 flags;
1557
1558         switch (op) {
1559         case RES_OP_RESERVE:
1560                 count = get_param_l(&in_param) & 0xffffff;
1561                 /* Turn off all unsupported QP allocation flags that the
1562                  * slave tries to set.
1563                  */
1564                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1565                 align = get_param_h(&in_param);
1566                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1567                 if (err)
1568                         return err;
1569
1570                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1571                 if (err) {
1572                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1573                         return err;
1574                 }
1575
1576                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1577                 if (err) {
1578                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1579                         __mlx4_qp_release_range(dev, base, count);
1580                         return err;
1581                 }
1582                 set_param_l(out_param, base);
1583                 break;
1584         case RES_OP_MAP_ICM:
1585                 qpn = get_param_l(&in_param) & 0x7fffff;
1586                 if (valid_reserved(dev, slave, qpn)) {
1587                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1588                         if (err)
1589                                 return err;
1590                 }
1591
1592                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1593                                            NULL, 1);
1594                 if (err)
1595                         return err;
1596
1597                 if (!fw_reserved(dev, qpn)) {
1598                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1599                         if (err) {
1600                                 res_abort_move(dev, slave, RES_QP, qpn);
1601                                 return err;
1602                         }
1603                 }
1604
1605                 res_end_move(dev, slave, RES_QP, qpn);
1606                 break;
1607
1608         default:
1609                 err = -EINVAL;
1610                 break;
1611         }
1612         return err;
1613 }
1614
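/*
 * MTTs are handed out in power-of-two ranges: in_param carries the
 * order, and quota accounting is done on the full 1 << order count.
 */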
1615 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1616                          u64 in_param, u64 *out_param)
1617 {
1618         int err = -EINVAL;
1619         int base;
1620         int order;
1621
1622         if (op != RES_OP_RESERVE_AND_MAP)
1623                 return err;
1624
1625         order = get_param_l(&in_param);
1626
1627         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1628         if (err)
1629                 return err;
1630
1631         base = __mlx4_alloc_mtt_range(dev, order);
1632         if (base == -1) {
1633                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1634                 return -ENOMEM;
1635         }
1636
1637         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1638         if (err) {
1639                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1640                 __mlx4_free_mtt_range(dev, base, order);
1641         } else {
1642                 set_param_l(out_param, base);
1643         }
1644
1645         return err;
1646 }
1647
1648 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1649                          u64 in_param, u64 *out_param)
1650 {
1651         int err = -EINVAL;
1652         int index;
1653         int id;
1654         struct res_mpt *mpt;
1655
1656         switch (op) {
1657         case RES_OP_RESERVE:
1658                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1659                 if (err)
1660                         break;
1661
1662                 index = __mlx4_mpt_reserve(dev);
1663                 if (index == -1) {
1664                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1665                         break;
1666                 }
1667                 id = index & mpt_mask(dev);
1668
1669                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1670                 if (err) {
1671                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1672                         __mlx4_mpt_release(dev, index);
1673                         break;
1674                 }
1675                 set_param_l(out_param, index);
1676                 break;
1677         case RES_OP_MAP_ICM:
1678                 index = get_param_l(&in_param);
1679                 id = index & mpt_mask(dev);
1680                 err = mr_res_start_move_to(dev, slave, id,
1681                                            RES_MPT_MAPPED, &mpt);
1682                 if (err)
1683                         return err;
1684
1685                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1686                 if (err) {
1687                         res_abort_move(dev, slave, RES_MPT, id);
1688                         return err;
1689                 }
1690
1691                 res_end_move(dev, slave, RES_MPT, id);
1692                 break;
1693         }
1694         return err;
1695 }
1696
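/*
 * CQs and SRQs are allocated in a single RES_OP_RESERVE_AND_MAP step;
 * the ICM allocation already yields the final number, so there is no
 * separate reserve/map state to walk through here.
 */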
1697 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1698                         u64 in_param, u64 *out_param)
1699 {
1700         int cqn;
1701         int err;
1702
1703         switch (op) {
1704         case RES_OP_RESERVE_AND_MAP:
1705                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1706                 if (err)
1707                         break;
1708
1709                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1710                 if (err) {
1711                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1712                         break;
1713                 }
1714
1715                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1716                 if (err) {
1717                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1718                         __mlx4_cq_free_icm(dev, cqn);
1719                         break;
1720                 }
1721
1722                 set_param_l(out_param, cqn);
1723                 break;
1724
1725         default:
1726                 err = -EINVAL;
1727         }
1728
1729         return err;
1730 }
1731
1732 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733                          u64 in_param, u64 *out_param)
1734 {
1735         int srqn;
1736         int err;
1737
1738         switch (op) {
1739         case RES_OP_RESERVE_AND_MAP:
1740                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1741                 if (err)
1742                         break;
1743
1744                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1745                 if (err) {
1746                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1747                         break;
1748                 }
1749
1750                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1751                 if (err) {
1752                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1753                         __mlx4_srq_free_icm(dev, srqn);
1754                         break;
1755                 }
1756
1757                 set_param_l(out_param, srqn);
1758                 break;
1759
1760         default:
1761                 err = -EINVAL;
1762         }
1763
1764         return err;
1765 }
1766
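/*
 * MAC registrations are tracked per slave in a list keyed by
 * (mac, port).  Re-registering the same address only bumps ref_count,
 * so the quota is charged once per distinct MAC and the teardown path
 * can drop exactly as many hardware references as the slave took.
 */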
1767 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1768                                      u8 smac_index, u64 *mac)
1769 {
1770         struct mlx4_priv *priv = mlx4_priv(dev);
1771         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1772         struct list_head *mac_list =
1773                 &tracker->slave_list[slave].res_list[RES_MAC];
1774         struct mac_res *res, *tmp;
1775
1776         list_for_each_entry_safe(res, tmp, mac_list, list) {
1777                 if (res->smac_index == smac_index && res->port == (u8) port) {
1778                         *mac = res->mac;
1779                         return 0;
1780                 }
1781         }
1782         return -ENOENT;
1783 }
1784
1785 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1786 {
1787         struct mlx4_priv *priv = mlx4_priv(dev);
1788         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1789         struct list_head *mac_list =
1790                 &tracker->slave_list[slave].res_list[RES_MAC];
1791         struct mac_res *res, *tmp;
1792
1793         list_for_each_entry_safe(res, tmp, mac_list, list) {
1794                 if (res->mac == mac && res->port == (u8) port) {
1795                         /* mac found. update ref count */
1796                         ++res->ref_count;
1797                         return 0;
1798                 }
1799         }
1800
1801         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1802                 return -EINVAL;
1803         res = kzalloc(sizeof(*res), GFP_KERNEL);
1804         if (!res) {
1805                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1806                 return -ENOMEM;
1807         }
1808         res->mac = mac;
1809         res->port = (u8) port;
1810         res->smac_index = smac_index;
1811         res->ref_count = 1;
1812         list_add_tail(&res->list,
1813                       &tracker->slave_list[slave].res_list[RES_MAC]);
1814         return 0;
1815 }
1816
1817 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1818                                int port)
1819 {
1820         struct mlx4_priv *priv = mlx4_priv(dev);
1821         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1822         struct list_head *mac_list =
1823                 &tracker->slave_list[slave].res_list[RES_MAC];
1824         struct mac_res *res, *tmp;
1825
1826         list_for_each_entry_safe(res, tmp, mac_list, list) {
1827                 if (res->mac == mac && res->port == (u8) port) {
1828                         if (!--res->ref_count) {
1829                                 list_del(&res->list);
1830                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1831                                 kfree(res);
1832                         }
1833                         break;
1834                 }
1835         }
1836 }
1837
1838 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1839 {
1840         struct mlx4_priv *priv = mlx4_priv(dev);
1841         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1842         struct list_head *mac_list =
1843                 &tracker->slave_list[slave].res_list[RES_MAC];
1844         struct mac_res *res, *tmp;
1845         int i;
1846
1847         list_for_each_entry_safe(res, tmp, mac_list, list) {
1848                 list_del(&res->list);
1849                 /* dereference the MAC once for each reference the slave held */
1850                 for (i = 0; i < res->ref_count; i++)
1851                         __mlx4_unregister_mac(dev, res->port, res->mac);
1852                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1853                 kfree(res);
1854         }
1855 }
1856
1857 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1858                          u64 in_param, u64 *out_param, int in_port)
1859 {
1860         int err = -EINVAL;
1861         int port;
1862         u64 mac;
1863         u8 smac_index;
1864
1865         if (op != RES_OP_RESERVE_AND_MAP)
1866                 return err;
1867
1868         port = !in_port ? get_param_l(out_param) : in_port;
1869         port = mlx4_slave_convert_port(dev, slave, port);
1871
1872         if (port < 0)
1873                 return -EINVAL;
1874         mac = in_param;
1875
1876         err = __mlx4_register_mac(dev, port, mac);
1877         if (err >= 0) {
1878                 smac_index = err;
1879                 set_param_l(out_param, err);
1880                 err = 0;
1881         }
1882
1883         if (!err) {
1884                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1885                 if (err)
1886                         __mlx4_unregister_mac(dev, port, mac);
1887         }
1888         return err;
1889 }
1890
1891 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1892                              int port, int vlan_index)
1893 {
1894         struct mlx4_priv *priv = mlx4_priv(dev);
1895         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1896         struct list_head *vlan_list =
1897                 &tracker->slave_list[slave].res_list[RES_VLAN];
1898         struct vlan_res *res, *tmp;
1899
1900         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1901                 if (res->vlan == vlan && res->port == (u8) port) {
1902                         /* vlan found. update ref count */
1903                         ++res->ref_count;
1904                         return 0;
1905                 }
1906         }
1907
1908         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1909                 return -EINVAL;
1910         res = kzalloc(sizeof(*res), GFP_KERNEL);
1911         if (!res) {
1912                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1913                 return -ENOMEM;
1914         }
1915         res->vlan = vlan;
1916         res->port = (u8) port;
1917         res->vlan_index = vlan_index;
1918         res->ref_count = 1;
1919         list_add_tail(&res->list,
1920                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1921         return 0;
1922 }
1923
1925 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1926                                 int port)
1927 {
1928         struct mlx4_priv *priv = mlx4_priv(dev);
1929         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1930         struct list_head *vlan_list =
1931                 &tracker->slave_list[slave].res_list[RES_VLAN];
1932         struct vlan_res *res, *tmp;
1933
1934         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1935                 if (res->vlan == vlan && res->port == (u8) port) {
1936                         if (!--res->ref_count) {
1937                                 list_del(&res->list);
1938                                 mlx4_release_resource(dev, slave, RES_VLAN,
1939                                                       1, port);
1940                                 kfree(res);
1941                         }
1942                         break;
1943                 }
1944         }
1945 }
1946
1947 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1948 {
1949         struct mlx4_priv *priv = mlx4_priv(dev);
1950         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1951         struct list_head *vlan_list =
1952                 &tracker->slave_list[slave].res_list[RES_VLAN];
1953         struct vlan_res *res, *tmp;
1954         int i;
1955
1956         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1957                 list_del(&res->list);
1958                 /* dereference the VLAN once for each reference the slave held */
1959                 for (i = 0; i < res->ref_count; i++)
1960                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1961                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1962                 kfree(res);
1963         }
1964 }
1965
1966 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1967                           u64 in_param, u64 *out_param, int in_port)
1968 {
1969         struct mlx4_priv *priv = mlx4_priv(dev);
1970         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1971         int err;
1972         u16 vlan;
1973         int vlan_index;
1974         int port;
1975
1976         port = !in_port ? get_param_l(out_param) : in_port;
1977
1978         if (!port || op != RES_OP_RESERVE_AND_MAP)
1979                 return -EINVAL;
1980
1981         port = mlx4_slave_convert_port(dev, slave, port);
1983
1984         if (port < 0)
1985                 return -EINVAL;
1986         /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1987         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1988                 slave_state[slave].old_vlan_api = true;
1989                 return 0;
1990         }
1991
1992         vlan = (u16) in_param;
1993
1994         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1995         if (!err) {
1996                 set_param_l(out_param, (u32) vlan_index);
1997                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1998                 if (err)
1999                         __mlx4_unregister_vlan(dev, port, vlan);
2000         }
2001         return err;
2002 }
2003
2004 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2005                              u64 in_param, u64 *out_param)
2006 {
2007         u32 index;
2008         int err;
2009
2010         if (op != RES_OP_RESERVE)
2011                 return -EINVAL;
2012
2013         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2014         if (err)
2015                 return err;
2016
2017         err = __mlx4_counter_alloc(dev, &index);
2018         if (err) {
2019                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2020                 return err;
2021         }
2022
2023         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2024         if (err) {
2025                 __mlx4_counter_free(dev, index);
2026                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2027         } else {
2028                 set_param_l(out_param, index);
2029         }
2030
2031         return err;
2032 }
2033
2034 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2035                            u64 in_param, u64 *out_param)
2036 {
2037         u32 xrcdn;
2038         int err;
2039
2040         if (op != RES_OP_RESERVE)
2041                 return -EINVAL;
2042
2043         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2044         if (err)
2045                 return err;
2046
2047         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2048         if (err)
2049                 __mlx4_xrcd_free(dev, xrcdn);
2050         else
2051                 set_param_l(out_param, xrcdn);
2052
2053         return err;
2054 }
2055
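/*
 * Dispatcher for the virtualized ALLOC_RES command: the low byte of
 * in_modifier selects the resource type, bits 8-15 carry the port for
 * MAC/VLAN requests, and op_modifier (alop) selects the operation.
 */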
2056 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2057                            struct mlx4_vhcr *vhcr,
2058                            struct mlx4_cmd_mailbox *inbox,
2059                            struct mlx4_cmd_mailbox *outbox,
2060                            struct mlx4_cmd_info *cmd)
2061 {
2062         int err;
2063         int alop = vhcr->op_modifier;
2064
2065         switch (vhcr->in_modifier & 0xFF) {
2066         case RES_QP:
2067                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2068                                    vhcr->in_param, &vhcr->out_param);
2069                 break;
2070
2071         case RES_MTT:
2072                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073                                     vhcr->in_param, &vhcr->out_param);
2074                 break;
2075
2076         case RES_MPT:
2077                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2078                                     vhcr->in_param, &vhcr->out_param);
2079                 break;
2080
2081         case RES_CQ:
2082                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2083                                    vhcr->in_param, &vhcr->out_param);
2084                 break;
2085
2086         case RES_SRQ:
2087                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2088                                     vhcr->in_param, &vhcr->out_param);
2089                 break;
2090
2091         case RES_MAC:
2092                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2093                                     vhcr->in_param, &vhcr->out_param,
2094                                     (vhcr->in_modifier >> 8) & 0xFF);
2095                 break;
2096
2097         case RES_VLAN:
2098                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2099                                      vhcr->in_param, &vhcr->out_param,
2100                                      (vhcr->in_modifier >> 8) & 0xFF);
2101                 break;
2102
2103         case RES_COUNTER:
2104                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2105                                         vhcr->in_param, &vhcr->out_param);
2106                 break;
2107
2108         case RES_XRCD:
2109                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2110                                       vhcr->in_param, &vhcr->out_param);
2111                 break;
2112
2113         default:
2114                 err = -EINVAL;
2115                 break;
2116         }
2117
2118         return err;
2119 }
2120
2121 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2122                        u64 in_param)
2123 {
2124         int err;
2125         int count;
2126         int base;
2127         int qpn;
2128
2129         switch (op) {
2130         case RES_OP_RESERVE:
2131                 base = get_param_l(&in_param) & 0x7fffff;
2132                 count = get_param_h(&in_param);
2133                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2134                 if (err)
2135                         break;
2136                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2137                 __mlx4_qp_release_range(dev, base, count);
2138                 break;
2139         case RES_OP_MAP_ICM:
2140                 qpn = get_param_l(&in_param) & 0x7fffff;
2141                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2142                                            NULL, 0);
2143                 if (err)
2144                         return err;
2145
2146                 if (!fw_reserved(dev, qpn))
2147                         __mlx4_qp_free_icm(dev, qpn);
2148
2149                 res_end_move(dev, slave, RES_QP, qpn);
2150
2151                 if (valid_reserved(dev, slave, qpn))
2152                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2153                 break;
2154         default:
2155                 err = -EINVAL;
2156                 break;
2157         }
2158         return err;
2159 }
2160
2161 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2162                         u64 in_param, u64 *out_param)
2163 {
2164         int err = -EINVAL;
2165         int base;
2166         int order;
2167
2168         if (op != RES_OP_RESERVE_AND_MAP)
2169                 return err;
2170
2171         base = get_param_l(&in_param);
2172         order = get_param_h(&in_param);
2173         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2174         if (!err) {
2175                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2176                 __mlx4_free_mtt_range(dev, base, order);
2177         }
2178         return err;
2179 }
2180
2181 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2182                         u64 in_param)
2183 {
2184         int err = -EINVAL;
2185         int index;
2186         int id;
2187         struct res_mpt *mpt;
2188
2189         switch (op) {
2190         case RES_OP_RESERVE:
2191                 index = get_param_l(&in_param);
2192                 id = index & mpt_mask(dev);
2193                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2194                 if (err)
2195                         break;
2196                 index = mpt->key;
2197                 put_res(dev, slave, id, RES_MPT);
2198
2199                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2200                 if (err)
2201                         break;
2202                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2203                 __mlx4_mpt_release(dev, index);
2204                 break;
2205         case RES_OP_MAP_ICM:
2206                 index = get_param_l(&in_param);
2207                 id = index & mpt_mask(dev);
2208                 err = mr_res_start_move_to(dev, slave, id,
2209                                            RES_MPT_RESERVED, &mpt);
2210                 if (err)
2211                         return err;
2212
2213                 __mlx4_mpt_free_icm(dev, mpt->key);
2214                 res_end_move(dev, slave, RES_MPT, id);
2215                 return err;
2217         default:
2218                 err = -EINVAL;
2219                 break;
2220         }
2221         return err;
2222 }
2223
2224 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2225                        u64 in_param, u64 *out_param)
2226 {
2227         int cqn;
2228         int err;
2229
2230         switch (op) {
2231         case RES_OP_RESERVE_AND_MAP:
2232                 cqn = get_param_l(&in_param);
2233                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2234                 if (err)
2235                         break;
2236
2237                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2238                 __mlx4_cq_free_icm(dev, cqn);
2239                 break;
2240
2241         default:
2242                 err = -EINVAL;
2243                 break;
2244         }
2245
2246         return err;
2247 }
2248
2249 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2250                         u64 in_param, u64 *out_param)
2251 {
2252         int srqn;
2253         int err;
2254
2255         switch (op) {
2256         case RES_OP_RESERVE_AND_MAP:
2257                 srqn = get_param_l(&in_param);
2258                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2259                 if (err)
2260                         break;
2261
2262                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2263                 __mlx4_srq_free_icm(dev, srqn);
2264                 break;
2265
2266         default:
2267                 err = -EINVAL;
2268                 break;
2269         }
2270
2271         return err;
2272 }
2273
2274 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2275                             u64 in_param, u64 *out_param, int in_port)
2276 {
2277         int port;
2278         int err = 0;
2279
2280         switch (op) {
2281         case RES_OP_RESERVE_AND_MAP:
2282                 port = !in_port ? get_param_l(out_param) : in_port;
2283                 port = mlx4_slave_convert_port(dev, slave, port);
2285
2286                 if (port < 0)
2287                         return -EINVAL;
2288                 mac_del_from_slave(dev, slave, in_param, port);
2289                 __mlx4_unregister_mac(dev, port, in_param);
2290                 break;
2291         default:
2292                 err = -EINVAL;
2293                 break;
2294         }
2295
2296         return err;
2298 }
2299
2300 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2301                             u64 in_param, u64 *out_param, int port)
2302 {
2303         struct mlx4_priv *priv = mlx4_priv(dev);
2304         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2305         int err = 0;
2306
2307         port = mlx4_slave_convert_port(dev, slave, port);
2309
2310         if (port < 0)
2311                 return -EINVAL;
2312         switch (op) {
2313         case RES_OP_RESERVE_AND_MAP:
2314                 if (slave_state[slave].old_vlan_api)
2315                         return 0;
2316                 if (!port)
2317                         return -EINVAL;
2318                 vlan_del_from_slave(dev, slave, in_param, port);
2319                 __mlx4_unregister_vlan(dev, port, in_param);
2320                 break;
2321         default:
2322                 err = -EINVAL;
2323                 break;
2324         }
2325
2326         return err;
2327 }
2328
2329 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2330                             u64 in_param, u64 *out_param)
2331 {
2332         int index;
2333         int err;
2334
2335         if (op != RES_OP_RESERVE)
2336                 return -EINVAL;
2337
2338         index = get_param_l(&in_param);
2339         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2340         if (err)
2341                 return err;
2342
2343         __mlx4_counter_free(dev, index);
2344         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2345
2346         return err;
2347 }
2348
2349 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2350                           u64 in_param, u64 *out_param)
2351 {
2352         int xrcdn;
2353         int err;
2354
2355         if (op != RES_OP_RESERVE)
2356                 return -EINVAL;
2357
2358         xrcdn = get_param_l(&in_param);
2359         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2360         if (err)
2361                 return err;
2362
2363         __mlx4_xrcd_free(dev, xrcdn);
2364
2365         return err;
2366 }
2367
2368 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2369                           struct mlx4_vhcr *vhcr,
2370                           struct mlx4_cmd_mailbox *inbox,
2371                           struct mlx4_cmd_mailbox *outbox,
2372                           struct mlx4_cmd_info *cmd)
2373 {
2374         int err = -EINVAL;
2375         int alop = vhcr->op_modifier;
2376
2377         switch (vhcr->in_modifier & 0xFF) {
2378         case RES_QP:
2379                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2380                                   vhcr->in_param);
2381                 break;
2382
2383         case RES_MTT:
2384                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2385                                    vhcr->in_param, &vhcr->out_param);
2386                 break;
2387
2388         case RES_MPT:
2389                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2390                                    vhcr->in_param);
2391                 break;
2392
2393         case RES_CQ:
2394                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2395                                   vhcr->in_param, &vhcr->out_param);
2396                 break;
2397
2398         case RES_SRQ:
2399                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2400                                    vhcr->in_param, &vhcr->out_param);
2401                 break;
2402
2403         case RES_MAC:
2404                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2405                                    vhcr->in_param, &vhcr->out_param,
2406                                    (vhcr->in_modifier >> 8) & 0xFF);
2407                 break;
2408
2409         case RES_VLAN:
2410                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2411                                     vhcr->in_param, &vhcr->out_param,
2412                                     (vhcr->in_modifier >> 8) & 0xFF);
2413                 break;
2414
2415         case RES_COUNTER:
2416                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2417                                        vhcr->in_param, &vhcr->out_param);
2418                 break;
2419
2420         case RES_XRCD:
2421                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2422                                      vhcr->in_param, &vhcr->out_param);
2423                 break;

2424         default:
2425                 break;
2426         }
2427         return err;
2428 }
2429
2430 /* ugly but other choices are uglier */
2431 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2432 {
2433         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2434 }
2435
2436 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2437 {
2438         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2439 }
2440
2441 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2442 {
2443         return be32_to_cpu(mpt->mtt_sz);
2444 }
2445
2446 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2447 {
2448         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2449 }
2450
2451 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2452 {
2453         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2454 }
2455
2456 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2457 {
2458         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2459 }
2460
2461 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2462 {
2463         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2464 }
2465
2466 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2467 {
2468         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2469 }
2470
2471 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2472 {
2473         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2474 }
2475
2476 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2477 {
2478         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2479         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2480         int log_sq_stride = qpc->sq_size_stride & 7;
2481         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2482         int log_rq_stride = qpc->rq_size_stride & 7;
2483         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2484         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2485         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2486         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2487         int sq_size;
2488         int rq_size;
2489         int total_pages;
2490         int total_mem;
2491         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2492
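        /*
         * A work queue occupies 2^(log_entries + log_stride + 4) bytes,
         * since the WQE stride is 16 << log_stride.  For example, with
         * log_sq_size = 6, a stride code of 2 and 4K pages, the SQ is
         * 2^12 = 4096 bytes; an equal RQ brings total_mem to 8192, i.e.
         * two pages.  QPs attached to an SRQ, RSS QPs and XRC QPs own
         * no RQ buffer of their own.
         */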
2493         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2494         rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2495         total_mem = sq_size + rq_size;
2496         total_pages =
2497                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2498                                    page_shift);
2499
2500         return total_pages;
2501 }
2502
2503 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2504                            int size, struct res_mtt *mtt)
2505 {
2506         int res_start = mtt->com.res_id;
2507         int res_size = (1 << mtt->order);
2508
2509         if (start < res_start || start + size > res_start + res_size)
2510                 return -EPERM;
2511         return 0;
2512 }
2513
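/*
 * SW2HW_MPT moves an MPT into hardware ownership on behalf of a slave.
 * Before the mailbox reaches firmware, verify that the entry is a
 * memory region (memory windows and FMR bind are not allowed for VFs),
 * that the PD bits encoding a function id match the calling slave (or
 * are zero), and that any referenced MTT range belongs to this slave.
 */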
2514 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2515                            struct mlx4_vhcr *vhcr,
2516                            struct mlx4_cmd_mailbox *inbox,
2517                            struct mlx4_cmd_mailbox *outbox,
2518                            struct mlx4_cmd_info *cmd)
2519 {
2520         int err;
2521         int index = vhcr->in_modifier;
2522         struct res_mtt *mtt;
2523         struct res_mpt *mpt;
2524         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2525         int phys;
2526         int id;
2527         u32 pd;
2528         int pd_slave;
2529
2530         id = index & mpt_mask(dev);
2531         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2532         if (err)
2533                 return err;
2534
2535         /* Disable memory windows for VFs. */
2536         if (!mr_is_region(inbox->buf)) {
2537                 err = -EPERM;
2538                 goto ex_abort;
2539         }
2540
2541         /* Make sure that the PD bits related to the slave id are zeros. */
2542         pd = mr_get_pd(inbox->buf);
2543         pd_slave = (pd >> 17) & 0x7f;
2544         if (pd_slave != 0 && pd_slave != slave) {
2545                 err = -EPERM;
2546                 goto ex_abort;
2547         }
2548
2549         if (mr_is_fmr(inbox->buf)) {
2550                 /* FMR and Bind Enable are forbidden in slave devices. */
2551                 if (mr_is_bind_enabled(inbox->buf)) {
2552                         err = -EPERM;
2553                         goto ex_abort;
2554                 }
2555                 /* FMR and Memory Windows are also forbidden. */
2556                 if (!mr_is_region(inbox->buf)) {
2557                         err = -EPERM;
2558                         goto ex_abort;
2559                 }
2560         }
2561
2562         phys = mr_phys_mpt(inbox->buf);
2563         if (!phys) {
2564                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2565                 if (err)
2566                         goto ex_abort;
2567
2568                 err = check_mtt_range(dev, slave, mtt_base,
2569                                       mr_get_mtt_size(inbox->buf), mtt);
2570                 if (err)
2571                         goto ex_put;
2572
2573                 mpt->mtt = mtt;
2574         }
2575
2576         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2577         if (err)
2578                 goto ex_put;
2579
2580         if (!phys) {
2581                 atomic_inc(&mtt->ref_count);
2582                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2583         }
2584
2585         res_end_move(dev, slave, RES_MPT, id);
2586         return 0;
2587
2588 ex_put:
2589         if (!phys)
2590                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2591 ex_abort:
2592         res_abort_move(dev, slave, RES_MPT, id);
2593
2594         return err;
2595 }
2596
2597 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2598                            struct mlx4_vhcr *vhcr,
2599                            struct mlx4_cmd_mailbox *inbox,
2600                            struct mlx4_cmd_mailbox *outbox,
2601                            struct mlx4_cmd_info *cmd)
2602 {
2603         int err;
2604         int index = vhcr->in_modifier;
2605         struct res_mpt *mpt;
2606         int id;
2607
2608         id = index & mpt_mask(dev);
2609         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2610         if (err)
2611                 return err;
2612
2613         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2614         if (err)
2615                 goto ex_abort;
2616
2617         if (mpt->mtt)
2618                 atomic_dec(&mpt->mtt->ref_count);
2619
2620         res_end_move(dev, slave, RES_MPT, id);
2621         return 0;
2622
2623 ex_abort:
2624         res_abort_move(dev, slave, RES_MPT, id);
2625
2626         return err;
2627 }
2628
2629 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2630                            struct mlx4_vhcr *vhcr,
2631                            struct mlx4_cmd_mailbox *inbox,
2632                            struct mlx4_cmd_mailbox *outbox,
2633                            struct mlx4_cmd_info *cmd)
2634 {
2635         int err;
2636         int index = vhcr->in_modifier;
2637         struct res_mpt *mpt;
2638         int id;
2639
2640         id = index & mpt_mask(dev);
2641         err = get_res(dev, slave, id, RES_MPT, &mpt);
2642         if (err)
2643                 return err;
2644
2645         if (mpt->com.from_state == RES_MPT_MAPPED) {
2646                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2647                  * that, the VF must read the MPT. But since the MPT entry memory is not
2648                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2649                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2650                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2651                  * ownership following the change. The change here allows the VF to
2652                  * perform QUERY_MPT also when the entry is in SW ownership.
2653                  */
2654                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2655                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2656                                         mpt->key, NULL);
2657
2658                 if (!mpt_entry || !outbox->buf) {
2659                         err = -EINVAL;
2660                         goto out;
2661                 }
2662
2663                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2664
2665                 err = 0;
2666         } else if (mpt->com.from_state == RES_MPT_HW) {
2667                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2668         } else {
2669                 err = -EBUSY;
2670                 goto out;
2671         }
2672
2674 out:
2675         put_res(dev, slave, id, RES_MPT);
2676         return err;
2677 }
2678
2679 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2680 {
2681         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2682 }
2683
2684 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2685 {
2686         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2687 }
2688
2689 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2690 {
2691         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2692 }
2693
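/*
 * Proxy/tunnel special QPs use a paravirtualized qkey; when this qpn
 * has one, patch it into the slave-supplied context.
 */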
2694 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2695                                   struct mlx4_qp_context *context)
2696 {
2697         u32 qpn = vhcr->in_modifier & 0xffffff;
2698         u32 qkey = 0;
2699
2700         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2701                 return;
2702
2703         /* adjust qkey in qp context */
2704         context->qkey = cpu_to_be32(qkey);
2705 }
2706
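/*
 * RST2INIT pins everything the QP context points at: the MTT range,
 * the receive and send CQs and, when SRQ use is flagged, the SRQ.
 * Each dependency's ref_count is raised so that it cannot be freed
 * while the QP remains in hardware ownership.
 */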
2707 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2708                              struct mlx4_vhcr *vhcr,
2709                              struct mlx4_cmd_mailbox *inbox,
2710                              struct mlx4_cmd_mailbox *outbox,
2711                              struct mlx4_cmd_info *cmd)
2712 {
2713         int err;
2714         int qpn = vhcr->in_modifier & 0x7fffff;
2715         struct res_mtt *mtt;
2716         struct res_qp *qp;
2717         struct mlx4_qp_context *qpc = inbox->buf + 8;
2718         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2719         int mtt_size = qp_get_mtt_size(qpc);
2720         struct res_cq *rcq;
2721         struct res_cq *scq;
2722         int rcqn = qp_get_rcqn(qpc);
2723         int scqn = qp_get_scqn(qpc);
2724         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2725         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2726         struct res_srq *srq;
2727         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2728
2729         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2730         if (err)
2731                 return err;
2732         qp->local_qpn = local_qpn;
2733         qp->sched_queue = 0;
2734         qp->param3 = 0;
2735         qp->vlan_control = 0;
2736         qp->fvl_rx = 0;
2737         qp->pri_path_fl = 0;
2738         qp->vlan_index = 0;
2739         qp->feup = 0;
2740         qp->qpc_flags = be32_to_cpu(qpc->flags);
2741
2742         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2743         if (err)
2744                 goto ex_abort;
2745
2746         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2747         if (err)
2748                 goto ex_put_mtt;
2749
2750         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2751         if (err)
2752                 goto ex_put_mtt;
2753
2754         if (scqn != rcqn) {
2755                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2756                 if (err)
2757                         goto ex_put_rcq;
2758         } else
2759                 scq = rcq;
2760
2761         if (use_srq) {
2762                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2763                 if (err)
2764                         goto ex_put_scq;
2765         }
2766
2767         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2768         update_pkey_index(dev, slave, inbox);
2769         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2770         if (err)
2771                 goto ex_put_srq;
2772         atomic_inc(&mtt->ref_count);
2773         qp->mtt = mtt;
2774         atomic_inc(&rcq->ref_count);
2775         qp->rcq = rcq;
2776         atomic_inc(&scq->ref_count);
2777         qp->scq = scq;
2778
2779         if (scqn != rcqn)
2780                 put_res(dev, slave, scqn, RES_CQ);
2781
2782         if (use_srq) {
2783                 atomic_inc(&srq->ref_count);
2784                 put_res(dev, slave, srqn, RES_SRQ);
2785                 qp->srq = srq;
2786         }
2787         put_res(dev, slave, rcqn, RES_CQ);
2788         put_res(dev, slave, mtt_base, RES_MTT);
2789         res_end_move(dev, slave, RES_QP, qpn);
2790
2791         return 0;
2792
2793 ex_put_srq:
2794         if (use_srq)
2795                 put_res(dev, slave, srqn, RES_SRQ);
2796 ex_put_scq:
2797         if (scqn != rcqn)
2798                 put_res(dev, slave, scqn, RES_CQ);
2799 ex_put_rcq:
2800         put_res(dev, slave, rcqn, RES_CQ);
2801 ex_put_mtt:
2802         put_res(dev, slave, mtt_base, RES_MTT);
2803 ex_abort:
2804         res_abort_move(dev, slave, RES_QP, qpn);
2805
2806         return err;
2807 }
2808
2809 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2810 {
2811         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2812 }
2813
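/*
 * EQ and CQ entries are 32 bytes, so a queue of 2^log_size entries
 * occupies 2^(log_size + 5) bytes; the helpers below convert that to
 * a page count, with a minimum of one page.
 */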
2814 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2815 {
2816         int log_eq_size = eqc->log_eq_size & 0x1f;
2817         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2818
2819         if (log_eq_size + 5 < page_shift)
2820                 return 1;
2821
2822         return 1 << (log_eq_size + 5 - page_shift);
2823 }
2824
2825 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2826 {
2827         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2828 }
2829
2830 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2831 {
2832         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2833         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2834
2835         if (log_cq_size + 5 < page_shift)
2836                 return 1;
2837
2838         return 1 << (log_cq_size + 5 - page_shift);
2839 }
2840
2841 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2842                           struct mlx4_vhcr *vhcr,
2843                           struct mlx4_cmd_mailbox *inbox,
2844                           struct mlx4_cmd_mailbox *outbox,
2845                           struct mlx4_cmd_info *cmd)
2846 {
2847         int err;
2848         int eqn = vhcr->in_modifier;
2849         int res_id = (slave << 8) | eqn;
2850         struct mlx4_eq_context *eqc = inbox->buf;
2851         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2852         int mtt_size = eq_get_mtt_size(eqc);
2853         struct res_eq *eq;
2854         struct res_mtt *mtt;
2855
2856         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2857         if (err)
2858                 return err;
2859         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2860         if (err)
2861                 goto out_add;
2862
2863         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2864         if (err)
2865                 goto out_move;
2866
2867         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2868         if (err)
2869                 goto out_put;
2870
2871         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2872         if (err)
2873                 goto out_put;
2874
2875         atomic_inc(&mtt->ref_count);
2876         eq->mtt = mtt;
2877         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2878         res_end_move(dev, slave, RES_EQ, res_id);
2879         return 0;
2880
2881 out_put:
2882         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2883 out_move:
2884         res_abort_move(dev, slave, RES_EQ, res_id);
2885 out_add:
2886         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2887         return err;
2888 }
2889
2890 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2891                             struct mlx4_vhcr *vhcr,
2892                             struct mlx4_cmd_mailbox *inbox,
2893                             struct mlx4_cmd_mailbox *outbox,
2894                             struct mlx4_cmd_info *cmd)
2895 {
2896         int err;
2897         u8 get = vhcr->op_modifier;
2898
2899         if (get != 1)
2900                 return -EPERM;
2901
2902         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2903
2904         return err;
2905 }
2906
2907 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2908                               int len, struct res_mtt **res)
2909 {
2910         struct mlx4_priv *priv = mlx4_priv(dev);
2911         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2912         struct res_mtt *mtt;
2913         int err = -EINVAL;
2914
2915         spin_lock_irq(mlx4_tlock(dev));
2916         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2917                             com.list) {
2918                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2919                         *res = mtt;
2920                         mtt->com.from_state = mtt->com.state;
2921                         mtt->com.state = RES_MTT_BUSY;
2922                         err = 0;
2923                         break;
2924                 }
2925         }
2926         spin_unlock_irq(mlx4_tlock(dev));
2927
2928         return err;
2929 }
2930
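/*
 * Sanity-check a slave-supplied QP context before a state transition:
 * for RC/UC/XRC QPs, the MGID indexes of the primary and alternate
 * address paths must fall within the slave's GID range, and only VFs
 * with SMI enabled may bring up an MLX proxy special QP.
 */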
2931 static int verify_qp_parameters(struct mlx4_dev *dev,
2932                                 struct mlx4_vhcr *vhcr,
2933                                 struct mlx4_cmd_mailbox *inbox,
2934                                 enum qp_transition transition, u8 slave)
2935 {
2936         u32                     qp_type;
2937         u32                     qpn;
2938         struct mlx4_qp_context  *qp_ctx;
2939         enum mlx4_qp_optpar     optpar;
2940         int port;
2941         int num_gids;
2942
2943         qp_ctx  = inbox->buf + 8;
2944         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2945         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2946
2947         if (slave != mlx4_master_func_num(dev))
2948                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2949
2950         switch (qp_type) {
2951         case MLX4_QP_ST_RC:
2952         case MLX4_QP_ST_XRC:
2953         case MLX4_QP_ST_UC:
2954                 switch (transition) {
2955                 case QP_TRANS_INIT2RTR:
2956                 case QP_TRANS_RTR2RTS:
2957                 case QP_TRANS_RTS2RTS:
2958                 case QP_TRANS_SQD2SQD:
2959                 case QP_TRANS_SQD2RTS:
2960                         if (slave != mlx4_master_func_num(dev)) {
2961                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2962                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2963                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2964                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2965                                         else
2966                                                 num_gids = 1;
2967                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2968                                                 return -EINVAL;
2969                                 }
2970                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2971                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2972                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2973                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2974                                         else
2975                                                 num_gids = 1;
2976                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2977                                                 return -EINVAL;
2978                                 }
                             }
2979                         break;
2980                 default:
2981                         break;
2982                 }
2983                 break;
2984
2985         case MLX4_QP_ST_MLX:
2986                 qpn = vhcr->in_modifier & 0x7fffff;
2987                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2988                 if (transition == QP_TRANS_INIT2RTR &&
2989                     slave != mlx4_master_func_num(dev) &&
2990                     mlx4_is_qp_reserved(dev, qpn) &&
2991                     !mlx4_vf_smi_enabled(dev, slave, port)) {
2992                         /* only enabled VFs may create MLX proxy QPs */
2993                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2994                                  __func__, slave, port);
2995                         return -EPERM;
2996                 }
2997                 break;
2998
2999         default:
3000                 break;
3001         }
3002
3003         return 0;
3004 }
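
/* Decode notes (annotation): the transport type sits in bits 16..23 of
 * qpc->flags and bit 6 of sched_queue encodes the port, so
 *
 *	port = (sched_queue >> 6 & 1) + 1;
 *
 * maps sched_queue 0x00 to port 1 and 0x40 to port 2; mgid_index is then
 * bounds-checked against the number of GIDs the slave owns on that port.
 */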
3005
3006 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3007                            struct mlx4_vhcr *vhcr,
3008                            struct mlx4_cmd_mailbox *inbox,
3009                            struct mlx4_cmd_mailbox *outbox,
3010                            struct mlx4_cmd_info *cmd)
3011 {
3012         struct mlx4_mtt mtt;
3013         __be64 *page_list = inbox->buf;
3014         u64 *pg_list = (u64 *)page_list;
3015         int i;
3016         struct res_mtt *rmtt = NULL;
3017         int start = be64_to_cpu(page_list[0]);
3018         int npages = vhcr->in_modifier;
3019         int err;
3020
3021         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3022         if (err)
3023                 return err;
3024
3025         /* Call the SW implementation of write_mtt:
3026          * - Prepare a dummy mtt struct
3027          * - Translate inbox contents to simple addresses in host endianness */
3028         mtt.offset = 0;  /* TBD: this is broken, but it is left unhandled
3029                             since the offset is not actually used */
3030         mtt.order = 0;
3031         mtt.page_shift = 0;
3032         for (i = 0; i < npages; ++i)
3033                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3034
3035         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3036                                ((u64 *)page_list + 2));
3037
3038         if (rmtt)
3039                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3040
3041         return err;
3042 }
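
/* Mailbox layout sketch (annotation, inferred from the code above): for
 * WRITE_MTT the first __be64 of the inbox holds the starting MTT offset,
 * the second is reserved, and the page addresses begin at index 2 with
 * bit 0 serving as a present flag - hence the "& ~1ULL" mask before the
 * list is handed to __mlx4_write_mtt().
 */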
3043
3044 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3045                           struct mlx4_vhcr *vhcr,
3046                           struct mlx4_cmd_mailbox *inbox,
3047                           struct mlx4_cmd_mailbox *outbox,
3048                           struct mlx4_cmd_info *cmd)
3049 {
3050         int eqn = vhcr->in_modifier;
3051         int res_id = eqn | (slave << 8);
3052         struct res_eq *eq;
3053         int err;
3054
3055         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3056         if (err)
3057                 return err;
3058
3059         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3060         if (err)
3061                 goto ex_abort;
3062
3063         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3064         if (err)
3065                 goto ex_put;
3066
3067         atomic_dec(&eq->mtt->ref_count);
3068         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3069         res_end_move(dev, slave, RES_EQ, res_id);
3070         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3071
3072         return 0;
3073
3074 ex_put:
3075         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3076 ex_abort:
3077         res_abort_move(dev, slave, RES_EQ, res_id);
3078
3079         return err;
3080 }
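
/* Worked example (annotation): EQ resource ids are keyed per slave as
 * res_id = eqn | (slave << 8), so slave 3 freeing EQ 5 operates on
 * res_id 0x305 and never collides with another slave's EQ 5.
 */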
3081
3082 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3083 {
3084         struct mlx4_priv *priv = mlx4_priv(dev);
3085         struct mlx4_slave_event_eq_info *event_eq;
3086         struct mlx4_cmd_mailbox *mailbox;
3087         u32 in_modifier = 0;
3088         int err;
3089         int res_id;
3090         struct res_eq *req;
3091
3092         if (!priv->mfunc.master.slave_state)
3093                 return -EINVAL;
3094
3095         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3096
3097         /* Create the event only if the slave is registered */
3098         if (event_eq->eqn < 0)
3099                 return 0;
3100
3101         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3102         res_id = (slave << 8) | event_eq->eqn;
3103         err = get_res(dev, slave, res_id, RES_EQ, &req);
3104         if (err)
3105                 goto unlock;
3106
3107         if (req->com.from_state != RES_EQ_HW) {
3108                 err = -EINVAL;
3109                 goto put;
3110         }
3111
3112         mailbox = mlx4_alloc_cmd_mailbox(dev);
3113         if (IS_ERR(mailbox)) {
3114                 err = PTR_ERR(mailbox);
3115                 goto put;
3116         }
3117
3118         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3119                 ++event_eq->token;
3120                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3121         }
3122
3123         memcpy(mailbox->buf, (u8 *) eqe, 28);
3124
3125         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3126
3127         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3128                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3129                        MLX4_CMD_NATIVE);
3130
3131         put_res(dev, slave, res_id, RES_EQ);
3132         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3133         mlx4_free_cmd_mailbox(dev, mailbox);
3134         return err;
3135
3136 put:
3137         put_res(dev, slave, res_id, RES_EQ);
3138
3139 unlock:
3140         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3141         return err;
3142 }
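
/* GEN_EQE sketch (annotation, hedged): only the first 28 bytes of the EQE
 * are copied into the mailbox, presumably leaving the trailing bytes that
 * carry the ownership bit under hardware control, and the in_modifier
 * packs the target as (slave & 0xff) | ((eqn & 0xff) << 16) - e.g.
 * slave 2 with eqn 7 yields 0x00070002.
 */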
3143
3144 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3145                           struct mlx4_vhcr *vhcr,
3146                           struct mlx4_cmd_mailbox *inbox,
3147                           struct mlx4_cmd_mailbox *outbox,
3148                           struct mlx4_cmd_info *cmd)
3149 {
3150         int eqn = vhcr->in_modifier;
3151         int res_id = eqn | (slave << 8);
3152         struct res_eq *eq;
3153         int err;
3154
3155         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3156         if (err)
3157                 return err;
3158
3159         if (eq->com.from_state != RES_EQ_HW) {
3160                 err = -EINVAL;
3161                 goto ex_put;
3162         }
3163
3164         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3165
3166 ex_put:
3167         put_res(dev, slave, res_id, RES_EQ);
3168         return err;
3169 }
3170
3171 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3172                           struct mlx4_vhcr *vhcr,
3173                           struct mlx4_cmd_mailbox *inbox,
3174                           struct mlx4_cmd_mailbox *outbox,
3175                           struct mlx4_cmd_info *cmd)
3176 {
3177         int err;
3178         int cqn = vhcr->in_modifier;
3179         struct mlx4_cq_context *cqc = inbox->buf;
3180         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3181         struct res_cq *cq;
3182         struct res_mtt *mtt;
3183
3184         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3185         if (err)
3186                 return err;
3187         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3188         if (err)
3189                 goto out_move;
3190         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3191         if (err)
3192                 goto out_put;
3193         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3194         if (err)
3195                 goto out_put;
3196         atomic_inc(&mtt->ref_count);
3197         cq->mtt = mtt;
3198         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3199         res_end_move(dev, slave, RES_CQ, cqn);
3200         return 0;
3201
3202 out_put:
3203         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3204 out_move:
3205         res_abort_move(dev, slave, RES_CQ, cqn);
3206         return err;
3207 }
3208
3209 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3210                           struct mlx4_vhcr *vhcr,
3211                           struct mlx4_cmd_mailbox *inbox,
3212                           struct mlx4_cmd_mailbox *outbox,
3213                           struct mlx4_cmd_info *cmd)
3214 {
3215         int err;
3216         int cqn = vhcr->in_modifier;
3217         struct res_cq *cq;
3218
3219         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3220         if (err)
3221                 return err;
3222         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3223         if (err)
3224                 goto out_move;
3225         atomic_dec(&cq->mtt->ref_count);
3226         res_end_move(dev, slave, RES_CQ, cqn);
3227         return 0;
3228
3229 out_move:
3230         res_abort_move(dev, slave, RES_CQ, cqn);
3231         return err;
3232 }
3233
3234 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3235                           struct mlx4_vhcr *vhcr,
3236                           struct mlx4_cmd_mailbox *inbox,
3237                           struct mlx4_cmd_mailbox *outbox,
3238                           struct mlx4_cmd_info *cmd)
3239 {
3240         int cqn = vhcr->in_modifier;
3241         struct res_cq *cq;
3242         int err;
3243
3244         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3245         if (err)
3246                 return err;
3247
3248         if (cq->com.from_state != RES_CQ_HW)
3249                 goto ex_put;
3250
3251         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3252 ex_put:
3253         put_res(dev, slave, cqn, RES_CQ);
3254
3255         return err;
3256 }
3257
3258 static int handle_resize(struct mlx4_dev *dev, int slave,
3259                          struct mlx4_vhcr *vhcr,
3260                          struct mlx4_cmd_mailbox *inbox,
3261                          struct mlx4_cmd_mailbox *outbox,
3262                          struct mlx4_cmd_info *cmd,
3263                          struct res_cq *cq)
3264 {
3265         int err;
3266         struct res_mtt *orig_mtt;
3267         struct res_mtt *mtt;
3268         struct mlx4_cq_context *cqc = inbox->buf;
3269         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3270
3271         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3272         if (err)
3273                 return err;
3274
3275         if (orig_mtt != cq->mtt) {
3276                 err = -EINVAL;
3277                 goto ex_put;
3278         }
3279
3280         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3281         if (err)
3282                 goto ex_put;
3283
3284         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3285         if (err)
3286                 goto ex_put1;
3287         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3288         if (err)
3289                 goto ex_put1;
3290         atomic_dec(&orig_mtt->ref_count);
3291         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3292         atomic_inc(&mtt->ref_count);
3293         cq->mtt = mtt;
3294         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3295         return 0;
3296
3297 ex_put1:
3298         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3299 ex_put:
3300         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3301
3302         return err;
3304 }
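
/* Resize bookkeeping note (annotation): on a successful MODIFY_CQ resize
 * the CQ drops its ref_count on the old MTT range and pins the range named
 * in the new context; the error labels only release the busy state taken
 * by get_res() and leave both ref_counts untouched.
 */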
3305
3306 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3307                            struct mlx4_vhcr *vhcr,
3308                            struct mlx4_cmd_mailbox *inbox,
3309                            struct mlx4_cmd_mailbox *outbox,
3310                            struct mlx4_cmd_info *cmd)
3311 {
3312         int cqn = vhcr->in_modifier;
3313         struct res_cq *cq;
3314         int err;
3315
3316         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3317         if (err)
3318                 return err;
3319
3320         if (cq->com.from_state != RES_CQ_HW)
3321                 goto ex_put;
3322
3323         if (vhcr->op_modifier == 0) {
3324                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3325                 goto ex_put;
3326         }
3327
3328         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3329 ex_put:
3330         put_res(dev, slave, cqn, RES_CQ);
3331
3332         return err;
3333 }
3334
3335 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3336 {
3337         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3338         int log_rq_stride = srqc->logstride & 7;
3339         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3340
3341         if (log_srq_size + log_rq_stride + 4 < page_shift)
3342                 return 1;
3343
3344         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3345 }
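
/* Worked example (annotation): the WQE stride is 16 << log_rq_stride
 * bytes, so with log_srq_size = 10, log_rq_stride = 2 (64-byte WQEs) and
 * 4 KB pages (page_shift = 12) the buffer needs
 * 1 << (10 + 2 + 4 - 12) = 16 MTT entries, while a buffer smaller than
 * one page still consumes a single entry.
 */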
3346
3347 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3348                            struct mlx4_vhcr *vhcr,
3349                            struct mlx4_cmd_mailbox *inbox,
3350                            struct mlx4_cmd_mailbox *outbox,
3351                            struct mlx4_cmd_info *cmd)
3352 {
3353         int err;
3354         int srqn = vhcr->in_modifier;
3355         struct res_mtt *mtt;
3356         struct res_srq *srq;
3357         struct mlx4_srq_context *srqc = inbox->buf;
3358         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3359
3360         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3361                 return -EINVAL;
3362
3363         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3364         if (err)
3365                 return err;
3366         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3367         if (err)
3368                 goto ex_abort;
3369         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3370                               mtt);
3371         if (err)
3372                 goto ex_put_mtt;
3373
3374         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3375         if (err)
3376                 goto ex_put_mtt;
3377
3378         atomic_inc(&mtt->ref_count);
3379         srq->mtt = mtt;
3380         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3381         res_end_move(dev, slave, RES_SRQ, srqn);
3382         return 0;
3383
3384 ex_put_mtt:
3385         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3386 ex_abort:
3387         res_abort_move(dev, slave, RES_SRQ, srqn);
3388
3389         return err;
3390 }
3391
3392 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3393                            struct mlx4_vhcr *vhcr,
3394                            struct mlx4_cmd_mailbox *inbox,
3395                            struct mlx4_cmd_mailbox *outbox,
3396                            struct mlx4_cmd_info *cmd)
3397 {
3398         int err;
3399         int srqn = vhcr->in_modifier;
3400         struct res_srq *srq;
3401
3402         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3403         if (err)
3404                 return err;
3405         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3406         if (err)
3407                 goto ex_abort;
3408         atomic_dec(&srq->mtt->ref_count);
3409         if (srq->cq)
3410                 atomic_dec(&srq->cq->ref_count);
3411         res_end_move(dev, slave, RES_SRQ, srqn);
3412
3413         return 0;
3414
3415 ex_abort:
3416         res_abort_move(dev, slave, RES_SRQ, srqn);
3417
3418         return err;
3419 }
3420
3421 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3422                            struct mlx4_vhcr *vhcr,
3423                            struct mlx4_cmd_mailbox *inbox,
3424                            struct mlx4_cmd_mailbox *outbox,
3425                            struct mlx4_cmd_info *cmd)
3426 {
3427         int err;
3428         int srqn = vhcr->in_modifier;
3429         struct res_srq *srq;
3430
3431         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3432         if (err)
3433                 return err;
3434         if (srq->com.from_state != RES_SRQ_HW) {
3435                 err = -EBUSY;
3436                 goto out;
3437         }
3438         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3439 out:
3440         put_res(dev, slave, srqn, RES_SRQ);
3441         return err;
3442 }
3443
3444 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3445                          struct mlx4_vhcr *vhcr,
3446                          struct mlx4_cmd_mailbox *inbox,
3447                          struct mlx4_cmd_mailbox *outbox,
3448                          struct mlx4_cmd_info *cmd)
3449 {
3450         int err;
3451         int srqn = vhcr->in_modifier;
3452         struct res_srq *srq;
3453
3454         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3455         if (err)
3456                 return err;
3457
3458         if (srq->com.from_state != RES_SRQ_HW) {
3459                 err = -EBUSY;
3460                 goto out;
3461         }
3462
3463         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3464 out:
3465         put_res(dev, slave, srqn, RES_SRQ);
3466         return err;
3467 }
3468
3469 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3470                         struct mlx4_vhcr *vhcr,
3471                         struct mlx4_cmd_mailbox *inbox,
3472                         struct mlx4_cmd_mailbox *outbox,
3473                         struct mlx4_cmd_info *cmd)
3474 {
3475         int err;
3476         int qpn = vhcr->in_modifier & 0x7fffff;
3477         struct res_qp *qp;
3478
3479         err = get_res(dev, slave, qpn, RES_QP, &qp);
3480         if (err)
3481                 return err;
3482         if (qp->com.from_state != RES_QP_HW) {
3483                 err = -EBUSY;
3484                 goto out;
3485         }
3486
3487         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3488 out:
3489         put_res(dev, slave, qpn, RES_QP);
3490         return err;
3491 }
3492
3493 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3494                               struct mlx4_vhcr *vhcr,
3495                               struct mlx4_cmd_mailbox *inbox,
3496                               struct mlx4_cmd_mailbox *outbox,
3497                               struct mlx4_cmd_info *cmd)
3498 {
3499         struct mlx4_qp_context *context = inbox->buf + 8;
3500         adjust_proxy_tun_qkey(dev, vhcr, context);
3501         update_pkey_index(dev, slave, inbox);
3502         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3503 }
3504
3505 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3506                                   struct mlx4_qp_context *qpc,
3507                                   struct mlx4_cmd_mailbox *inbox)
3508 {
3509         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3510         u8 pri_sched_queue;
3511         int port = mlx4_slave_convert_port(
3512                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3513
3514         if (port < 0)
3515                 return -EINVAL;
3516
3517         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3518                           ((port & 1) << 6);
3519
3520         if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3521             mlx4_is_eth(dev, port + 1)) {
3522                 qpc->pri_path.sched_queue = pri_sched_queue;
3523         }
3524
3525         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3526                 port = mlx4_slave_convert_port(
3527                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3528                                 + 1) - 1;
3529                 if (port < 0)
3530                         return -EINVAL;
3531                 qpc->alt_path.sched_queue =
3532                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3533                         (port & 1) << 6;
3534         }
3535         return 0;
3536 }
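
/* Port remap walk-through (annotation): bit 6 of sched_queue encodes
 * (port - 1), so for a slave whose virtual port 1 is backed by physical
 * port 2, mlx4_slave_convert_port() returns 2 and the bit flips from 0
 * to 1, while the ~(1 << 6) mask preserves the surrounding priority bits.
 */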
3537
3538 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3539                                 struct mlx4_qp_context *qpc,
3540                                 struct mlx4_cmd_mailbox *inbox)
3541 {
3542         u64 mac;
3543         int port;
3544         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3545         u8 sched = *(u8 *)(inbox->buf + 64);
3546         u8 smac_ix;
3547
3548         port = (sched >> 6 & 1) + 1;
3549         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3550                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3551                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3552                         return -ENOENT;
3553         }
3554         return 0;
3555 }
3556
3557 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3558                              struct mlx4_vhcr *vhcr,
3559                              struct mlx4_cmd_mailbox *inbox,
3560                              struct mlx4_cmd_mailbox *outbox,
3561                              struct mlx4_cmd_info *cmd)
3562 {
3563         int err;
3564         struct mlx4_qp_context *qpc = inbox->buf + 8;
3565         int qpn = vhcr->in_modifier & 0x7fffff;
3566         struct res_qp *qp;
3567         u8 orig_sched_queue;
3568         __be32  orig_param3 = qpc->param3;
3569         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3570         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3571         u8 orig_pri_path_fl = qpc->pri_path.fl;
3572         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3573         u8 orig_feup = qpc->pri_path.feup;
3574
3575         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3576         if (err)
3577                 return err;
3578         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3579         if (err)
3580                 return err;
3581
3582         if (roce_verify_mac(dev, slave, qpc, inbox))
3583                 return -EINVAL;
3584
3585         update_pkey_index(dev, slave, inbox);
3586         update_gid(dev, inbox, (u8)slave);
3587         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3588         orig_sched_queue = qpc->pri_path.sched_queue;
3589         err = update_vport_qp_param(dev, inbox, slave, qpn);
3590         if (err)
3591                 return err;
3592
3593         err = get_res(dev, slave, qpn, RES_QP, &qp);
3594         if (err)
3595                 return err;
3596         if (qp->com.from_state != RES_QP_HW) {
3597                 err = -EBUSY;
3598                 goto out;
3599         }
3600
3601         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3602 out:
3603         /* If no error, save the sched queue value passed in by the VF.
3604          * This is essentially the QoS value provided by the VF, and will
3605          * be useful if we allow dynamic changes from VST back to VGT
3606          */
3607         if (!err) {
3608                 qp->sched_queue = orig_sched_queue;
3609                 qp->param3      = orig_param3;
3610                 qp->vlan_control = orig_vlan_control;
3611                 qp->fvl_rx      =  orig_fvl_rx;
3612                 qp->pri_path_fl = orig_pri_path_fl;
3613                 qp->vlan_index  = orig_vlan_index;
3614                 qp->feup        = orig_feup;
3615         }
3616         put_res(dev, slave, qpn, RES_QP);
3617         return err;
3618 }
3619
3620 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3621                             struct mlx4_vhcr *vhcr,
3622                             struct mlx4_cmd_mailbox *inbox,
3623                             struct mlx4_cmd_mailbox *outbox,
3624                             struct mlx4_cmd_info *cmd)
3625 {
3626         int err;
3627         struct mlx4_qp_context *context = inbox->buf + 8;
3628
3629         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3630         if (err)
3631                 return err;
3632         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3633         if (err)
3634                 return err;
3635
3636         update_pkey_index(dev, slave, inbox);
3637         update_gid(dev, inbox, (u8)slave);
3638         adjust_proxy_tun_qkey(dev, vhcr, context);
3639         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3640 }
3641
3642 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3643                             struct mlx4_vhcr *vhcr,
3644                             struct mlx4_cmd_mailbox *inbox,
3645                             struct mlx4_cmd_mailbox *outbox,
3646                             struct mlx4_cmd_info *cmd)
3647 {
3648         int err;
3649         struct mlx4_qp_context *context = inbox->buf + 8;
3650
3651         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3652         if (err)
3653                 return err;
3654         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3655         if (err)
3656                 return err;
3657
3658         update_pkey_index(dev, slave, inbox);
3659         update_gid(dev, inbox, (u8)slave);
3660         adjust_proxy_tun_qkey(dev, vhcr, context);
3661         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3662 }
3663
3665 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3666                               struct mlx4_vhcr *vhcr,
3667                               struct mlx4_cmd_mailbox *inbox,
3668                               struct mlx4_cmd_mailbox *outbox,
3669                               struct mlx4_cmd_info *cmd)
3670 {
3671         struct mlx4_qp_context *context = inbox->buf + 8;
3672         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3673         if (err)
3674                 return err;
3675         adjust_proxy_tun_qkey(dev, vhcr, context);
3676         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3677 }
3678
3679 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3680                             struct mlx4_vhcr *vhcr,
3681                             struct mlx4_cmd_mailbox *inbox,
3682                             struct mlx4_cmd_mailbox *outbox,
3683                             struct mlx4_cmd_info *cmd)
3684 {
3685         int err;
3686         struct mlx4_qp_context *context = inbox->buf + 8;
3687
3688         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3689         if (err)
3690                 return err;
3691         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3692         if (err)
3693                 return err;
3694
3695         adjust_proxy_tun_qkey(dev, vhcr, context);
3696         update_gid(dev, inbox, (u8)slave);
3697         update_pkey_index(dev, slave, inbox);
3698         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3699 }
3700
3701 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3702                             struct mlx4_vhcr *vhcr,
3703                             struct mlx4_cmd_mailbox *inbox,
3704                             struct mlx4_cmd_mailbox *outbox,
3705                             struct mlx4_cmd_info *cmd)
3706 {
3707         int err;
3708         struct mlx4_qp_context *context = inbox->buf + 8;
3709
3710         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3711         if (err)
3712                 return err;
3713         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3714         if (err)
3715                 return err;
3716
3717         adjust_proxy_tun_qkey(dev, vhcr, context);
3718         update_gid(dev, inbox, (u8)slave);
3719         update_pkey_index(dev, slave, inbox);
3720         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3721 }
3722
3723 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3724                          struct mlx4_vhcr *vhcr,
3725                          struct mlx4_cmd_mailbox *inbox,
3726                          struct mlx4_cmd_mailbox *outbox,
3727                          struct mlx4_cmd_info *cmd)
3728 {
3729         int err;
3730         int qpn = vhcr->in_modifier & 0x7fffff;
3731         struct res_qp *qp;
3732
3733         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3734         if (err)
3735                 return err;
3736         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3737         if (err)
3738                 goto ex_abort;
3739
3740         atomic_dec(&qp->mtt->ref_count);
3741         atomic_dec(&qp->rcq->ref_count);
3742         atomic_dec(&qp->scq->ref_count);
3743         if (qp->srq)
3744                 atomic_dec(&qp->srq->ref_count);
3745         res_end_move(dev, slave, RES_QP, qpn);
3746         return 0;
3747
3748 ex_abort:
3749         res_abort_move(dev, slave, RES_QP, qpn);
3750
3751         return err;
3752 }
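
/* Teardown note (annotation): once the QP is back in reset it no longer
 * pins its MTT, send/receive CQs or SRQ, so each reference taken when the
 * QP moved into hardware ownership is dropped here; rem_slave_qps() below
 * performs the same drops when cleaning up after a removed slave.
 */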
3753
3754 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3755                                 struct res_qp *rqp, u8 *gid)
3756 {
3757         struct res_gid *res;
3758
3759         list_for_each_entry(res, &rqp->mcg_list, list) {
3760                 if (!memcmp(res->gid, gid, 16))
3761                         return res;
3762         }
3763         return NULL;
3764 }
3765
3766 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3767                        u8 *gid, enum mlx4_protocol prot,
3768                        enum mlx4_steer_type steer, u64 reg_id)
3769 {
3770         struct res_gid *res;
3771         int err;
3772
3773         res = kzalloc(sizeof(*res), GFP_KERNEL);
3774         if (!res)
3775                 return -ENOMEM;
3776
3777         spin_lock_irq(&rqp->mcg_spl);
3778         if (find_gid(dev, slave, rqp, gid)) {
3779                 kfree(res);
3780                 err = -EEXIST;
3781         } else {
3782                 memcpy(res->gid, gid, 16);
3783                 res->prot = prot;
3784                 res->steer = steer;
3785                 res->reg_id = reg_id;
3786                 list_add_tail(&res->list, &rqp->mcg_list);
3787                 err = 0;
3788         }
3789         spin_unlock_irq(&rqp->mcg_spl);
3790
3791         return err;
3792 }
3793
3794 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3795                        u8 *gid, enum mlx4_protocol prot,
3796                        enum mlx4_steer_type steer, u64 *reg_id)
3797 {
3798         struct res_gid *res;
3799         int err;
3800
3801         spin_lock_irq(&rqp->mcg_spl);
3802         res = find_gid(dev, slave, rqp, gid);
3803         if (!res || res->prot != prot || res->steer != steer)
3804                 err = -EINVAL;
3805         else {
3806                 *reg_id = res->reg_id;
3807                 list_del(&res->list);
3808                 kfree(res);
3809                 err = 0;
3810         }
3811         spin_unlock_irq(&rqp->mcg_spl);
3812
3813         return err;
3814 }
3815
3816 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3817                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3818                      enum mlx4_steer_type type, u64 *reg_id)
3819 {
3820         switch (dev->caps.steering_mode) {
3821         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3822                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3823                 if (port < 0)
3824                         return port;
3825                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3826                                                 block_loopback, prot,
3827                                                 reg_id);
3828         }
3829         case MLX4_STEERING_MODE_B0:
3830                 if (prot == MLX4_PROT_ETH) {
3831                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3832                         if (port < 0)
3833                                 return port;
3834                         gid[5] = port;
3835                 }
3836                 return mlx4_qp_attach_common(dev, qp, gid,
3837                                             block_loopback, prot, type);
3838         default:
3839                 return -EINVAL;
3840         }
3841 }
3842
3843 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3844                      u8 gid[16], enum mlx4_protocol prot,
3845                      enum mlx4_steer_type type, u64 reg_id)
3846 {
3847         switch (dev->caps.steering_mode) {
3848         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3849                 return mlx4_flow_detach(dev, reg_id);
3850         case MLX4_STEERING_MODE_B0:
3851                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3852         default:
3853                 return -EINVAL;
3854         }
3855 }
3856
3857 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3858                             u8 *gid, enum mlx4_protocol prot)
3859 {
3860         int real_port;
3861
3862         if (prot != MLX4_PROT_ETH)
3863                 return 0;
3864
3865         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3866             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3867                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3868                 if (real_port < 0)
3869                         return -EINVAL;
3870                 gid[5] = real_port;
3871         }
3872
3873         return 0;
3874 }
3875
3876 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3877                                struct mlx4_vhcr *vhcr,
3878                                struct mlx4_cmd_mailbox *inbox,
3879                                struct mlx4_cmd_mailbox *outbox,
3880                                struct mlx4_cmd_info *cmd)
3881 {
3882         struct mlx4_qp qp; /* dummy for calling attach/detach */
3883         u8 *gid = inbox->buf;
3884         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3885         int err;
3886         int qpn;
3887         struct res_qp *rqp;
3888         u64 reg_id = 0;
3889         int attach = vhcr->op_modifier;
3890         int block_loopback = vhcr->in_modifier >> 31;
3891         u8 steer_type_mask = 2;
3892         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3893
3894         qpn = vhcr->in_modifier & 0xffffff;
3895         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3896         if (err)
3897                 return err;
3898
3899         qp.qpn = qpn;
3900         if (attach) {
3901                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3902                                 type, &reg_id);
3903                 if (err) {
3904                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3905                         goto ex_put;
3906                 }
3907                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3908                 if (err)
3909                         goto ex_detach;
3910         } else {
3911                 err = mlx4_adjust_port(dev, slave, gid, prot);
3912                 if (err)
3913                         goto ex_put;
3914
3915                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3916                 if (err)
3917                         goto ex_put;
3918
3919                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3920                 if (err)
3921                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3922                                qpn, reg_id);
3923         }
3924         put_res(dev, slave, qpn, RES_QP);
3925         return err;
3926
3927 ex_detach:
3928         qp_detach(dev, &qp, gid, prot, type, reg_id);
3929 ex_put:
3930         put_res(dev, slave, qpn, RES_QP);
3931         return err;
3932 }
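
/* ATTACH/DETACH encoding summary (annotation, from the decode above):
 * in_modifier bits 0..23 carry the QPN, bits 28..30 the protocol and
 * bit 31 the block-loopback flag; gid[7] bit 1 selects the steering type
 * and a non-zero op_modifier requests attach rather than detach.
 */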
3933
3934 /*
3935  * MAC validation for Flow Steering rules.
3936  * A VF may attach rules only with a MAC address that is assigned to it.
3937  */
3938 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3939                                    struct list_head *rlist)
3940 {
3941         struct mac_res *res, *tmp;
3942         __be64 be_mac;
3943
3944         /* make sure it isn't a multicast or broadcast MAC */
3945         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3946             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3947                 list_for_each_entry_safe(res, tmp, rlist, list) {
3948                         be_mac = cpu_to_be64(res->mac << 16);
3949                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3950                                 return 0;
3951                 }
3952                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3953                        eth_header->eth.dst_mac, slave);
3954                 return -EINVAL;
3955         }
3956         return 0;
3957 }
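
/* MAC layout example (annotation): tracked MACs are stored as 48-bit
 * integers, so res->mac << 16 left-aligns the address within a __be64 -
 * e.g. 00:11:22:33:44:55 is held as 0x001122334455, and after
 * cpu_to_be64(mac << 16) its first ETH_ALEN bytes are exactly the wire
 * order bytes that ether_addr_equal() compares against dst_mac.
 */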
3958
3959 /*
3960  * If the eth header is missing, prepend an eth header with a MAC address
3961  * assigned to the VF.
3962  */
3963 static int add_eth_header(struct mlx4_dev *dev, int slave,
3964                           struct mlx4_cmd_mailbox *inbox,
3965                           struct list_head *rlist, int header_id)
3966 {
3967         struct mac_res *res, *tmp;
3968         u8 port;
3969         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3970         struct mlx4_net_trans_rule_hw_eth *eth_header;
3971         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3972         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3973         __be64 be_mac = 0;
3974         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3975
3976         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3977         port = ctrl->port;
3978         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3979
3980         /* Clear a space in the inbox for eth header */
3981         switch (header_id) {
3982         case MLX4_NET_TRANS_RULE_ID_IPV4:
3983                 ip_header =
3984                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3985                 memmove(ip_header, eth_header,
3986                         sizeof(*ip_header) + sizeof(*l4_header));
3987                 break;
3988         case MLX4_NET_TRANS_RULE_ID_TCP:
3989         case MLX4_NET_TRANS_RULE_ID_UDP:
3990                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3991                             (eth_header + 1);
3992                 memmove(l4_header, eth_header, sizeof(*l4_header));
3993                 break;
3994         default:
3995                 return -EINVAL;
3996         }
3997         list_for_each_entry_safe(res, tmp, rlist, list) {
3998                 if (port == res->port) {
3999                         be_mac = cpu_to_be64(res->mac << 16);
4000                         break;
4001                 }
4002         }
4003         if (!be_mac) {
4004                 pr_err("Failed to add eth header to FS rule: can't find a matching MAC for port %d\n",
4005                        port);
4006                 return -EINVAL;
4007         }
4008
4009         memset(eth_header, 0, sizeof(*eth_header));
4010         eth_header->size = sizeof(*eth_header) >> 2;
4011         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4012         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4013         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4014
4015         return 0;
4017 }
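
/* Layout note (annotation): the eth header is prepended rather than
 * appended - the IPv4/L4 specs are shifted up by memmove() and the
 * vacated slot is filled with an exact-match L2 rule, so the caller must
 * grow the command size to match; mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
 * below adds sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2 dwords to
 * vhcr->in_modifier.
 */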
4018
4019 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4020 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4021                            struct mlx4_vhcr *vhcr,
4022                            struct mlx4_cmd_mailbox *inbox,
4023                            struct mlx4_cmd_mailbox *outbox,
4024                            struct mlx4_cmd_info *cmd_info)
4025 {
4026         int err;
4027         u32 qpn = vhcr->in_modifier & 0xffffff;
4028         struct res_qp *rqp;
4029         u64 mac;
4030         unsigned port;
4031         u64 pri_addr_path_mask;
4032         struct mlx4_update_qp_context *cmd;
4033         int smac_index;
4034
4035         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4036
4037         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4038         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4039             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4040                 return -EPERM;
4041
4042         /* Just change the smac for the QP */
4043         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4044         if (err) {
4045                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4046                 return err;
4047         }
4048
4049         port = (rqp->sched_queue >> 6 & 1) + 1;
4050
4051         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4052                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4053                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4054                                                 smac_index, &mac);
4055
4056                 if (err) {
4057                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4058                                  qpn, smac_index);
4059                         goto err_mac;
4060                 }
4061         }
4062
4063         err = mlx4_cmd(dev, inbox->dma,
4064                        vhcr->in_modifier, 0,
4065                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4066                        MLX4_CMD_NATIVE);
4067         if (err)
4068                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4071
4072 err_mac:
4073         put_res(dev, slave, qpn, RES_QP);
4074         return err;
4075 }
4076
4077 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4078                                          struct mlx4_vhcr *vhcr,
4079                                          struct mlx4_cmd_mailbox *inbox,
4080                                          struct mlx4_cmd_mailbox *outbox,
4081                                          struct mlx4_cmd_info *cmd)
4082 {
4084         struct mlx4_priv *priv = mlx4_priv(dev);
4085         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4086         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4087         int err;
4088         int qpn;
4089         struct res_qp *rqp;
4090         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4091         struct _rule_hw  *rule_header;
4092         int header_id;
4093
4094         if (dev->caps.steering_mode !=
4095             MLX4_STEERING_MODE_DEVICE_MANAGED)
4096                 return -EOPNOTSUPP;
4097
4098         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4099         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4100         if (err <= 0)
4101                 return -EINVAL;
             ctrl->port = err;
4102         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4103         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4104         if (err) {
4105                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4106                 return err;
4107         }
4108         rule_header = (struct _rule_hw *)(ctrl + 1);
4109         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4110
4111         switch (header_id) {
4112         case MLX4_NET_TRANS_RULE_ID_ETH:
4113                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4114                         err = -EINVAL;
4115                         goto err_put;
4116                 }
4117                 break;
4118         case MLX4_NET_TRANS_RULE_ID_IB:
4119                 break;
4120         case MLX4_NET_TRANS_RULE_ID_IPV4:
4121         case MLX4_NET_TRANS_RULE_ID_TCP:
4122         case MLX4_NET_TRANS_RULE_ID_UDP:
4123                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4124                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4125                         err = -EINVAL;
4126                         goto err_put;
4127                 }
4128                 vhcr->in_modifier +=
4129                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4130                 break;
4131         default:
4132                 pr_err("Corrupted mailbox\n");
4133                 err = -EINVAL;
4134                 goto err_put;
4135         }
4136
4137         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4138                            vhcr->in_modifier, 0,
4139                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4140                            MLX4_CMD_NATIVE);
4141         if (err)
4142                 goto err_put;
4143
4144         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4145         if (err) {
4146                 mlx4_err(dev, "Failed to add flow steering resources\n");
4147                 /* detach rule */
4148                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4149                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4150                          MLX4_CMD_NATIVE);
4151                 goto err_put;
4152         }
4153         atomic_inc(&rqp->ref_count);
4154 err_put:
4155         put_res(dev, slave, qpn, RES_QP);
4156         return err;
4157 }
4158
4159 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4160                                          struct mlx4_vhcr *vhcr,
4161                                          struct mlx4_cmd_mailbox *inbox,
4162                                          struct mlx4_cmd_mailbox *outbox,
4163                                          struct mlx4_cmd_info *cmd)
4164 {
4165         int err;
4166         struct res_qp *rqp;
4167         struct res_fs_rule *rrule;
4168
4169         if (dev->caps.steering_mode !=
4170             MLX4_STEERING_MODE_DEVICE_MANAGED)
4171                 return -EOPNOTSUPP;
4172
4173         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4174         if (err)
4175                 return err;
4176         /* Release the rule from busy state before removal */
4177         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4178         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4179         if (err)
4180                 return err;
4181
4182         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4183         if (err) {
4184                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4185                 goto out;
4186         }
4187
4188         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4189                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4190                        MLX4_CMD_NATIVE);
4191         if (!err)
4192                 atomic_dec(&rqp->ref_count);
4193 out:
4194         put_res(dev, slave, rrule->qpn, RES_QP);
4195         return err;
4196 }
4197
4198 enum {
4199         BUSY_MAX_RETRIES = 10
4200 };
4201
4202 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4203                                struct mlx4_vhcr *vhcr,
4204                                struct mlx4_cmd_mailbox *inbox,
4205                                struct mlx4_cmd_mailbox *outbox,
4206                                struct mlx4_cmd_info *cmd)
4207 {
4208         int err;
4209         int index = vhcr->in_modifier & 0xffff;
4210
4211         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4212         if (err)
4213                 return err;
4214
4215         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4216         put_res(dev, slave, index, RES_COUNTER);
4217         return err;
4218 }
4219
4220 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4221 {
4222         struct res_gid *rgid;
4223         struct res_gid *tmp;
4224         struct mlx4_qp qp; /* dummy for calling attach/detach */
4225
4226         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4227                 switch (dev->caps.steering_mode) {
4228                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4229                         mlx4_flow_detach(dev, rgid->reg_id);
4230                         break;
4231                 case MLX4_STEERING_MODE_B0:
4232                         qp.qpn = rqp->local_qpn;
4233                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4234                                                      rgid->prot, rgid->steer);
4235                         break;
4236                 }
4237                 list_del(&rgid->list);
4238                 kfree(rgid);
4239         }
4240 }
4241
4242 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4243                           enum mlx4_resource type, int print)
4244 {
4245         struct mlx4_priv *priv = mlx4_priv(dev);
4246         struct mlx4_resource_tracker *tracker =
4247                 &priv->mfunc.master.res_tracker;
4248         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4249         struct res_common *r;
4250         struct res_common *tmp;
4251         int busy;
4252
4253         busy = 0;
4254         spin_lock_irq(mlx4_tlock(dev));
4255         list_for_each_entry_safe(r, tmp, rlist, list) {
4256                 if (r->owner == slave) {
4257                         if (!r->removing) {
4258                                 if (r->state == RES_ANY_BUSY) {
4259                                         if (print)
4260                                                 mlx4_dbg(dev,
4261                                                          "%s id 0x%llx is busy\n",
4262                                                           resource_str(type),
4263                                                           r->res_id);
4264                                         ++busy;
4265                                 } else {
4266                                         r->from_state = r->state;
4267                                         r->state = RES_ANY_BUSY;
4268                                         r->removing = 1;
4269                                 }
4270                         }
4271                 }
4272         }
4273         spin_unlock_irq(mlx4_tlock(dev));
4274
4275         return busy;
4276 }
4277
4278 static int move_all_busy(struct mlx4_dev *dev, int slave,
4279                          enum mlx4_resource type)
4280 {
4281         unsigned long begin;
4282         int busy;
4283
4284         begin = jiffies;
4285         do {
4286                 busy = _move_all_busy(dev, slave, type, 0);
4287                 if (time_after(jiffies, begin + 5 * HZ))
4288                         break;
4289                 if (busy)
4290                         cond_resched();
4291         } while (busy);
4292
4293         if (busy)
4294                 busy = _move_all_busy(dev, slave, type, 1);
4295
4296         return busy;
4297 }
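
/* Quiesce loop note (annotation): _move_all_busy() is polled with
 * cond_resched() until every resource of the given type is marked for
 * removal or 5 seconds (5 * HZ jiffies) elapse; the final call re-runs
 * with print == 1 only so that the ids still busy are logged via
 * mlx4_dbg().
 */
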
4298 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4299 {
4300         struct mlx4_priv *priv = mlx4_priv(dev);
4301         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4302         struct list_head *qp_list =
4303                 &tracker->slave_list[slave].res_list[RES_QP];
4304         struct res_qp *qp;
4305         struct res_qp *tmp;
4306         int state;
4307         u64 in_param;
4308         int qpn;
4309         int err;
4310
4311         err = move_all_busy(dev, slave, RES_QP);
4312         if (err)
4313                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4314                           slave);
4315
4316         spin_lock_irq(mlx4_tlock(dev));
4317         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4318                 spin_unlock_irq(mlx4_tlock(dev));
4319                 if (qp->com.owner == slave) {
4320                         qpn = qp->com.res_id;
4321                         detach_qp(dev, slave, qp);
4322                         state = qp->com.from_state;
4323                         while (state != 0) {
4324                                 switch (state) {
4325                                 case RES_QP_RESERVED:
4326                                         spin_lock_irq(mlx4_tlock(dev));
4327                                         rb_erase(&qp->com.node,
4328                                                  &tracker->res_tree[RES_QP]);
4329                                         list_del(&qp->com.list);
4330                                         spin_unlock_irq(mlx4_tlock(dev));
4331                                         if (!valid_reserved(dev, slave, qpn)) {
4332                                                 __mlx4_qp_release_range(dev, qpn, 1);
4333                                                 mlx4_release_resource(dev, slave,
4334                                                                       RES_QP, 1, 0);
4335                                         }
4336                                         kfree(qp);
4337                                         state = 0;
4338                                         break;
4339                                 case RES_QP_MAPPED:
4340                                         if (!valid_reserved(dev, slave, qpn))
4341                                                 __mlx4_qp_free_icm(dev, qpn);
4342                                         state = RES_QP_RESERVED;
4343                                         break;
4344                                 case RES_QP_HW:
4345                                         in_param = slave;
4346                                         err = mlx4_cmd(dev, in_param,
4347                                                        qp->local_qpn, 2,
4348                                                        MLX4_CMD_2RST_QP,
4349                                                        MLX4_CMD_TIME_CLASS_A,
4350                                                        MLX4_CMD_NATIVE);
4351                                         if (err)
4352                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4353                                                          slave, qp->local_qpn);
4354                                         atomic_dec(&qp->rcq->ref_count);
4355                                         atomic_dec(&qp->scq->ref_count);
4356                                         atomic_dec(&qp->mtt->ref_count);
4357                                         if (qp->srq)
4358                                                 atomic_dec(&qp->srq->ref_count);
4359                                         state = RES_QP_MAPPED;
4360                                         break;
4361                                 default:
4362                                         state = 0;
4363                                 }
4364                         }
4365                 }
4366                 spin_lock_irq(mlx4_tlock(dev));
4367         }
4368         spin_unlock_irq(mlx4_tlock(dev));
4369 }
4370
4371 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4372 {
4373         struct mlx4_priv *priv = mlx4_priv(dev);
4374         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4375         struct list_head *srq_list =
4376                 &tracker->slave_list[slave].res_list[RES_SRQ];
4377         struct res_srq *srq;
4378         struct res_srq *tmp;
4379         int state;
4380         u64 in_param;
4382         int srqn;
4383         int err;
4384
4385         err = move_all_busy(dev, slave, RES_SRQ);
4386         if (err)
4387                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4388                           slave);
4389
4390         spin_lock_irq(mlx4_tlock(dev));
4391         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4392                 spin_unlock_irq(mlx4_tlock(dev));
4393                 if (srq->com.owner == slave) {
4394                         srqn = srq->com.res_id;
4395                         state = srq->com.from_state;
4396                         while (state != 0) {
4397                                 switch (state) {
4398                                 case RES_SRQ_ALLOCATED:
4399                                         __mlx4_srq_free_icm(dev, srqn);
4400                                         spin_lock_irq(mlx4_tlock(dev));
4401                                         rb_erase(&srq->com.node,
4402                                                  &tracker->res_tree[RES_SRQ]);
4403                                         list_del(&srq->com.list);
4404                                         spin_unlock_irq(mlx4_tlock(dev));
4405                                         mlx4_release_resource(dev, slave,
4406                                                               RES_SRQ, 1, 0);
4407                                         kfree(srq);
4408                                         state = 0;
4409                                         break;
4410
4411                                 case RES_SRQ_HW:
4412                                         in_param = slave;
4413                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4414                                                        MLX4_CMD_HW2SW_SRQ,
4415                                                        MLX4_CMD_TIME_CLASS_A,
4416                                                        MLX4_CMD_NATIVE);
4417                                         if (err)
4418                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4419                                                          slave, srqn);
4420
4421                                         atomic_dec(&srq->mtt->ref_count);
4422                                         if (srq->cq)
4423                                                 atomic_dec(&srq->cq->ref_count);
4424                                         state = RES_SRQ_ALLOCATED;
4425                                         break;
4426
4427                                 default:
4428                                         state = 0;
4429                                 }
4430                         }
4431                 }
4432                 spin_lock_irq(mlx4_tlock(dev));
4433         }
4434         spin_unlock_irq(mlx4_tlock(dev));
4435 }
4436
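/*
 * Release all CQs still owned by @slave.  CQs that still have users
 * (ref_count != 0) are skipped; the QP and SRQ cleanup must drop those
 * references first.  RES_CQ_HW is moved to SW ownership via HW2SW_CQ,
 * then RES_CQ_ALLOCATED frees the ICM and the tracker entry.
 */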
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

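/*
 * Release all MPTs (memory regions/windows) still owned by @slave:
 * RES_MPT_HW is returned to SW ownership via HW2SW_MPT and drops its
 * MTT reference, RES_MPT_MAPPED frees the ICM, and RES_MPT_RESERVED
 * releases the key along with the tracker entry and quota.
 */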
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

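/*
 * Release all MTT ranges still owned by @slave.  MTTs are torn down
 * after the QPs, SRQs, CQs, MPTs and EQs that hold references on them
 * (see the call order in mlx4_delete_all_resources_for_slave).
 */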
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

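/*
 * Detach and free all flow steering rules still owned by @slave.  A
 * failure of the DETACH command is not propagated; the tracker entry
 * is removed and freed regardless.
 */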
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

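/*
 * Release all EQs still owned by @slave: RES_EQ_HW is returned to SW
 * ownership via HW2SW_EQ and drops its MTT reference, then the
 * RES_EQ_RESERVED entry is removed from the tracker.
 */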
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0xff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

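/*
 * Counters have no multi-step state machine: each one owned by @slave
 * is simply unlinked from the tracker, its HW counter index freed and
 * its quota returned.
 */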
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

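/*
 * XRC domains likewise need no HW command: unlink each one owned by
 * @slave from the tracker and free its xrcdn.
 */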
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

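/*
 * Reclaim every resource still owned by @slave, under the slave's
 * tracker mutex.  Teardown order matters: QPs, SRQs, CQs, MPTs and EQs
 * must go before the MTTs they hold references on, with counters and
 * XRC domains last.
 */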
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

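/*
 * Deferred work: push an immediate vlan/qos change down to every QP
 * owned by work->slave on work->port by issuing UPDATE_QP on each one.
 * QPs not yet in HW ownership, QPs with no INIT2RTR transition yet,
 * reserved QPs and RSS QPs are skipped.  MLX4_VGT restores the values
 * saved in the tracker instead of forcing the new vlan.
 */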
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id) /* vlan 0: block only tagged traffic */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else /* block tagged TX and untagged/prio-tagged RX */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}