net/mlx4_core: Add support for filtering multicast loopback
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*rcq;
	struct res_cq		*scq;
	struct res_srq		*srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt		*mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt		*mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt		*mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
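
/*
 * Illustrative sketch, not part of the driver: typical use of the two
 * helpers above.  The tree holds res_common entries keyed by res_id, so
 * a lookup miss returns NULL and a duplicate insert fails with -EEXIST.
 * track_one() is hypothetical, and its explicit lookup is redundant
 * (res_tracker_insert() detects duplicates itself); it is shown only to
 * demonstrate both helpers.
 */
static int __maybe_unused track_one(struct rb_root *root,
				    struct res_common *res)
{
	if (res_tracker_lookup(root, res->res_id))	/* already tracked */
		return -EEXIST;

	return res_tracker_insert(root, res);		/* 0 on success */
}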

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
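
/*
 * Worked example for the accounting above (illustrative numbers only):
 * a function with guaranteed = 4 and allocated = 2 requests count = 6.
 * The remaining guaranteed headroom covers 2 instances, so from_free = 4
 * and from_rsvd = 2; the request is granted only if taking those 4 from
 * the shared pool still leaves enough to back the other functions'
 * guarantees, i.e. free - from_free >= reserved.
 */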

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
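
/*
 * Worked example (illustrative numbers): with num_instances = 100 and
 * four VFs (num_vfs + 1 == 5 functions), each function is guaranteed
 * 100 / (2 * 5) = 10 instances and gets a quota of 100 / 2 + 10 = 60.
 * Half of the pool therefore backs the guarantees, while the other half
 * is shared first-come first-served up to each function's quota.
 */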

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}
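
/*
 * Worked example (illustrative numbers): with max_counters = 128, one
 * counter is the sink and the PF keeps 2 per port on both ports, so
 * (128 - 1 - 2 * 2) / 2 = 61 VFs can each be guaranteed one counter
 * per port.
 */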

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated =
				kzalloc(MLX4_MAX_PORTS *
					(dev->persist->num_vfs + 1) *
					sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated =
				kzalloc((dev->persist->num_vfs + 1) *
					sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
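
/*
 * Note on the raw offsets above: the QP context starts at offset 8 of
 * the command mailbox, so offset 64 lands on pri_path.sched_queue
 * (bit 6 selects the physical port, hence the "+ 1") and offset 35 on
 * pri_path.pkey_index, which is rewritten through the per-slave
 * virt2phys_pkey translation table.
 */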

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
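
/*
 * Worked example for the remapping above (illustrative numbers): on an
 * Ethernet port where mlx4_get_base_gid_ix() gives slave 3 a base GID
 * index of 8, a guest-written mgid_index of 2 on an RC/UC/XRC QP becomes
 * physical index 8 + 2 = 10 (masked to 7 bits), while a UD QP gets the
 * base index itself with the top bit (0x80) set.
 */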

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force vlan stripping by clearing vsd; an MLX QP here means Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
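
/*
 * Illustrative sketch, not part of the driver: the get_res()/put_res()
 * pair brackets any work on a tracked resource.  get_res() parks the
 * entry in RES_ANY_BUSY so it cannot be grabbed or moved concurrently,
 * and put_res() restores the saved state.  with_cq_held() is
 * hypothetical.
 */
static int __maybe_unused with_cq_held(struct mlx4_dev *dev, int slave,
				       u64 cqn)
{
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);	/* marks it busy */
	if (err)
		return err;	/* -ENOENT, -EBUSY or -EPERM */

	/* ... inspect or update the CQ; it cannot change under us ... */

	put_res(dev, slave, cqn, RES_CQ);		/* restore state */
	return 0;
}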

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
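
/*
 * Usage note (illustrative, values made up): after a slave reserves a
 * block of eight QPs starting at base_qpn, the block is registered with
 * add_res_range(dev, slave, base_qpn, 8, RES_QP, 0); a later
 * rem_res_range(dev, slave, base_qpn, 8, RES_QP, 0) releases the whole
 * block, provided every entry is still owned by that slave and passes
 * the remove_ok() checks.
 */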

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
1307
1308 static int remove_counter_ok(struct res_counter *res)
1309 {
1310         if (res->com.state == RES_COUNTER_BUSY)
1311                 return -EBUSY;
1312         else if (res->com.state != RES_COUNTER_ALLOCATED)
1313                 return -EPERM;
1314
1315         return 0;
1316 }
1317
1318 static int remove_xrcdn_ok(struct res_xrcdn *res)
1319 {
1320         if (res->com.state == RES_XRCD_BUSY)
1321                 return -EBUSY;
1322         else if (res->com.state != RES_XRCD_ALLOCATED)
1323                 return -EPERM;
1324
1325         return 0;
1326 }
1327
1328 static int remove_fs_rule_ok(struct res_fs_rule *res)
1329 {
1330         if (res->com.state == RES_FS_RULE_BUSY)
1331                 return -EBUSY;
1332         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1333                 return -EPERM;
1334
1335         return 0;
1336 }
1337
1338 static int remove_cq_ok(struct res_cq *res)
1339 {
1340         if (res->com.state == RES_CQ_BUSY)
1341                 return -EBUSY;
1342         else if (res->com.state != RES_CQ_ALLOCATED)
1343                 return -EPERM;
1344
1345         return 0;
1346 }
1347
1348 static int remove_srq_ok(struct res_srq *res)
1349 {
1350         if (res->com.state == RES_SRQ_BUSY)
1351                 return -EBUSY;
1352         else if (res->com.state != RES_SRQ_ALLOCATED)
1353                 return -EPERM;
1354
1355         return 0;
1356 }
1357
1358 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1359 {
1360         switch (type) {
1361         case RES_QP:
1362                 return remove_qp_ok((struct res_qp *)res);
1363         case RES_CQ:
1364                 return remove_cq_ok((struct res_cq *)res);
1365         case RES_SRQ:
1366                 return remove_srq_ok((struct res_srq *)res);
1367         case RES_MPT:
1368                 return remove_mpt_ok((struct res_mpt *)res);
1369         case RES_MTT:
1370                 return remove_mtt_ok((struct res_mtt *)res, extra);
1371         case RES_MAC:
1372                 return -ENOSYS;
1373         case RES_EQ:
1374                 return remove_eq_ok((struct res_eq *)res);
1375         case RES_COUNTER:
1376                 return remove_counter_ok((struct res_counter *)res);
1377         case RES_XRCD:
1378                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1379         case RES_FS_RULE:
1380                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1381         default:
1382                 return -EINVAL;
1383         }
1384 }
1385
1386 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1387                          enum mlx4_resource type, int extra)
1388 {
1389         u64 i;
1390         int err;
1391         struct mlx4_priv *priv = mlx4_priv(dev);
1392         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1393         struct res_common *r;
1394
1395         spin_lock_irq(mlx4_tlock(dev));
1396         for (i = base; i < base + count; ++i) {
1397                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1398                 if (!r) {
1399                         err = -ENOENT;
1400                         goto out;
1401                 }
1402                 if (r->owner != slave) {
1403                         err = -EPERM;
1404                         goto out;
1405                 }
1406                 err = remove_ok(r, type, extra);
1407                 if (err)
1408                         goto out;
1409         }
1410
1411         for (i = base; i < base + count; ++i) {
1412                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1413                 rb_erase(&r->node, &tracker->res_tree[type]);
1414                 list_del(&r->list);
1415                 kfree(r);
1416         }
1417         err = 0;
1418
1419 out:
1420         spin_unlock_irq(mlx4_tlock(dev));
1421
1422         return err;
1423 }
1424
1425 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1426                                 enum res_qp_states state, struct res_qp **qp,
1427                                 int alloc)
1428 {
1429         struct mlx4_priv *priv = mlx4_priv(dev);
1430         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1431         struct res_qp *r;
1432         int err = 0;
1433
1434         spin_lock_irq(mlx4_tlock(dev));
1435         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1436         if (!r)
1437                 err = -ENOENT;
1438         else if (r->com.owner != slave)
1439                 err = -EPERM;
1440         else {
1441                 switch (state) {
1442                 case RES_QP_BUSY:
1443                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1444                                  __func__, r->com.res_id);
1445                         err = -EBUSY;
1446                         break;
1447
1448                 case RES_QP_RESERVED:
1449                         if (r->com.state == RES_QP_MAPPED && !alloc)
1450                                 break;
1451
1452                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1453                         err = -EINVAL;
1454                         break;
1455
1456                 case RES_QP_MAPPED:
1457                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1458                             r->com.state == RES_QP_HW)
1459                                 break;
1460                         else {
1461                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1462                                           r->com.res_id);
1463                                 err = -EINVAL;
1464                         }
1465
1466                         break;
1467
1468                 case RES_QP_HW:
1469                         if (r->com.state != RES_QP_MAPPED)
1470                                 err = -EINVAL;
1471                         break;
1472                 default:
1473                         err = -EINVAL;
1474                 }
1475
1476                 if (!err) {
1477                         r->com.from_state = r->com.state;
1478                         r->com.to_state = state;
1479                         r->com.state = RES_QP_BUSY;
1480                         if (qp)
1481                                 *qp = r;
1482                 }
1483         }
1484
1485         spin_unlock_irq(mlx4_tlock(dev));
1486
1487         return err;
1488 }
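
/*
 * Added summary (derived from the checks above): the legal QP state
 * moves, with the 'alloc' flag disambiguating direction through
 * RES_QP_MAPPED:
 *
 *   RES_QP_MAPPED   -> RES_QP_RESERVED   (alloc == 0, ICM being freed)
 *   RES_QP_RESERVED -> RES_QP_MAPPED     (alloc == 1, ICM being mapped)
 *   RES_QP_HW       -> RES_QP_MAPPED     (HW2SW direction)
 *   RES_QP_MAPPED   -> RES_QP_HW         (SW2HW direction)
 *
 * Requesting the BUSY state itself is refused with -EBUSY; any other
 * mismatch fails with -EINVAL.
 */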
1489
1490 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1491                                 enum res_mpt_states state, struct res_mpt **mpt)
1492 {
1493         struct mlx4_priv *priv = mlx4_priv(dev);
1494         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1495         struct res_mpt *r;
1496         int err = 0;
1497
1498         spin_lock_irq(mlx4_tlock(dev));
1499         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1500         if (!r)
1501                 err = -ENOENT;
1502         else if (r->com.owner != slave)
1503                 err = -EPERM;
1504         else {
1505                 switch (state) {
1506                 case RES_MPT_BUSY:
1507                         err = -EINVAL;
1508                         break;
1509
1510                 case RES_MPT_RESERVED:
1511                         if (r->com.state != RES_MPT_MAPPED)
1512                                 err = -EINVAL;
1513                         break;
1514
1515                 case RES_MPT_MAPPED:
1516                         if (r->com.state != RES_MPT_RESERVED &&
1517                             r->com.state != RES_MPT_HW)
1518                                 err = -EINVAL;
1519                         break;
1520
1521                 case RES_MPT_HW:
1522                         if (r->com.state != RES_MPT_MAPPED)
1523                                 err = -EINVAL;
1524                         break;
1525                 default:
1526                         err = -EINVAL;
1527                 }
1528
1529                 if (!err) {
1530                         r->com.from_state = r->com.state;
1531                         r->com.to_state = state;
1532                         r->com.state = RES_MPT_BUSY;
1533                         if (mpt)
1534                                 *mpt = r;
1535                 }
1536         }
1537
1538         spin_unlock_irq(mlx4_tlock(dev));
1539
1540         return err;
1541 }
1542
1543 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1544                                 enum res_eq_states state, struct res_eq **eq)
1545 {
1546         struct mlx4_priv *priv = mlx4_priv(dev);
1547         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1548         struct res_eq *r;
1549         int err = 0;
1550
1551         spin_lock_irq(mlx4_tlock(dev));
1552         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1553         if (!r)
1554                 err = -ENOENT;
1555         else if (r->com.owner != slave)
1556                 err = -EPERM;
1557         else {
1558                 switch (state) {
1559                 case RES_EQ_BUSY:
1560                         err = -EINVAL;
1561                         break;
1562
1563                 case RES_EQ_RESERVED:
1564                         if (r->com.state != RES_EQ_HW)
1565                                 err = -EINVAL;
1566                         break;
1567
1568                 case RES_EQ_HW:
1569                         if (r->com.state != RES_EQ_RESERVED)
1570                                 err = -EINVAL;
1571                         break;
1572
1573                 default:
1574                         err = -EINVAL;
1575                 }
1576
1577                 if (!err) {
1578                         r->com.from_state = r->com.state;
1579                         r->com.to_state = state;
1580                         r->com.state = RES_EQ_BUSY;
1581                         if (eq)
1582                                 *eq = r;
1583                 }
1584         }
1585
1586         spin_unlock_irq(mlx4_tlock(dev));
1587
1588         return err;
1589 }
1590
1591 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1592                                 enum res_cq_states state, struct res_cq **cq)
1593 {
1594         struct mlx4_priv *priv = mlx4_priv(dev);
1595         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1596         struct res_cq *r;
1597         int err;
1598
1599         spin_lock_irq(mlx4_tlock(dev));
1600         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1601         if (!r) {
1602                 err = -ENOENT;
1603         } else if (r->com.owner != slave) {
1604                 err = -EPERM;
1605         } else if (state == RES_CQ_ALLOCATED) {
1606                 if (r->com.state != RES_CQ_HW)
1607                         err = -EINVAL;
1608                 else if (atomic_read(&r->ref_count))
1609                         err = -EBUSY;
1610                 else
1611                         err = 0;
1612         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1613                 err = -EINVAL;
1614         } else {
1615                 err = 0;
1616         }
1617
1618         if (!err) {
1619                 r->com.from_state = r->com.state;
1620                 r->com.to_state = state;
1621                 r->com.state = RES_CQ_BUSY;
1622                 if (cq)
1623                         *cq = r;
1624         }
1625
1626         spin_unlock_irq(mlx4_tlock(dev));
1627
1628         return err;
1629 }
1630
1631 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1632                                  enum res_srq_states state, struct res_srq **srq)
1633 {
1634         struct mlx4_priv *priv = mlx4_priv(dev);
1635         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1636         struct res_srq *r;
1637         int err = 0;
1638
1639         spin_lock_irq(mlx4_tlock(dev));
1640         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1641         if (!r) {
1642                 err = -ENOENT;
1643         } else if (r->com.owner != slave) {
1644                 err = -EPERM;
1645         } else if (state == RES_SRQ_ALLOCATED) {
1646                 if (r->com.state != RES_SRQ_HW)
1647                         err = -EINVAL;
1648                 else if (atomic_read(&r->ref_count))
1649                         err = -EBUSY;
1650         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1651                 err = -EINVAL;
1652         }
1653
1654         if (!err) {
1655                 r->com.from_state = r->com.state;
1656                 r->com.to_state = state;
1657                 r->com.state = RES_SRQ_BUSY;
1658                 if (srq)
1659                         *srq = r;
1660         }
1661
1662         spin_unlock_irq(mlx4_tlock(dev));
1663
1664         return err;
1665 }
1666
1667 static void res_abort_move(struct mlx4_dev *dev, int slave,
1668                            enum mlx4_resource type, int id)
1669 {
1670         struct mlx4_priv *priv = mlx4_priv(dev);
1671         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1672         struct res_common *r;
1673
1674         spin_lock_irq(mlx4_tlock(dev));
1675         r = res_tracker_lookup(&tracker->res_tree[type], id);
1676         if (r && (r->owner == slave))
1677                 r->state = r->from_state;
1678         spin_unlock_irq(mlx4_tlock(dev));
1679 }
1680
1681 static void res_end_move(struct mlx4_dev *dev, int slave,
1682                          enum mlx4_resource type, int id)
1683 {
1684         struct mlx4_priv *priv = mlx4_priv(dev);
1685         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1686         struct res_common *r;
1687
1688         spin_lock_irq(mlx4_tlock(dev));
1689         r = res_tracker_lookup(&tracker->res_tree[type], id);
1690         if (r && (r->owner == slave))
1691                 r->state = r->to_state;
1692         spin_unlock_irq(mlx4_tlock(dev));
1693 }
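
/*
 * Illustrative sketch, not part of the original file: the
 * *_start_move_to() helpers, res_abort_move() and res_end_move() form a
 * two-phase commit around a firmware command.  The resource sits in its
 * BUSY state while the command runs, blocking concurrent movers, and is
 * then either committed to 'to_state' or rolled back to 'from_state':
 */
#if 0	/* example only, mirroring mlx4_SW2HW_MPT_wrapper() below */
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		res_abort_move(dev, slave, RES_MPT, id);	/* back to from_state */
	else
		res_end_move(dev, slave, RES_MPT, id);		/* commit to_state */
#endif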
1694
1695 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1696 {
1697         return mlx4_is_qp_reserved(dev, qpn) &&
1698                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1699 }
1700
1701 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1702 {
1703         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1704 }
1705
1706 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1707                         u64 in_param, u64 *out_param)
1708 {
1709         int err;
1710         int count;
1711         int align;
1712         int base;
1713         int qpn;
1714         u8 flags;
1715
1716         switch (op) {
1717         case RES_OP_RESERVE:
1718                 count = get_param_l(&in_param) & 0xffffff;
1719                 /* Turn off all unsupported QP allocation flags that the
1720                  * slave tries to set.
1721                  */
1722                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1723                 align = get_param_h(&in_param);
1724                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1725                 if (err)
1726                         return err;
1727
1728                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1729                 if (err) {
1730                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1731                         return err;
1732                 }
1733
1734                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1735                 if (err) {
1736                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1737                         __mlx4_qp_release_range(dev, base, count);
1738                         return err;
1739                 }
1740                 set_param_l(out_param, base);
1741                 break;
1742         case RES_OP_MAP_ICM:
1743                 qpn = get_param_l(&in_param) & 0x7fffff;
1744                 if (valid_reserved(dev, slave, qpn)) {
1745                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1746                         if (err)
1747                                 return err;
1748                 }
1749
1750                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1751                                            NULL, 1);
1752                 if (err)
1753                         return err;
1754
1755                 if (!fw_reserved(dev, qpn)) {
1756                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1757                         if (err) {
1758                                 res_abort_move(dev, slave, RES_QP, qpn);
1759                                 return err;
1760                         }
1761                 }
1762
1763                 res_end_move(dev, slave, RES_QP, qpn);
1764                 break;
1765
1766         default:
1767                 err = -EINVAL;
1768                 break;
1769         }
1770         return err;
1771 }
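
/*
 * Example only (hypothetical guest-side helper, not part of this file):
 * the RES_OP_RESERVE parameter decoded above packs the QP count into
 * bits 0..23 and the allocation flags into bits 24..31 of the low
 * dword, with the alignment in the high dword:
 */
#if 0
static u64 example_pack_qp_reserve(int count, u8 flags, int align)
{
	u64 in_param = 0;

	set_param_l(&in_param, (count & 0xffffff) | ((u32)flags << 24));
	set_param_h(&in_param, align);
	return in_param;
}
#endif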
1772
1773 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1774                          u64 in_param, u64 *out_param)
1775 {
1776         int err = -EINVAL;
1777         int base;
1778         int order;
1779
1780         if (op != RES_OP_RESERVE_AND_MAP)
1781                 return err;
1782
1783         order = get_param_l(&in_param);
1784
1785         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1786         if (err)
1787                 return err;
1788
1789         base = __mlx4_alloc_mtt_range(dev, order);
1790         if (base == -1) {
1791                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1792                 return -ENOMEM;
1793         }
1794
1795         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1796         if (err) {
1797                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1798                 __mlx4_free_mtt_range(dev, base, order);
1799         } else {
1800                 set_param_l(out_param, base);
1801         }
1802
1803         return err;
1804 }
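
/*
 * Added note: MTT quota is granted in entries (1 << order of them),
 * while the tracker stores the whole range as a single RES_MTT node
 * whose 'extra' argument remembers the order.  E.g. order == 4 charges
 * 16 entries against the slave's quota but creates one tree node, and
 * mtt_free_res() later releases 1 << order entries from that one node.
 */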
1805
1806 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1807                          u64 in_param, u64 *out_param)
1808 {
1809         int err = -EINVAL;
1810         int index;
1811         int id;
1812         struct res_mpt *mpt;
1813
1814         switch (op) {
1815         case RES_OP_RESERVE:
1816                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1817                 if (err)
1818                         break;
1819
1820                 index = __mlx4_mpt_reserve(dev);
1821                 if (index == -1) {
1822                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1823                         break;
1824                 }
1825                 id = index & mpt_mask(dev);
1826
1827                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1828                 if (err) {
1829                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1830                         __mlx4_mpt_release(dev, index);
1831                         break;
1832                 }
1833                 set_param_l(out_param, index);
1834                 break;
1835         case RES_OP_MAP_ICM:
1836                 index = get_param_l(&in_param);
1837                 id = index & mpt_mask(dev);
1838                 err = mr_res_start_move_to(dev, slave, id,
1839                                            RES_MPT_MAPPED, &mpt);
1840                 if (err)
1841                         return err;
1842
1843                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1844                 if (err) {
1845                         res_abort_move(dev, slave, RES_MPT, id);
1846                         return err;
1847                 }
1848
1849                 res_end_move(dev, slave, RES_MPT, id);
1850                 break;
1851         }
1852         return err;
1853 }
1854
1855 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1856                         u64 in_param, u64 *out_param)
1857 {
1858         int cqn;
1859         int err;
1860
1861         switch (op) {
1862         case RES_OP_RESERVE_AND_MAP:
1863                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1864                 if (err)
1865                         break;
1866
1867                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1868                 if (err) {
1869                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1870                         break;
1871                 }
1872
1873                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1874                 if (err) {
1875                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1876                         __mlx4_cq_free_icm(dev, cqn);
1877                         break;
1878                 }
1879
1880                 set_param_l(out_param, cqn);
1881                 break;
1882
1883         default:
1884                 err = -EINVAL;
1885         }
1886
1887         return err;
1888 }
1889
1890 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1891                          u64 in_param, u64 *out_param)
1892 {
1893         int srqn;
1894         int err;
1895
1896         switch (op) {
1897         case RES_OP_RESERVE_AND_MAP:
1898                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1899                 if (err)
1900                         break;
1901
1902                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1903                 if (err) {
1904                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1905                         break;
1906                 }
1907
1908                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1909                 if (err) {
1910                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1911                         __mlx4_srq_free_icm(dev, srqn);
1912                         break;
1913                 }
1914
1915                 set_param_l(out_param, srqn);
1916                 break;
1917
1918         default:
1919                 err = -EINVAL;
1920         }
1921
1922         return err;
1923 }
1924
1925 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1926                                      u8 smac_index, u64 *mac)
1927 {
1928         struct mlx4_priv *priv = mlx4_priv(dev);
1929         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1930         struct list_head *mac_list =
1931                 &tracker->slave_list[slave].res_list[RES_MAC];
1932         struct mac_res *res, *tmp;
1933
1934         list_for_each_entry_safe(res, tmp, mac_list, list) {
1935                 if (res->smac_index == smac_index && res->port == (u8) port) {
1936                         *mac = res->mac;
1937                         return 0;
1938                 }
1939         }
1940         return -ENOENT;
1941 }
1942
1943 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1944 {
1945         struct mlx4_priv *priv = mlx4_priv(dev);
1946         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1947         struct list_head *mac_list =
1948                 &tracker->slave_list[slave].res_list[RES_MAC];
1949         struct mac_res *res, *tmp;
1950
1951         list_for_each_entry_safe(res, tmp, mac_list, list) {
1952                 if (res->mac == mac && res->port == (u8) port) {
1953                         /* mac found. update ref count */
1954                         ++res->ref_count;
1955                         return 0;
1956                 }
1957         }
1958
1959         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1960                 return -EINVAL;
1961         res = kzalloc(sizeof(*res), GFP_KERNEL);
1962         if (!res) {
1963                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1964                 return -ENOMEM;
1965         }
1966         res->mac = mac;
1967         res->port = (u8) port;
1968         res->smac_index = smac_index;
1969         res->ref_count = 1;
1970         list_add_tail(&res->list,
1971                       &tracker->slave_list[slave].res_list[RES_MAC]);
1972         return 0;
1973 }
1974
1975 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1976                                int port)
1977 {
1978         struct mlx4_priv *priv = mlx4_priv(dev);
1979         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1980         struct list_head *mac_list =
1981                 &tracker->slave_list[slave].res_list[RES_MAC];
1982         struct mac_res *res, *tmp;
1983
1984         list_for_each_entry_safe(res, tmp, mac_list, list) {
1985                 if (res->mac == mac && res->port == (u8) port) {
1986                         if (!--res->ref_count) {
1987                                 list_del(&res->list);
1988                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1989                                 kfree(res);
1990                         }
1991                         break;
1992                 }
1993         }
1994 }
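
/*
 * Added note: MAC bookkeeping is reference counted per (mac, port)
 * pair.  Registering the same MAC again on the same port only bumps
 * ref_count (no extra quota is charged), and mac_del_from_slave()
 * frees the entry and returns the quota only when the count drops to
 * zero; rem_slave_macs() below compensates by unregistering once per
 * reference.
 */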
1995
1996 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1997 {
1998         struct mlx4_priv *priv = mlx4_priv(dev);
1999         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2000         struct list_head *mac_list =
2001                 &tracker->slave_list[slave].res_list[RES_MAC];
2002         struct mac_res *res, *tmp;
2003         int i;
2004
2005         list_for_each_entry_safe(res, tmp, mac_list, list) {
2006                 list_del(&res->list);
2007                 /* dereference the MAC as many times as the slave referenced it */
2008                 for (i = 0; i < res->ref_count; i++)
2009                         __mlx4_unregister_mac(dev, res->port, res->mac);
2010                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2011                 kfree(res);
2012         }
2013 }
2014
2015 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2016                          u64 in_param, u64 *out_param, int in_port)
2017 {
2018         int err = -EINVAL;
2019         int port;
2020         u64 mac;
2021         u8 smac_index;
2022
2023         if (op != RES_OP_RESERVE_AND_MAP)
2024                 return err;
2025
2026         port = !in_port ? get_param_l(out_param) : in_port;
2027         port = mlx4_slave_convert_port(dev, slave, port);
2028
2030         if (port < 0)
2031                 return -EINVAL;
2032         mac = in_param;
2033
2034         err = __mlx4_register_mac(dev, port, mac);
2035         if (err >= 0) {
2036                 smac_index = err;
2037                 set_param_l(out_param, err);
2038                 err = 0;
2039         }
2040
2041         if (!err) {
2042                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2043                 if (err)
2044                         __mlx4_unregister_mac(dev, port, mac);
2045         }
2046         return err;
2047 }
2048
2049 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2050                              int port, int vlan_index)
2051 {
2052         struct mlx4_priv *priv = mlx4_priv(dev);
2053         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2054         struct list_head *vlan_list =
2055                 &tracker->slave_list[slave].res_list[RES_VLAN];
2056         struct vlan_res *res, *tmp;
2057
2058         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2059                 if (res->vlan == vlan && res->port == (u8) port) {
2060                         /* vlan found. update ref count */
2061                         ++res->ref_count;
2062                         return 0;
2063                 }
2064         }
2065
2066         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2067                 return -EINVAL;
2068         res = kzalloc(sizeof(*res), GFP_KERNEL);
2069         if (!res) {
2070                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2071                 return -ENOMEM;
2072         }
2073         res->vlan = vlan;
2074         res->port = (u8) port;
2075         res->vlan_index = vlan_index;
2076         res->ref_count = 1;
2077         list_add_tail(&res->list,
2078                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2079         return 0;
2080 }
2081
2083 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2084                                 int port)
2085 {
2086         struct mlx4_priv *priv = mlx4_priv(dev);
2087         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2088         struct list_head *vlan_list =
2089                 &tracker->slave_list[slave].res_list[RES_VLAN];
2090         struct vlan_res *res, *tmp;
2091
2092         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2093                 if (res->vlan == vlan && res->port == (u8) port) {
2094                         if (!--res->ref_count) {
2095                                 list_del(&res->list);
2096                                 mlx4_release_resource(dev, slave, RES_VLAN,
2097                                                       1, port);
2098                                 kfree(res);
2099                         }
2100                         break;
2101                 }
2102         }
2103 }
2104
2105 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2106 {
2107         struct mlx4_priv *priv = mlx4_priv(dev);
2108         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2109         struct list_head *vlan_list =
2110                 &tracker->slave_list[slave].res_list[RES_VLAN];
2111         struct vlan_res *res, *tmp;
2112         int i;
2113
2114         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2115                 list_del(&res->list);
2116                 /* dereference the VLAN as many times as the slave referenced it */
2117                 for (i = 0; i < res->ref_count; i++)
2118                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2119                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2120                 kfree(res);
2121         }
2122 }
2123
2124 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2125                           u64 in_param, u64 *out_param, int in_port)
2126 {
2127         struct mlx4_priv *priv = mlx4_priv(dev);
2128         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2129         int err;
2130         u16 vlan;
2131         int vlan_index;
2132         int port;
2133
2134         port = !in_port ? get_param_l(out_param) : in_port;
2135
2136         if (!port || op != RES_OP_RESERVE_AND_MAP)
2137                 return -EINVAL;
2138
2139         port = mlx4_slave_convert_port(dev, slave, port);
2140
2142         if (port < 0)
2143                 return -EINVAL;
2144         /* upstream kernels treated guest vlan reg/unreg as a NOP; keep doing so */
2145         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2146                 slave_state[slave].old_vlan_api = true;
2147                 return 0;
2148         }
2149
2150         vlan = (u16) in_param;
2151
2152         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2153         if (!err) {
2154                 set_param_l(out_param, (u32) vlan_index);
2155                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2156                 if (err)
2157                         __mlx4_unregister_vlan(dev, port, vlan);
2158         }
2159         return err;
2160 }
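
/*
 * Added note: a VLAN request with no port in the in_modifier
 * (in_port == 0) comes from a guest using the old VLAN API, where the
 * port travels in the out_param dword instead.  Such guests are flagged
 * via old_vlan_api and their reg/unreg calls stay NOPs, matching the
 * pre-existing behavior noted above; vlan_free_res() honors the same
 * flag.
 */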
2161
2162 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2163                              u64 in_param, u64 *out_param, int port)
2164 {
2165         u32 index;
2166         int err;
2167
2168         if (op != RES_OP_RESERVE)
2169                 return -EINVAL;
2170
2171         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2172         if (err)
2173                 return err;
2174
2175         err = __mlx4_counter_alloc(dev, &index);
2176         if (err) {
2177                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2178                 return err;
2179         }
2180
2181         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2182         if (err) {
2183                 __mlx4_counter_free(dev, index);
2184                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2185         } else {
2186                 set_param_l(out_param, index);
2187         }
2188
2189         return err;
2190 }
2191
2192 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2193                            u64 in_param, u64 *out_param)
2194 {
2195         u32 xrcdn;
2196         int err;
2197
2198         if (op != RES_OP_RESERVE)
2199                 return -EINVAL;
2200
2201         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2202         if (err)
2203                 return err;
2204
2205         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2206         if (err)
2207                 __mlx4_xrcd_free(dev, xrcdn);
2208         else
2209                 set_param_l(out_param, xrcdn);
2210
2211         return err;
2212 }
2213
2214 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2215                            struct mlx4_vhcr *vhcr,
2216                            struct mlx4_cmd_mailbox *inbox,
2217                            struct mlx4_cmd_mailbox *outbox,
2218                            struct mlx4_cmd_info *cmd)
2219 {
2220         int err;
2221         int alop = vhcr->op_modifier;
2222
2223         switch (vhcr->in_modifier & 0xFF) {
2224         case RES_QP:
2225                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2226                                    vhcr->in_param, &vhcr->out_param);
2227                 break;
2228
2229         case RES_MTT:
2230                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2231                                     vhcr->in_param, &vhcr->out_param);
2232                 break;
2233
2234         case RES_MPT:
2235                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2236                                     vhcr->in_param, &vhcr->out_param);
2237                 break;
2238
2239         case RES_CQ:
2240                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2241                                    vhcr->in_param, &vhcr->out_param);
2242                 break;
2243
2244         case RES_SRQ:
2245                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2246                                     vhcr->in_param, &vhcr->out_param);
2247                 break;
2248
2249         case RES_MAC:
2250                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2251                                     vhcr->in_param, &vhcr->out_param,
2252                                     (vhcr->in_modifier >> 8) & 0xFF);
2253                 break;
2254
2255         case RES_VLAN:
2256                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2257                                      vhcr->in_param, &vhcr->out_param,
2258                                      (vhcr->in_modifier >> 8) & 0xFF);
2259                 break;
2260
2261         case RES_COUNTER:
2262                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2263                                         vhcr->in_param, &vhcr->out_param, 0);
2264                 break;
2265
2266         case RES_XRCD:
2267                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2268                                       vhcr->in_param, &vhcr->out_param);
2269                 break;
2270
2271         default:
2272                 err = -EINVAL;
2273                 break;
2274         }
2275
2276         return err;
2277 }
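
/*
 * Illustrative sketch, not part of the original file: this wrapper is
 * reached when a VF issues MLX4_CMD_ALLOC_RES.  The resource type rides
 * in bits 0..7 of the in_modifier, the port (for MAC/VLAN) in bits
 * 8..15, and the RES_OP_* opcode in the op_modifier.  A guest-side call
 * looks roughly like:
 */
#if 0	/* example only */
	err = mlx4_cmd_imm(dev, in_param, &out_param,
			   RES_MAC | (port << 8),	/* type + port */
			   RES_OP_RESERVE_AND_MAP,	/* op_modifier */
			   MLX4_CMD_ALLOC_RES,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
#endif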
2278
2279 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2280                        u64 in_param)
2281 {
2282         int err;
2283         int count;
2284         int base;
2285         int qpn;
2286
2287         switch (op) {
2288         case RES_OP_RESERVE:
2289                 base = get_param_l(&in_param) & 0x7fffff;
2290                 count = get_param_h(&in_param);
2291                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2292                 if (err)
2293                         break;
2294                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2295                 __mlx4_qp_release_range(dev, base, count);
2296                 break;
2297         case RES_OP_MAP_ICM:
2298                 qpn = get_param_l(&in_param) & 0x7fffff;
2299                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2300                                            NULL, 0);
2301                 if (err)
2302                         return err;
2303
2304                 if (!fw_reserved(dev, qpn))
2305                         __mlx4_qp_free_icm(dev, qpn);
2306
2307                 res_end_move(dev, slave, RES_QP, qpn);
2308
2309                 if (valid_reserved(dev, slave, qpn))
2310                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2311                 break;
2312         default:
2313                 err = -EINVAL;
2314                 break;
2315         }
2316         return err;
2317 }
2318
2319 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2320                         u64 in_param, u64 *out_param)
2321 {
2322         int err = -EINVAL;
2323         int base;
2324         int order;
2325
2326         if (op != RES_OP_RESERVE_AND_MAP)
2327                 return err;
2328
2329         base = get_param_l(&in_param);
2330         order = get_param_h(&in_param);
2331         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2332         if (!err) {
2333                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2334                 __mlx4_free_mtt_range(dev, base, order);
2335         }
2336         return err;
2337 }
2338
2339 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2340                         u64 in_param)
2341 {
2342         int err = -EINVAL;
2343         int index;
2344         int id;
2345         struct res_mpt *mpt;
2346
2347         switch (op) {
2348         case RES_OP_RESERVE:
2349                 index = get_param_l(&in_param);
2350                 id = index & mpt_mask(dev);
2351                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2352                 if (err)
2353                         break;
2354                 index = mpt->key;
2355                 put_res(dev, slave, id, RES_MPT);
2356
2357                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2358                 if (err)
2359                         break;
2360                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2361                 __mlx4_mpt_release(dev, index);
2362                 break;
2363         case RES_OP_MAP_ICM:
2364                 index = get_param_l(&in_param);
2365                 id = index & mpt_mask(dev);
2366                 err = mr_res_start_move_to(dev, slave, id,
2367                                            RES_MPT_RESERVED, &mpt);
2368                 if (err)
2369                         return err;
2370
2371                 __mlx4_mpt_free_icm(dev, mpt->key);
2372                 res_end_move(dev, slave, RES_MPT, id);
2373                 return err;
2375         default:
2376                 err = -EINVAL;
2377                 break;
2378         }
2379         return err;
2380 }
2381
2382 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2383                        u64 in_param, u64 *out_param)
2384 {
2385         int cqn;
2386         int err;
2387
2388         switch (op) {
2389         case RES_OP_RESERVE_AND_MAP:
2390                 cqn = get_param_l(&in_param);
2391                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2392                 if (err)
2393                         break;
2394
2395                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2396                 __mlx4_cq_free_icm(dev, cqn);
2397                 break;
2398
2399         default:
2400                 err = -EINVAL;
2401                 break;
2402         }
2403
2404         return err;
2405 }
2406
2407 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2408                         u64 in_param, u64 *out_param)
2409 {
2410         int srqn;
2411         int err;
2412
2413         switch (op) {
2414         case RES_OP_RESERVE_AND_MAP:
2415                 srqn = get_param_l(&in_param);
2416                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2417                 if (err)
2418                         break;
2419
2420                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2421                 __mlx4_srq_free_icm(dev, srqn);
2422                 break;
2423
2424         default:
2425                 err = -EINVAL;
2426                 break;
2427         }
2428
2429         return err;
2430 }
2431
2432 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2433                             u64 in_param, u64 *out_param, int in_port)
2434 {
2435         int port;
2436         int err = 0;
2437
2438         switch (op) {
2439         case RES_OP_RESERVE_AND_MAP:
2440                 port = !in_port ? get_param_l(out_param) : in_port;
2441                 port = mlx4_slave_convert_port(dev, slave, port);
2442
2444                 if (port < 0)
2445                         return -EINVAL;
2446                 mac_del_from_slave(dev, slave, in_param, port);
2447                 __mlx4_unregister_mac(dev, port, in_param);
2448                 break;
2449         default:
2450                 err = -EINVAL;
2451                 break;
2452         }
2453
2454         return err;
2456 }
2457
2458 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2459                             u64 in_param, u64 *out_param, int port)
2460 {
2461         struct mlx4_priv *priv = mlx4_priv(dev);
2462         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2463         int err = 0;
2464
2465         port = mlx4_slave_convert_port(dev, slave, port);
2466
2468         if (port < 0)
2469                 return -EINVAL;
2470         switch (op) {
2471         case RES_OP_RESERVE_AND_MAP:
2472                 if (slave_state[slave].old_vlan_api)
2473                         return 0;
2474                 if (!port)
2475                         return -EINVAL;
2476                 vlan_del_from_slave(dev, slave, in_param, port);
2477                 __mlx4_unregister_vlan(dev, port, in_param);
2478                 break;
2479         default:
2480                 err = -EINVAL;
2481                 break;
2482         }
2483
2484         return err;
2485 }
2486
2487 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2488                             u64 in_param, u64 *out_param)
2489 {
2490         int index;
2491         int err;
2492
2493         if (op != RES_OP_RESERVE)
2494                 return -EINVAL;
2495
2496         index = get_param_l(&in_param);
2497         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2498                 return 0;
2499
2500         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2501         if (err)
2502                 return err;
2503
2504         __mlx4_counter_free(dev, index);
2505         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2506
2507         return err;
2508 }
2509
2510 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2511                           u64 in_param, u64 *out_param)
2512 {
2513         int xrcdn;
2514         int err;
2515
2516         if (op != RES_OP_RESERVE)
2517                 return -EINVAL;
2518
2519         xrcdn = get_param_l(&in_param);
2520         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2521         if (err)
2522                 return err;
2523
2524         __mlx4_xrcd_free(dev, xrcdn);
2525
2526         return err;
2527 }
2528
2529 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2530                           struct mlx4_vhcr *vhcr,
2531                           struct mlx4_cmd_mailbox *inbox,
2532                           struct mlx4_cmd_mailbox *outbox,
2533                           struct mlx4_cmd_info *cmd)
2534 {
2535         int err = -EINVAL;
2536         int alop = vhcr->op_modifier;
2537
2538         switch (vhcr->in_modifier & 0xFF) {
2539         case RES_QP:
2540                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2541                                   vhcr->in_param);
2542                 break;
2543
2544         case RES_MTT:
2545                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2546                                    vhcr->in_param, &vhcr->out_param);
2547                 break;
2548
2549         case RES_MPT:
2550                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2551                                    vhcr->in_param);
2552                 break;
2553
2554         case RES_CQ:
2555                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2556                                   vhcr->in_param, &vhcr->out_param);
2557                 break;
2558
2559         case RES_SRQ:
2560                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2561                                    vhcr->in_param, &vhcr->out_param);
2562                 break;
2563
2564         case RES_MAC:
2565                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2566                                    vhcr->in_param, &vhcr->out_param,
2567                                    (vhcr->in_modifier >> 8) & 0xFF);
2568                 break;
2569
2570         case RES_VLAN:
2571                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2572                                     vhcr->in_param, &vhcr->out_param,
2573                                     (vhcr->in_modifier >> 8) & 0xFF);
2574                 break;
2575
2576         case RES_COUNTER:
2577                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2578                                        vhcr->in_param, &vhcr->out_param);
2579                 break;
2580
2581         case RES_XRCD:
2582                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2583                                      vhcr->in_param, &vhcr->out_param);
2584                 break;
2585         default:
2586                 break;
2587         }
2588         return err;
2589 }
2590
2591 /* ugly but other choices are uglier */
2592 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2593 {
2594         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2595 }
2596
2597 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2598 {
2599         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2600 }
2601
2602 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2603 {
2604         return be32_to_cpu(mpt->mtt_sz);
2605 }
2606
2607 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2608 {
2609         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2610 }
2611
2612 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2613 {
2614         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2615 }
2616
2617 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2618 {
2619         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2620 }
2621
2622 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2623 {
2624         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2625 }
2626
2627 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2628 {
2629         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2630 }
2631
2632 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2633 {
2634         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2635 }
2636
2637 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2638 {
2639         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2640         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2641         int log_sq_stride = qpc->sq_size_stride & 7;
2642         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2643         int log_rq_stride = qpc->rq_size_stride & 7;
2644         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2645         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2646         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2647         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2648         int sq_size;
2649         int rq_size;
2650         int total_pages;
2651         int total_mem;
2652         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2653
2654         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2655         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2656         total_mem = sq_size + rq_size;
2657         total_pages =
2658                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2659                                    page_shift);
2660
2661         return total_pages;
2662 }
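
/*
 * Worked example (added for clarity): with log_sq_size = 6 and
 * log_sq_stride = 2, sq_size = 1 << (6 + 2 + 4) = 4 KB; the same RQ
 * settings give rq_size = 4 KB (or 0 for SRQ/RSS/XRC QPs, which have
 * no RQ of their own).  With 4 KB pages (page_shift = 12) and
 * page_offset = 0, total_pages = roundup_pow_of_two(8192 >> 12) = 2
 * MTT entries.
 */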
2663
2664 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2665                            int size, struct res_mtt *mtt)
2666 {
2667         int res_start = mtt->com.res_id;
2668         int res_size = (1 << mtt->order);
2669
2670         if (start < res_start || start + size > res_start + res_size)
2671                 return -EPERM;
2672         return 0;
2673 }
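
/*
 * Added example: an MTT reservation at res_id 256 with order 4 spans
 * entries [256, 272).  A request for start = 260, size = 8 fits and
 * passes; start = 260, size = 16 would end at 276 and is rejected with
 * -EPERM since it reaches past the slave's reservation.
 */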
2674
2675 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2676                            struct mlx4_vhcr *vhcr,
2677                            struct mlx4_cmd_mailbox *inbox,
2678                            struct mlx4_cmd_mailbox *outbox,
2679                            struct mlx4_cmd_info *cmd)
2680 {
2681         int err;
2682         int index = vhcr->in_modifier;
2683         struct res_mtt *mtt;
2684         struct res_mpt *mpt;
2685         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2686         int phys;
2687         int id;
2688         u32 pd;
2689         int pd_slave;
2690
2691         id = index & mpt_mask(dev);
2692         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2693         if (err)
2694                 return err;
2695
2696         /* Disable memory windows for VFs. */
2697         if (!mr_is_region(inbox->buf)) {
2698                 err = -EPERM;
2699                 goto ex_abort;
2700         }
2701
2702         /* Make sure the PD's slave-id bits are either zero or match this slave. */
2703         pd = mr_get_pd(inbox->buf);
2704         pd_slave = (pd >> 17) & 0x7f;
2705         if (pd_slave != 0 && --pd_slave != slave) {
2706                 err = -EPERM;
2707                 goto ex_abort;
2708         }
2709
2710         if (mr_is_fmr(inbox->buf)) {
2711                 /* Bind Enable is forbidden on a slave-owned FMR. */
2712                 if (mr_is_bind_enabled(inbox->buf)) {
2713                         err = -EPERM;
2714                         goto ex_abort;
2715                 }
2716                 /* An FMR that is a Memory Window (not a region) is also forbidden. */
2717                 if (!mr_is_region(inbox->buf)) {
2718                         err = -EPERM;
2719                         goto ex_abort;
2720                 }
2721         }
2722
2723         phys = mr_phys_mpt(inbox->buf);
2724         if (!phys) {
2725                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2726                 if (err)
2727                         goto ex_abort;
2728
2729                 err = check_mtt_range(dev, slave, mtt_base,
2730                                       mr_get_mtt_size(inbox->buf), mtt);
2731                 if (err)
2732                         goto ex_put;
2733
2734                 mpt->mtt = mtt;
2735         }
2736
2737         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2738         if (err)
2739                 goto ex_put;
2740
2741         if (!phys) {
2742                 atomic_inc(&mtt->ref_count);
2743                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2744         }
2745
2746         res_end_move(dev, slave, RES_MPT, id);
2747         return 0;
2748
2749 ex_put:
2750         if (!phys)
2751                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2752 ex_abort:
2753         res_abort_move(dev, slave, RES_MPT, id);
2754
2755         return err;
2756 }
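
/*
 * Added note on the PD ownership check above: bits 17..23 of the PD
 * field carry the owning function as (slave + 1), with 0 meaning "no
 * owner encoded".  So slave 3 may present either 0 or 4 there; any
 * other value fails the '--pd_slave != slave' test with -EPERM.
 */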
2757
2758 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2759                            struct mlx4_vhcr *vhcr,
2760                            struct mlx4_cmd_mailbox *inbox,
2761                            struct mlx4_cmd_mailbox *outbox,
2762                            struct mlx4_cmd_info *cmd)
2763 {
2764         int err;
2765         int index = vhcr->in_modifier;
2766         struct res_mpt *mpt;
2767         int id;
2768
2769         id = index & mpt_mask(dev);
2770         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2771         if (err)
2772                 return err;
2773
2774         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2775         if (err)
2776                 goto ex_abort;
2777
2778         if (mpt->mtt)
2779                 atomic_dec(&mpt->mtt->ref_count);
2780
2781         res_end_move(dev, slave, RES_MPT, id);
2782         return 0;
2783
2784 ex_abort:
2785         res_abort_move(dev, slave, RES_MPT, id);
2786
2787         return err;
2788 }
2789
2790 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2791                            struct mlx4_vhcr *vhcr,
2792                            struct mlx4_cmd_mailbox *inbox,
2793                            struct mlx4_cmd_mailbox *outbox,
2794                            struct mlx4_cmd_info *cmd)
2795 {
2796         int err;
2797         int index = vhcr->in_modifier;
2798         struct res_mpt *mpt;
2799         int id;
2800
2801         id = index & mpt_mask(dev);
2802         err = get_res(dev, slave, id, RES_MPT, &mpt);
2803         if (err)
2804                 return err;
2805
2806         if (mpt->com.from_state == RES_MPT_MAPPED) {
2807                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2808                  * that, the VF must read the MPT. But since the MPT entry memory is not
2809                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2810                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2811                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2812  * ownership following the change. The change here allows the VF to
2813                  * perform QUERY_MPT also when the entry is in SW ownership.
2814                  */
2815                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2816                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2817                                         mpt->key, NULL);
2818
2819                 if (!mpt_entry || !outbox->buf) {
2820                         err = -EINVAL;
2821                         goto out;
2822                 }
2823
2824                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2825
2826                 err = 0;
2827         } else if (mpt->com.from_state == RES_MPT_HW) {
2828                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2829         } else {
2830                 err = -EBUSY;
2831                 goto out;
2832         }
2833
2835 out:
2836         put_res(dev, slave, id, RES_MPT);
2837         return err;
2838 }
2839
2840 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2841 {
2842         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2843 }
2844
2845 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2846 {
2847         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2848 }
2849
2850 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2851 {
2852         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2853 }
2854
2855 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2856                                   struct mlx4_qp_context *context)
2857 {
2858         u32 qpn = vhcr->in_modifier & 0xffffff;
2859         u32 qkey = 0;
2860
2861         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2862                 return;
2863
2864         /* adjust qkey in qp context */
2865         context->qkey = cpu_to_be32(qkey);
2866 }
2867
2868 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2869                                  struct mlx4_qp_context *qpc,
2870                                  struct mlx4_cmd_mailbox *inbox);
2871
2872 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2873                              struct mlx4_vhcr *vhcr,
2874                              struct mlx4_cmd_mailbox *inbox,
2875                              struct mlx4_cmd_mailbox *outbox,
2876                              struct mlx4_cmd_info *cmd)
2877 {
2878         int err;
2879         int qpn = vhcr->in_modifier & 0x7fffff;
2880         struct res_mtt *mtt;
2881         struct res_qp *qp;
2882         struct mlx4_qp_context *qpc = inbox->buf + 8;
2883         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2884         int mtt_size = qp_get_mtt_size(qpc);
2885         struct res_cq *rcq;
2886         struct res_cq *scq;
2887         int rcqn = qp_get_rcqn(qpc);
2888         int scqn = qp_get_scqn(qpc);
2889         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2890         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2891         struct res_srq *srq;
2892         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2893
2894         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2895         if (err)
2896                 return err;
2897
2898         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2899         if (err)
2900                 return err;
2901         qp->local_qpn = local_qpn;
2902         qp->sched_queue = 0;
2903         qp->param3 = 0;
2904         qp->vlan_control = 0;
2905         qp->fvl_rx = 0;
2906         qp->pri_path_fl = 0;
2907         qp->vlan_index = 0;
2908         qp->feup = 0;
2909         qp->qpc_flags = be32_to_cpu(qpc->flags);
2910
2911         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2912         if (err)
2913                 goto ex_abort;
2914
2915         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2916         if (err)
2917                 goto ex_put_mtt;
2918
2919         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2920         if (err)
2921                 goto ex_put_mtt;
2922
2923         if (scqn != rcqn) {
2924                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2925                 if (err)
2926                         goto ex_put_rcq;
2927         } else
2928                 scq = rcq;
2929
2930         if (use_srq) {
2931                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2932                 if (err)
2933                         goto ex_put_scq;
2934         }
2935
2936         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2937         update_pkey_index(dev, slave, inbox);
2938         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2939         if (err)
2940                 goto ex_put_srq;
2941         atomic_inc(&mtt->ref_count);
2942         qp->mtt = mtt;
2943         atomic_inc(&rcq->ref_count);
2944         qp->rcq = rcq;
2945         atomic_inc(&scq->ref_count);
2946         qp->scq = scq;
2947
2948         if (scqn != rcqn)
2949                 put_res(dev, slave, scqn, RES_CQ);
2950
2951         if (use_srq) {
2952                 atomic_inc(&srq->ref_count);
2953                 put_res(dev, slave, srqn, RES_SRQ);
2954                 qp->srq = srq;
2955         }
2956         put_res(dev, slave, rcqn, RES_CQ);
2957         put_res(dev, slave, mtt_base, RES_MTT);
2958         res_end_move(dev, slave, RES_QP, qpn);
2959
2960         return 0;
2961
2962 ex_put_srq:
2963         if (use_srq)
2964                 put_res(dev, slave, srqn, RES_SRQ);
2965 ex_put_scq:
2966         if (scqn != rcqn)
2967                 put_res(dev, slave, scqn, RES_CQ);
2968 ex_put_rcq:
2969         put_res(dev, slave, rcqn, RES_CQ);
2970 ex_put_mtt:
2971         put_res(dev, slave, mtt_base, RES_MTT);
2972 ex_abort:
2973         res_abort_move(dev, slave, RES_QP, qpn);
2974
2975         return err;
2976 }
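
/*
 * Added note: a successfully initialized QP pins its MTT, both CQs and
 * (when used) its SRQ via their ref_counts, so none of them can be
 * moved out of HW ownership or destroyed underneath a live QP; the
 * counts are dropped again on the QP's destroy path.
 */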
2977
2978 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2979 {
2980         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2981 }
2982
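     /*
      * Number of MTT pages backing an EQ: each EQE is 32 bytes, so the
      * queue spans 2^(log_eq_size + 5) bytes, rounded up to at least one
      * page.  E.g. log_eq_size = 10 (1024 EQEs) with 4KB pages
      * (page_shift = 12) gives 1 << (15 - 12) = 8 pages.
      */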
2983 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2984 {
2985         int log_eq_size = eqc->log_eq_size & 0x1f;
2986         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2987
2988         if (log_eq_size + 5 < page_shift)
2989                 return 1;
2990
2991         return 1 << (log_eq_size + 5 - page_shift);
2992 }
2993
2994 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2995 {
2996         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2997 }
2998
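     /* Same computation for a CQ; the +5 shift likewise assumes 32-byte CQEs. */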
2999 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3000 {
3001         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3002         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3003
3004         if (log_cq_size + 5 < page_shift)
3005                 return 1;
3006
3007         return 1 << (log_cq_size + 5 - page_shift);
3008 }
3009
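     /*
      * SW2HW_EQ: hand the slave's EQ to the hardware.  EQ resource ids are
      * made unique across slaves as (slave << 10 | eqn).  The MTT range
      * named in the EQ context must belong to the slave and be large
      * enough; on success the EQ keeps a reference on it.
      */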
3010 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3011                           struct mlx4_vhcr *vhcr,
3012                           struct mlx4_cmd_mailbox *inbox,
3013                           struct mlx4_cmd_mailbox *outbox,
3014                           struct mlx4_cmd_info *cmd)
3015 {
3016         int err;
3017         int eqn = vhcr->in_modifier;
3018         int res_id = (slave << 10) | eqn;
3019         struct mlx4_eq_context *eqc = inbox->buf;
3020         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3021         int mtt_size = eq_get_mtt_size(eqc);
3022         struct res_eq *eq;
3023         struct res_mtt *mtt;
3024
3025         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3026         if (err)
3027                 return err;
3028         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3029         if (err)
3030                 goto out_add;
3031
3032         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3033         if (err)
3034                 goto out_move;
3035
3036         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3037         if (err)
3038                 goto out_put;
3039
3040         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3041         if (err)
3042                 goto out_put;
3043
3044         atomic_inc(&mtt->ref_count);
3045         eq->mtt = mtt;
3046         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3047         res_end_move(dev, slave, RES_EQ, res_id);
3048         return 0;
3049
3050 out_put:
3051         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3052 out_move:
3053         res_abort_move(dev, slave, RES_EQ, res_id);
3054 out_add:
3055         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3056         return err;
3057 }
3058
3059 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3060                             struct mlx4_vhcr *vhcr,
3061                             struct mlx4_cmd_mailbox *inbox,
3062                             struct mlx4_cmd_mailbox *outbox,
3063                             struct mlx4_cmd_info *cmd)
3064 {
3065         int err;
3066         u8 get = vhcr->op_modifier;
3067
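             /* slaves may only issue the "get" form (op_modifier 1) */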
3068         if (get != 1)
3069                 return -EPERM;
3070
3071         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3072
3073         return err;
3074 }
3075
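     /*
      * Find the slave's MTT resource that fully contains the range
      * [start, start + len) and mark it busy; the caller releases it
      * with put_res().
      */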
3076 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3077                               int len, struct res_mtt **res)
3078 {
3079         struct mlx4_priv *priv = mlx4_priv(dev);
3080         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3081         struct res_mtt *mtt;
3082         int err = -EINVAL;
3083
3084         spin_lock_irq(mlx4_tlock(dev));
3085         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3086                             com.list) {
3087                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3088                         *res = mtt;
3089                         mtt->com.from_state = mtt->com.state;
3090                         mtt->com.state = RES_MTT_BUSY;
3091                         err = 0;
3092                         break;
3093                 }
3094         }
3095         spin_unlock_irq(mlx4_tlock(dev));
3096
3097         return err;
3098 }
3099
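     /*
      * Validate a slave-requested QP transition: clear the FPP bit and
      * forbid rate-limit settings for VFs, check that each address path's
      * mgid_index fits the number of GIDs the slave owns on that port,
      * and allow MLX (special) proxy QPs only for SMI-enabled VFs.
      */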
3100 static int verify_qp_parameters(struct mlx4_dev *dev,
3101                                 struct mlx4_vhcr *vhcr,
3102                                 struct mlx4_cmd_mailbox *inbox,
3103                                 enum qp_transition transition, u8 slave)
3104 {
3105         u32                     qp_type;
3106         u32                     qpn;
3107         struct mlx4_qp_context  *qp_ctx;
3108         enum mlx4_qp_optpar     optpar;
3109         int port;
3110         int num_gids;
3111
3112         qp_ctx  = inbox->buf + 8;
3113         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3114         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3115
3116         if (slave != mlx4_master_func_num(dev)) {
3117                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3118                 /* setting QP rate-limit is disallowed for VFs */
3119                 if (qp_ctx->rate_limit_params)
3120                         return -EPERM;
3121         }
3122
3123         switch (qp_type) {
3124         case MLX4_QP_ST_RC:
3125         case MLX4_QP_ST_XRC:
3126         case MLX4_QP_ST_UC:
3127                 switch (transition) {
3128                 case QP_TRANS_INIT2RTR:
3129                 case QP_TRANS_RTR2RTS:
3130                 case QP_TRANS_RTS2RTS:
3131                 case QP_TRANS_SQD2SQD:
3132                 case QP_TRANS_SQD2RTS:
3133                         if (slave != mlx4_master_func_num(dev)) {
3134                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3135                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3136                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3137                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3138                                         else
3139                                                 num_gids = 1;
3140                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
3141                                                 return -EINVAL;
3142                                 }
3143                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3144                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3145                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3146                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3147                                         else
3148                                                 num_gids = 1;
3149                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
3150                                                 return -EINVAL;
3151                                 }
                             }
3152                         break;
3153                 default:
3154                         break;
3155                 }
3156                 break;
3157
3158         case MLX4_QP_ST_MLX:
3159                 qpn = vhcr->in_modifier & 0x7fffff;
3160                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3161                 if (transition == QP_TRANS_INIT2RTR &&
3162                     slave != mlx4_master_func_num(dev) &&
3163                     mlx4_is_qp_reserved(dev, qpn) &&
3164                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3165                         /* only enabled VFs may create MLX proxy QPs */
3166                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3167                                  __func__, slave, port);
3168                         return -EPERM;
3169                 }
3170                 break;
3171
3172         default:
3173                 break;
3174         }
3175
3176         return 0;
3177 }
3178
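     /*
      * WRITE_MTT: once the target range is verified to sit inside one of
      * the slave's MTT resources, convert the page list to host-endian
      * addresses and write the entries directly via __mlx4_write_mtt().
      */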
3179 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3180                            struct mlx4_vhcr *vhcr,
3181                            struct mlx4_cmd_mailbox *inbox,
3182                            struct mlx4_cmd_mailbox *outbox,
3183                            struct mlx4_cmd_info *cmd)
3184 {
3185         struct mlx4_mtt mtt;
3186         __be64 *page_list = inbox->buf;
3187         u64 *pg_list = (u64 *)page_list;
3188         int i;
3189         struct res_mtt *rmtt = NULL;
3190         int start = be64_to_cpu(page_list[0]);
3191         int npages = vhcr->in_modifier;
3192         int err;
3193
3194         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3195         if (err)
3196                 return err;
3197
3198         /* Call the SW implementation of write_mtt:
3199          * - Prepare a dummy mtt struct
3200          * - Translate inbox contents to simple addresses in host endianness */
3201         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3202                             we don't really use it */
3203         mtt.order = 0;
3204         mtt.page_shift = 0;
3205         for (i = 0; i < npages; ++i)
3206                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3207
3208         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3209                                ((u64 *)page_list + 2));
3210
3211         if (rmtt)
3212                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3213
3214         return err;
3215 }
3216
3217 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3218                           struct mlx4_vhcr *vhcr,
3219                           struct mlx4_cmd_mailbox *inbox,
3220                           struct mlx4_cmd_mailbox *outbox,
3221                           struct mlx4_cmd_info *cmd)
3222 {
3223         int eqn = vhcr->in_modifier;
3224         int res_id = eqn | (slave << 10);
3225         struct res_eq *eq;
3226         int err;
3227
3228         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3229         if (err)
3230                 return err;
3231
3232         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3233         if (err)
3234                 goto ex_abort;
3235
3236         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3237         if (err)
3238                 goto ex_put;
3239
3240         atomic_dec(&eq->mtt->ref_count);
3241         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3242         res_end_move(dev, slave, RES_EQ, res_id);
3243         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3244
3245         return 0;
3246
3247 ex_put:
3248         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3249 ex_abort:
3250         res_abort_move(dev, slave, RES_EQ, res_id);
3251
3252         return err;
3253 }
3254
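     /*
      * Inject an asynchronous event into @slave's registered event EQ for
      * this event type.  The 28-byte EQE payload travels to the GEN_EQE
      * command in a mailbox, with the slave and EQN encoded in the input
      * modifier.
      */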
3255 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3256 {
3257         struct mlx4_priv *priv = mlx4_priv(dev);
3258         struct mlx4_slave_event_eq_info *event_eq;
3259         struct mlx4_cmd_mailbox *mailbox;
3260         u32 in_modifier = 0;
3261         int err;
3262         int res_id;
3263         struct res_eq *req;
3264
3265         if (!priv->mfunc.master.slave_state)
3266                 return -EINVAL;
3267
3268         /* check that the slave is valid, not the PF, and active */
3269         if (slave < 0 || slave > dev->persist->num_vfs ||
3270             slave == dev->caps.function ||
3271             !priv->mfunc.master.slave_state[slave].active)
3272                 return 0;
3273
3274         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3275
3276         /* Create the event only if the slave is registered */
3277         if (event_eq->eqn < 0)
3278                 return 0;
3279
3280         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3281         res_id = (slave << 10) | event_eq->eqn;
3282         err = get_res(dev, slave, res_id, RES_EQ, &req);
3283         if (err)
3284                 goto unlock;
3285
3286         if (req->com.from_state != RES_EQ_HW) {
3287                 err = -EINVAL;
3288                 goto put;
3289         }
3290
3291         mailbox = mlx4_alloc_cmd_mailbox(dev);
3292         if (IS_ERR(mailbox)) {
3293                 err = PTR_ERR(mailbox);
3294                 goto put;
3295         }
3296
3297         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3298                 ++event_eq->token;
3299                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3300         }
3301
3302         memcpy(mailbox->buf, (u8 *) eqe, 28);
3303
3304         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3305
3306         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3307                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3308                        MLX4_CMD_NATIVE);
3309
3310         put_res(dev, slave, res_id, RES_EQ);
3311         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3312         mlx4_free_cmd_mailbox(dev, mailbox);
3313         return err;
3314
3315 put:
3316         put_res(dev, slave, res_id, RES_EQ);
3317
3318 unlock:
3319         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3320         return err;
3321 }
3322
3323 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3324                           struct mlx4_vhcr *vhcr,
3325                           struct mlx4_cmd_mailbox *inbox,
3326                           struct mlx4_cmd_mailbox *outbox,
3327                           struct mlx4_cmd_info *cmd)
3328 {
3329         int eqn = vhcr->in_modifier;
3330         int res_id = eqn | (slave << 10);
3331         struct res_eq *eq;
3332         int err;
3333
3334         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3335         if (err)
3336                 return err;
3337
3338         if (eq->com.from_state != RES_EQ_HW) {
3339                 err = -EINVAL;
3340                 goto ex_put;
3341         }
3342
3343         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3344
3345 ex_put:
3346         put_res(dev, slave, res_id, RES_EQ);
3347         return err;
3348 }
3349
3350 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3351                           struct mlx4_vhcr *vhcr,
3352                           struct mlx4_cmd_mailbox *inbox,
3353                           struct mlx4_cmd_mailbox *outbox,
3354                           struct mlx4_cmd_info *cmd)
3355 {
3356         int err;
3357         int cqn = vhcr->in_modifier;
3358         struct mlx4_cq_context *cqc = inbox->buf;
3359         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3360         struct res_cq *cq = NULL;
3361         struct res_mtt *mtt;
3362
3363         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3364         if (err)
3365                 return err;
3366         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3367         if (err)
3368                 goto out_move;
3369         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3370         if (err)
3371                 goto out_put;
3372         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3373         if (err)
3374                 goto out_put;
3375         atomic_inc(&mtt->ref_count);
3376         cq->mtt = mtt;
3377         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3378         res_end_move(dev, slave, RES_CQ, cqn);
3379         return 0;
3380
3381 out_put:
3382         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3383 out_move:
3384         res_abort_move(dev, slave, RES_CQ, cqn);
3385         return err;
3386 }
3387
3388 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3389                           struct mlx4_vhcr *vhcr,
3390                           struct mlx4_cmd_mailbox *inbox,
3391                           struct mlx4_cmd_mailbox *outbox,
3392                           struct mlx4_cmd_info *cmd)
3393 {
3394         int err;
3395         int cqn = vhcr->in_modifier;
3396         struct res_cq *cq = NULL;
3397
3398         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3399         if (err)
3400                 return err;
3401         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3402         if (err)
3403                 goto out_move;
3404         atomic_dec(&cq->mtt->ref_count);
3405         res_end_move(dev, slave, RES_CQ, cqn);
3406         return 0;
3407
3408 out_move:
3409         res_abort_move(dev, slave, RES_CQ, cqn);
3410         return err;
3411 }
3412
3413 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3414                           struct mlx4_vhcr *vhcr,
3415                           struct mlx4_cmd_mailbox *inbox,
3416                           struct mlx4_cmd_mailbox *outbox,
3417                           struct mlx4_cmd_info *cmd)
3418 {
3419         int cqn = vhcr->in_modifier;
3420         struct res_cq *cq;
3421         int err;
3422
3423         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3424         if (err)
3425                 return err;
3426
3427         if (cq->com.from_state != RES_CQ_HW)
3428                 goto ex_put;
3429
3430         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3431 ex_put:
3432         put_res(dev, slave, cqn, RES_CQ);
3433
3434         return err;
3435 }
3436
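     /*
      * MODIFY_CQ with op_modifier 0 is a CQ resize: validate the new MTT
      * range, run the command, then move the CQ's reference from the old
      * MTT to the new one.
      */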
3437 static int handle_resize(struct mlx4_dev *dev, int slave,
3438                          struct mlx4_vhcr *vhcr,
3439                          struct mlx4_cmd_mailbox *inbox,
3440                          struct mlx4_cmd_mailbox *outbox,
3441                          struct mlx4_cmd_info *cmd,
3442                          struct res_cq *cq)
3443 {
3444         int err;
3445         struct res_mtt *orig_mtt;
3446         struct res_mtt *mtt;
3447         struct mlx4_cq_context *cqc = inbox->buf;
3448         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3449
3450         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3451         if (err)
3452                 return err;
3453
3454         if (orig_mtt != cq->mtt) {
3455                 err = -EINVAL;
3456                 goto ex_put;
3457         }
3458
3459         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3460         if (err)
3461                 goto ex_put;
3462
3463         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3464         if (err)
3465                 goto ex_put1;
3466         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3467         if (err)
3468                 goto ex_put1;
3469         atomic_dec(&orig_mtt->ref_count);
3470         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3471         atomic_inc(&mtt->ref_count);
3472         cq->mtt = mtt;
3473         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3474         return 0;
3475
3476 ex_put1:
3477         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3478 ex_put:
3479         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3480
3481         return err;
3482
3483 }
3484
3485 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3486                            struct mlx4_vhcr *vhcr,
3487                            struct mlx4_cmd_mailbox *inbox,
3488                            struct mlx4_cmd_mailbox *outbox,
3489                            struct mlx4_cmd_info *cmd)
3490 {
3491         int cqn = vhcr->in_modifier;
3492         struct res_cq *cq;
3493         int err;
3494
3495         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3496         if (err)
3497                 return err;
3498
3499         if (cq->com.from_state != RES_CQ_HW)
3500                 goto ex_put;
3501
3502         if (vhcr->op_modifier == 0) {
3503                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3504                 goto ex_put;
3505         }
3506
3507         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3508 ex_put:
3509         put_res(dev, slave, cqn, RES_CQ);
3510
3511         return err;
3512 }
3513
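     /*
      * Number of MTT pages backing an SRQ: 2^log_srq_size WQEs with a
      * stride of 2^(log_rq_stride + 4) bytes, rounded up to at least one
      * page.
      */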
3514 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3515 {
3516         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3517         int log_rq_stride = srqc->logstride & 7;
3518         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3519
3520         if (log_srq_size + log_rq_stride + 4 < page_shift)
3521                 return 1;
3522
3523         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3524 }
3525
3526 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3527                            struct mlx4_vhcr *vhcr,
3528                            struct mlx4_cmd_mailbox *inbox,
3529                            struct mlx4_cmd_mailbox *outbox,
3530                            struct mlx4_cmd_info *cmd)
3531 {
3532         int err;
3533         int srqn = vhcr->in_modifier;
3534         struct res_mtt *mtt;
3535         struct res_srq *srq = NULL;
3536         struct mlx4_srq_context *srqc = inbox->buf;
3537         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3538
3539         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3540                 return -EINVAL;
3541
3542         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3543         if (err)
3544                 return err;
3545         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3546         if (err)
3547                 goto ex_abort;
3548         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3549                               mtt);
3550         if (err)
3551                 goto ex_put_mtt;
3552
3553         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3554         if (err)
3555                 goto ex_put_mtt;
3556
3557         atomic_inc(&mtt->ref_count);
3558         srq->mtt = mtt;
3559         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3560         res_end_move(dev, slave, RES_SRQ, srqn);
3561         return 0;
3562
3563 ex_put_mtt:
3564         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3565 ex_abort:
3566         res_abort_move(dev, slave, RES_SRQ, srqn);
3567
3568         return err;
3569 }
3570
3571 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3572                            struct mlx4_vhcr *vhcr,
3573                            struct mlx4_cmd_mailbox *inbox,
3574                            struct mlx4_cmd_mailbox *outbox,
3575                            struct mlx4_cmd_info *cmd)
3576 {
3577         int err;
3578         int srqn = vhcr->in_modifier;
3579         struct res_srq *srq = NULL;
3580
3581         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3582         if (err)
3583                 return err;
3584         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3585         if (err)
3586                 goto ex_abort;
3587         atomic_dec(&srq->mtt->ref_count);
3588         if (srq->cq)
3589                 atomic_dec(&srq->cq->ref_count);
3590         res_end_move(dev, slave, RES_SRQ, srqn);
3591
3592         return 0;
3593
3594 ex_abort:
3595         res_abort_move(dev, slave, RES_SRQ, srqn);
3596
3597         return err;
3598 }
3599
3600 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3601                            struct mlx4_vhcr *vhcr,
3602                            struct mlx4_cmd_mailbox *inbox,
3603                            struct mlx4_cmd_mailbox *outbox,
3604                            struct mlx4_cmd_info *cmd)
3605 {
3606         int err;
3607         int srqn = vhcr->in_modifier;
3608         struct res_srq *srq;
3609
3610         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3611         if (err)
3612                 return err;
3613         if (srq->com.from_state != RES_SRQ_HW) {
3614                 err = -EBUSY;
3615                 goto out;
3616         }
3617         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3618 out:
3619         put_res(dev, slave, srqn, RES_SRQ);
3620         return err;
3621 }
3622
3623 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3624                          struct mlx4_vhcr *vhcr,
3625                          struct mlx4_cmd_mailbox *inbox,
3626                          struct mlx4_cmd_mailbox *outbox,
3627                          struct mlx4_cmd_info *cmd)
3628 {
3629         int err;
3630         int srqn = vhcr->in_modifier;
3631         struct res_srq *srq;
3632
3633         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3634         if (err)
3635                 return err;
3636
3637         if (srq->com.from_state != RES_SRQ_HW) {
3638                 err = -EBUSY;
3639                 goto out;
3640         }
3641
3642         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3643 out:
3644         put_res(dev, slave, srqn, RES_SRQ);
3645         return err;
3646 }
3647
3648 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3649                         struct mlx4_vhcr *vhcr,
3650                         struct mlx4_cmd_mailbox *inbox,
3651                         struct mlx4_cmd_mailbox *outbox,
3652                         struct mlx4_cmd_info *cmd)
3653 {
3654         int err;
3655         int qpn = vhcr->in_modifier & 0x7fffff;
3656         struct res_qp *qp;
3657
3658         err = get_res(dev, slave, qpn, RES_QP, &qp);
3659         if (err)
3660                 return err;
3661         if (qp->com.from_state != RES_QP_HW) {
3662                 err = -EBUSY;
3663                 goto out;
3664         }
3665
3666         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3667 out:
3668         put_res(dev, slave, qpn, RES_QP);
3669         return err;
3670 }
3671
3672 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3673                               struct mlx4_vhcr *vhcr,
3674                               struct mlx4_cmd_mailbox *inbox,
3675                               struct mlx4_cmd_mailbox *outbox,
3676                               struct mlx4_cmd_info *cmd)
3677 {
3678         struct mlx4_qp_context *context = inbox->buf + 8;
3679         adjust_proxy_tun_qkey(dev, vhcr, context);
3680         update_pkey_index(dev, slave, inbox);
3681         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3682 }
3683
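     /*
      * Bit 6 of sched_queue selects the physical port.  Rewrite it so the
      * port the slave passed in is converted to the physical port it is
      * actually mapped to, for both the primary and alternate paths.
      */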
3684 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3685                                   struct mlx4_qp_context *qpc,
3686                                   struct mlx4_cmd_mailbox *inbox)
3687 {
3688         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3689         u8 pri_sched_queue;
3690         int port = mlx4_slave_convert_port(
3691                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3692
3693         if (port < 0)
3694                 return -EINVAL;
3695
3696         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3697                           ((port & 1) << 6);
3698
3699         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3700             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3701                 qpc->pri_path.sched_queue = pri_sched_queue;
3702         }
3703
3704         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3705                 port = mlx4_slave_convert_port(
3706                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3707                                 + 1) - 1;
3708                 if (port < 0)
3709                         return -EINVAL;
3710                 qpc->alt_path.sched_queue =
3711                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3712                         (port & 1) << 6;
3713         }
3714         return 0;
3715 }
3716
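     /*
      * For an Ethernet (RoCE) QP, the source-MAC index in the primary
      * path must resolve to a MAC actually assigned to this slave.
      */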
3717 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3718                                 struct mlx4_qp_context *qpc,
3719                                 struct mlx4_cmd_mailbox *inbox)
3720 {
3721         u64 mac;
3722         int port;
3723         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3724         u8 sched = *(u8 *)(inbox->buf + 64);
3725         u8 smac_ix;
3726
3727         port = (sched >> 6 & 1) + 1;
3728         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3729                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3730                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3731                         return -ENOENT;
3732         }
3733         return 0;
3734 }
3735
3736 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3737                              struct mlx4_vhcr *vhcr,
3738                              struct mlx4_cmd_mailbox *inbox,
3739                              struct mlx4_cmd_mailbox *outbox,
3740                              struct mlx4_cmd_info *cmd)
3741 {
3742         int err;
3743         struct mlx4_qp_context *qpc = inbox->buf + 8;
3744         int qpn = vhcr->in_modifier & 0x7fffff;
3745         struct res_qp *qp;
3746         u8 orig_sched_queue;
3747         __be32  orig_param3 = qpc->param3;
3748         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3749         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3750         u8 orig_pri_path_fl = qpc->pri_path.fl;
3751         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3752         u8 orig_feup = qpc->pri_path.feup;
3753
3754         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3755         if (err)
3756                 return err;
3757         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3758         if (err)
3759                 return err;
3760
3761         if (roce_verify_mac(dev, slave, qpc, inbox))
3762                 return -EINVAL;
3763
3764         update_pkey_index(dev, slave, inbox);
3765         update_gid(dev, inbox, (u8)slave);
3766         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3767         orig_sched_queue = qpc->pri_path.sched_queue;
3768
3769         err = get_res(dev, slave, qpn, RES_QP, &qp);
3770         if (err)
3771                 return err;
3772         if (qp->com.from_state != RES_QP_HW) {
3773                 err = -EBUSY;
3774                 goto out;
3775         }
3776
3777         err = update_vport_qp_param(dev, inbox, slave, qpn);
3778         if (err)
3779                 goto out;
3780
3781         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3782 out:
3783         /* if no error, save sched queue value passed in by VF. This is
3784          * essentially the QOS value provided by the VF. This will be useful
3785          * if we allow dynamic changes from VST back to VGT
3786          */
3787         if (!err) {
3788                 qp->sched_queue = orig_sched_queue;
3789                 qp->param3      = orig_param3;
3790                 qp->vlan_control = orig_vlan_control;
3791                 qp->fvl_rx      =  orig_fvl_rx;
3792                 qp->pri_path_fl = orig_pri_path_fl;
3793                 qp->vlan_index  = orig_vlan_index;
3794                 qp->feup        = orig_feup;
3795         }
3796         put_res(dev, slave, qpn, RES_QP);
3797         return err;
3798 }
3799
3800 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3801                             struct mlx4_vhcr *vhcr,
3802                             struct mlx4_cmd_mailbox *inbox,
3803                             struct mlx4_cmd_mailbox *outbox,
3804                             struct mlx4_cmd_info *cmd)
3805 {
3806         int err;
3807         struct mlx4_qp_context *context = inbox->buf + 8;
3808
3809         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3810         if (err)
3811                 return err;
3812         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3813         if (err)
3814                 return err;
3815
3816         update_pkey_index(dev, slave, inbox);
3817         update_gid(dev, inbox, (u8)slave);
3818         adjust_proxy_tun_qkey(dev, vhcr, context);
3819         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3820 }
3821
3822 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3823                             struct mlx4_vhcr *vhcr,
3824                             struct mlx4_cmd_mailbox *inbox,
3825                             struct mlx4_cmd_mailbox *outbox,
3826                             struct mlx4_cmd_info *cmd)
3827 {
3828         int err;
3829         struct mlx4_qp_context *context = inbox->buf + 8;
3830
3831         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3832         if (err)
3833                 return err;
3834         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3835         if (err)
3836                 return err;
3837
3838         update_pkey_index(dev, slave, inbox);
3839         update_gid(dev, inbox, (u8)slave);
3840         adjust_proxy_tun_qkey(dev, vhcr, context);
3841         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3842 }
3843
3845 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3846                               struct mlx4_vhcr *vhcr,
3847                               struct mlx4_cmd_mailbox *inbox,
3848                               struct mlx4_cmd_mailbox *outbox,
3849                               struct mlx4_cmd_info *cmd)
3850 {
3851         struct mlx4_qp_context *context = inbox->buf + 8;
3852         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3853         if (err)
3854                 return err;
3855         adjust_proxy_tun_qkey(dev, vhcr, context);
3856         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3857 }
3858
3859 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3860                             struct mlx4_vhcr *vhcr,
3861                             struct mlx4_cmd_mailbox *inbox,
3862                             struct mlx4_cmd_mailbox *outbox,
3863                             struct mlx4_cmd_info *cmd)
3864 {
3865         int err;
3866         struct mlx4_qp_context *context = inbox->buf + 8;
3867
3868         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3869         if (err)
3870                 return err;
3871         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3872         if (err)
3873                 return err;
3874
3875         adjust_proxy_tun_qkey(dev, vhcr, context);
3876         update_gid(dev, inbox, (u8)slave);
3877         update_pkey_index(dev, slave, inbox);
3878         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3879 }
3880
3881 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3882                             struct mlx4_vhcr *vhcr,
3883                             struct mlx4_cmd_mailbox *inbox,
3884                             struct mlx4_cmd_mailbox *outbox,
3885                             struct mlx4_cmd_info *cmd)
3886 {
3887         int err;
3888         struct mlx4_qp_context *context = inbox->buf + 8;
3889
3890         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3891         if (err)
3892                 return err;
3893         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3894         if (err)
3895                 return err;
3896
3897         adjust_proxy_tun_qkey(dev, vhcr, context);
3898         update_gid(dev, inbox, (u8)slave);
3899         update_pkey_index(dev, slave, inbox);
3900         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3901 }
3902
3903 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3904                          struct mlx4_vhcr *vhcr,
3905                          struct mlx4_cmd_mailbox *inbox,
3906                          struct mlx4_cmd_mailbox *outbox,
3907                          struct mlx4_cmd_info *cmd)
3908 {
3909         int err;
3910         int qpn = vhcr->in_modifier & 0x7fffff;
3911         struct res_qp *qp;
3912
3913         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3914         if (err)
3915                 return err;
3916         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3917         if (err)
3918                 goto ex_abort;
3919
3920         atomic_dec(&qp->mtt->ref_count);
3921         atomic_dec(&qp->rcq->ref_count);
3922         atomic_dec(&qp->scq->ref_count);
3923         if (qp->srq)
3924                 atomic_dec(&qp->srq->ref_count);
3925         res_end_move(dev, slave, RES_QP, qpn);
3926         return 0;
3927
3928 ex_abort:
3929         res_abort_move(dev, slave, RES_QP, qpn);
3930
3931         return err;
3932 }
3933
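     /*
      * Multicast attachments are tracked per QP on rqp->mcg_list, keyed
      * by the 16-byte GID and protected by rqp->mcg_spl, so they can be
      * released when the QP or its slave goes away.
      */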
3934 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3935                                 struct res_qp *rqp, u8 *gid)
3936 {
3937         struct res_gid *res;
3938
3939         list_for_each_entry(res, &rqp->mcg_list, list) {
3940                 if (!memcmp(res->gid, gid, 16))
3941                         return res;
3942         }
3943         return NULL;
3944 }
3945
3946 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3947                        u8 *gid, enum mlx4_protocol prot,
3948                        enum mlx4_steer_type steer, u64 reg_id)
3949 {
3950         struct res_gid *res;
3951         int err;
3952
3953         res = kzalloc(sizeof *res, GFP_KERNEL);
3954         if (!res)
3955                 return -ENOMEM;
3956
3957         spin_lock_irq(&rqp->mcg_spl);
3958         if (find_gid(dev, slave, rqp, gid)) {
3959                 kfree(res);
3960                 err = -EEXIST;
3961         } else {
3962                 memcpy(res->gid, gid, 16);
3963                 res->prot = prot;
3964                 res->steer = steer;
3965                 res->reg_id = reg_id;
3966                 list_add_tail(&res->list, &rqp->mcg_list);
3967                 err = 0;
3968         }
3969         spin_unlock_irq(&rqp->mcg_spl);
3970
3971         return err;
3972 }
3973
3974 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3975                        u8 *gid, enum mlx4_protocol prot,
3976                        enum mlx4_steer_type steer, u64 *reg_id)
3977 {
3978         struct res_gid *res;
3979         int err;
3980
3981         spin_lock_irq(&rqp->mcg_spl);
3982         res = find_gid(dev, slave, rqp, gid);
3983         if (!res || res->prot != prot || res->steer != steer) {
3984                 err = -EINVAL;
3985         } else {
3986                 *reg_id = res->reg_id;
3987                 list_del(&res->list);
3988                 kfree(res);
3989                 err = 0;
3990         }
3991         spin_unlock_irq(&rqp->mcg_spl);
3992
3993         return err;
3994 }
3995
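     /*
      * Attach a QP to a multicast group according to the steering mode:
      * device-managed flow steering hands back a flow reg_id, while B0
      * steering attaches the QP directly (rewriting the GID's port byte
      * to the physical port for Ethernet).
      */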
3996 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3997                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3998                      enum mlx4_steer_type type, u64 *reg_id)
3999 {
4000         switch (dev->caps.steering_mode) {
4001         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4002                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4003                 if (port < 0)
4004                         return port;
4005                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4006                                                 block_loopback, prot,
4007                                                 reg_id);
4008         }
4009         case MLX4_STEERING_MODE_B0:
4010                 if (prot == MLX4_PROT_ETH) {
4011                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4012                         if (port < 0)
4013                                 return port;
4014                         gid[5] = port;
4015                 }
4016                 return mlx4_qp_attach_common(dev, qp, gid,
4017                                             block_loopback, prot, type);
4018         default:
4019                 return -EINVAL;
4020         }
4021 }
4022
4023 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4024                      u8 gid[16], enum mlx4_protocol prot,
4025                      enum mlx4_steer_type type, u64 reg_id)
4026 {
4027         switch (dev->caps.steering_mode) {
4028         case MLX4_STEERING_MODE_DEVICE_MANAGED:
4029                 return mlx4_flow_detach(dev, reg_id);
4030         case MLX4_STEERING_MODE_B0:
4031                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4032         default:
4033                 return -EINVAL;
4034         }
4035 }
4036
4037 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4038                             u8 *gid, enum mlx4_protocol prot)
4039 {
4040         int real_port;
4041
4042         if (prot != MLX4_PROT_ETH)
4043                 return 0;
4044
4045         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4046             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4047                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4048                 if (real_port < 0)
4049                         return -EINVAL;
4050                 gid[5] = real_port;
4051         }
4052
4053         return 0;
4054 }
4055
4056 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4057                                struct mlx4_vhcr *vhcr,
4058                                struct mlx4_cmd_mailbox *inbox,
4059                                struct mlx4_cmd_mailbox *outbox,
4060                                struct mlx4_cmd_info *cmd)
4061 {
4062         struct mlx4_qp qp; /* dummy for calling attach/detach */
4063         u8 *gid = inbox->buf;
4064         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4065         int err;
4066         int qpn;
4067         struct res_qp *rqp;
4068         u64 reg_id = 0;
4069         int attach = vhcr->op_modifier;
4070         int block_loopback = vhcr->in_modifier >> 31;
4071         u8 steer_type_mask = 2;
4072         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4073
4074         qpn = vhcr->in_modifier & 0xffffff;
4075         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4076         if (err)
4077                 return err;
4078
4079         qp.qpn = qpn;
4080         if (attach) {
4081                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4082                                 type, &reg_id);
4083                 if (err) {
4084                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4085                         goto ex_put;
4086                 }
4087                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4088                 if (err)
4089                         goto ex_detach;
4090         } else {
4091                 err = mlx4_adjust_port(dev, slave, gid, prot);
4092                 if (err)
4093                         goto ex_put;
4094
4095                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4096                 if (err)
4097                         goto ex_put;
4098
4099                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4100                 if (err)
4101                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
4102                                qpn, reg_id);
4103         }
4104         put_res(dev, slave, qpn, RES_QP);
4105         return err;
4106
4107 ex_detach:
4108         qp_detach(dev, &qp, gid, prot, type, reg_id);
4109 ex_put:
4110         put_res(dev, slave, qpn, RES_QP);
4111         return err;
4112 }
4113
4114 /*
4115  * MAC validation for Flow Steering rules.
4116  * A VF may attach rules only with a MAC address that is assigned to it.
4117  */
4118 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4119                                    struct list_head *rlist)
4120 {
4121         struct mac_res *res, *tmp;
4122         __be64 be_mac;
4123
4124         /* make sure it isn't a multicast or broadcast MAC */
4125         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4126             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4127                 list_for_each_entry_safe(res, tmp, rlist, list) {
4128                         be_mac = cpu_to_be64(res->mac << 16);
4129                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4130                                 return 0;
4131                 }
4132                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4133                        eth_header->eth.dst_mac, slave);
4134                 return -EINVAL;
4135         }
4136         return 0;
4137 }
4138
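     /*
      * A rule that matches only on a multicast or broadcast destination
      * MAC (the L2 header is its last criterion) is demoted to the
      * MLX4_DOMAIN_NIC priority domain.
      */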
4139 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4140                                          struct _rule_hw *eth_header)
4141 {
4142         if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4143             is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4144                 struct mlx4_net_trans_rule_hw_eth *eth =
4145                         (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4146                 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4147                 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4148                         next_rule->rsvd == 0;
4149
4150                 if (last_rule)
4151                         ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4152         }
4153 }
4154
4155 /*
4156  * If the rule lacks an eth header, prepend one carrying a MAC address
4157  * assigned to the VF.
4158  */
4159 static int add_eth_header(struct mlx4_dev *dev, int slave,
4160                           struct mlx4_cmd_mailbox *inbox,
4161                           struct list_head *rlist, int header_id)
4162 {
4163         struct mac_res *res, *tmp;
4164         u8 port;
4165         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4166         struct mlx4_net_trans_rule_hw_eth *eth_header;
4167         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4168         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4169         __be64 be_mac = 0;
4170         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4171
4172         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4173         port = ctrl->port;
4174         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4175
4176         /* Clear a space in the inbox for eth header */
4177         switch (header_id) {
4178         case MLX4_NET_TRANS_RULE_ID_IPV4:
4179                 ip_header =
4180                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4181                 memmove(ip_header, eth_header,
4182                         sizeof(*ip_header) + sizeof(*l4_header));
4183                 break;
4184         case MLX4_NET_TRANS_RULE_ID_TCP:
4185         case MLX4_NET_TRANS_RULE_ID_UDP:
4186                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4187                             (eth_header + 1);
4188                 memmove(l4_header, eth_header, sizeof(*l4_header));
4189                 break;
4190         default:
4191                 return -EINVAL;
4192         }
4193         list_for_each_entry_safe(res, tmp, rlist, list) {
4194                 if (port == res->port) {
4195                         be_mac = cpu_to_be64(res->mac << 16);
4196                         break;
4197                 }
4198         }
4199         if (!be_mac) {
4200                 pr_err("Failed adding eth header to FS rule: can't find a matching MAC for port %d\n",
4201                        port);
4202                 return -EINVAL;
4203         }
4204
4205         memset(eth_header, 0, sizeof(*eth_header));
4206         eth_header->size = sizeof(*eth_header) >> 2;
4207         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4208         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4209         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4210
4211         return 0;
4212
4213 }
4214
4215 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4216         1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4217         1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
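     /*
      * A slave's UPDATE_QP may touch nothing beyond the primary-path MAC
      * index and the multicast loopback source check; any other mask bit
      * is rejected with -EPERM, and a requested SMAC index must map to a
      * MAC owned by the slave.
      */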
4218 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4219                            struct mlx4_vhcr *vhcr,
4220                            struct mlx4_cmd_mailbox *inbox,
4221                            struct mlx4_cmd_mailbox *outbox,
4222                            struct mlx4_cmd_info *cmd_info)
4223 {
4224         int err;
4225         u32 qpn = vhcr->in_modifier & 0xffffff;
4226         struct res_qp *rqp;
4227         u64 mac;
4228         unsigned port;
4229         u64 pri_addr_path_mask;
4230         struct mlx4_update_qp_context *cmd;
4231         int smac_index;
4232
4233         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4234
4235         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4236         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4237             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4238                 return -EPERM;
4239
4240         if ((pri_addr_path_mask &
4241              (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4242                 !(dev->caps.flags2 &
4243                   MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4244                 mlx4_warn(dev,
4245                           "Src check LB for slave %d isn't supported\n",
4246                           slave);
4247                 return -ENOTSUPP;
4248         }
4249
4250         /* Only the SMAC index and MC loopback src check may change here */
4251         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4252         if (err) {
4253                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4254                 return err;
4255         }
4256
4257         port = (rqp->sched_queue >> 6 & 1) + 1;
4258
4259         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4260                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4261                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4262                                                 smac_index, &mac);
4263
4264                 if (err) {
4265                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4266                                  qpn, smac_index);
4267                         goto err_mac;
4268                 }
4269         }
4270
4271         err = mlx4_cmd(dev, inbox->dma,
4272                        vhcr->in_modifier, 0,
4273                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4274                        MLX4_CMD_NATIVE);
4275         if (err) {
4276                 mlx4_err(dev, "Failed to update QP 0x%x, command failed\n", qpn);
4277                 goto err_mac;
4278         }
4279
4280 err_mac:
4281         put_res(dev, slave, qpn, RES_QP);
4282         return err;
4283 }
4284
4285 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4286                                          struct mlx4_vhcr *vhcr,
4287                                          struct mlx4_cmd_mailbox *inbox,
4288                                          struct mlx4_cmd_mailbox *outbox,
4289                                          struct mlx4_cmd_info *cmd)
4290 {
4292         struct mlx4_priv *priv = mlx4_priv(dev);
4293         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4294         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4295         int err;
4296         int qpn;
4297         struct res_qp *rqp;
4298         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4299         struct _rule_hw  *rule_header;
4300         int header_id;
4301
4302         if (dev->caps.steering_mode !=
4303             MLX4_STEERING_MODE_DEVICE_MANAGED)
4304                 return -EOPNOTSUPP;
4305
4306         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4307         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4308         if (err <= 0)
4309                 return -EINVAL;
             ctrl->port = err;
4310         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4311         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4312         if (err) {
4313                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4314                 return err;
4315         }
4316         rule_header = (struct _rule_hw *)(ctrl + 1);
4317         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4318
4319         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4320                 handle_eth_header_mcast_prio(ctrl, rule_header);
4321
4322         if (slave == dev->caps.function)
4323                 goto execute;
4324
4325         switch (header_id) {
4326         case MLX4_NET_TRANS_RULE_ID_ETH:
4327                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4328                         err = -EINVAL;
4329                         goto err_put;
4330                 }
4331                 break;
4332         case MLX4_NET_TRANS_RULE_ID_IB:
4333                 break;
4334         case MLX4_NET_TRANS_RULE_ID_IPV4:
4335         case MLX4_NET_TRANS_RULE_ID_TCP:
4336         case MLX4_NET_TRANS_RULE_ID_UDP:
4337                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4338                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4339                         err = -EINVAL;
4340                         goto err_put;
4341                 }
4342                 vhcr->in_modifier +=
4343                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4344                 break;
4345         default:
4346                 pr_err("Corrupted mailbox\n");
4347                 err = -EINVAL;
4348                 goto err_put;
4349         }
4350
4351 execute:
4352         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4353                            vhcr->in_modifier, 0,
4354                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4355                            MLX4_CMD_NATIVE);
4356         if (err)
4357                 goto err_put;
4358
4359         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4360         if (err) {
4361                 mlx4_err(dev, "Failed to add flow steering resources\n");
4362                 /* detach rule */
4363                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4364                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4365                          MLX4_CMD_NATIVE);
4366                 goto err_put;
4367         }
4368         atomic_inc(&rqp->ref_count);
4369 err_put:
4370         put_res(dev, slave, qpn, RES_QP);
4371         return err;
4372 }
4373
4374 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4375                                          struct mlx4_vhcr *vhcr,
4376                                          struct mlx4_cmd_mailbox *inbox,
4377                                          struct mlx4_cmd_mailbox *outbox,
4378                                          struct mlx4_cmd_info *cmd)
4379 {
4380         int err;
4381         struct res_qp *rqp;
4382         struct res_fs_rule *rrule;
4383
4384         if (dev->caps.steering_mode !=
4385             MLX4_STEERING_MODE_DEVICE_MANAGED)
4386                 return -EOPNOTSUPP;
4387
4388         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4389         if (err)
4390                 return err;
4391         /* Release the rule from busy state before removal */
4392         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4393         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4394         if (err)
4395                 return err;
4396
4397         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4398         if (err) {
4399                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4400                 goto out;
4401         }
4402
4403         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4404                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4405                        MLX4_CMD_NATIVE);
4406         if (!err)
4407                 atomic_dec(&rqp->ref_count);
4408 out:
4409         put_res(dev, slave, rrule->qpn, RES_QP);
4410         return err;
4411 }
4412
4413 enum {
4414         BUSY_MAX_RETRIES = 10
4415 };
4416
4417 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4418                                struct mlx4_vhcr *vhcr,
4419                                struct mlx4_cmd_mailbox *inbox,
4420                                struct mlx4_cmd_mailbox *outbox,
4421                                struct mlx4_cmd_info *cmd)
4422 {
4423         int err;
4424         int index = vhcr->in_modifier & 0xffff;
4425
4426         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4427         if (err)
4428                 return err;
4429
4430         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4431         put_res(dev, slave, index, RES_COUNTER);
4432         return err;
4433 }
4434
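/*
 * Detach a QP from every multicast group it is still attached to.  In
 * device-managed steering the flow is detached by its reg_id; in B0
 * steering the common detach path is used with a dummy QP carrying
 * only the QP number.
 */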
4435 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4436 {
4437         struct res_gid *rgid;
4438         struct res_gid *tmp;
4439         struct mlx4_qp qp; /* dummy for calling attach/detach */
4440
4441         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4442                 switch (dev->caps.steering_mode) {
4443                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4444                         mlx4_flow_detach(dev, rgid->reg_id);
4445                         break;
4446                 case MLX4_STEERING_MODE_B0:
4447                         qp.qpn = rqp->local_qpn;
4448                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4449                                                      rgid->prot, rgid->steer);
4450                         break;
4451                 }
4452                 list_del(&rgid->list);
4453                 kfree(rgid);
4454         }
4455 }
4456
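/*
 * One pass over all resources of one type owned by a slave: idle
 * resources are marked busy and flagged for removal, while resources
 * already busy (held by an in-flight command) are counted and,
 * optionally, logged.  Returns how many resources could not be moved.
 */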
4457 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4458                           enum mlx4_resource type, int print)
4459 {
4460         struct mlx4_priv *priv = mlx4_priv(dev);
4461         struct mlx4_resource_tracker *tracker =
4462                 &priv->mfunc.master.res_tracker;
4463         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4464         struct res_common *r;
4465         struct res_common *tmp;
4466         int busy;
4467
4468         busy = 0;
4469         spin_lock_irq(mlx4_tlock(dev));
4470         list_for_each_entry_safe(r, tmp, rlist, list) {
4471                 if (r->owner == slave) {
4472                         if (!r->removing) {
4473                                 if (r->state == RES_ANY_BUSY) {
4474                                         if (print)
4475                                                 mlx4_dbg(dev,
4476                                                          "%s id 0x%llx is busy\n",
4477                                                           resource_str(type),
4478                                                           r->res_id);
4479                                         ++busy;
4480                                 } else {
4481                                         r->from_state = r->state;
4482                                         r->state = RES_ANY_BUSY;
4483                                         r->removing = 1;
4484                                 }
4485                         }
4486                 }
4487         }
4488         spin_unlock_irq(mlx4_tlock(dev));
4489
4490         return busy;
4491 }
4492
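/*
 * Retry _move_all_busy() for up to five seconds, rescheduling between
 * attempts, then make one final pass with logging enabled so any
 * resource that is still busy gets reported.
 */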
4493 static int move_all_busy(struct mlx4_dev *dev, int slave,
4494                          enum mlx4_resource type)
4495 {
4496         unsigned long begin;
4497         int busy;
4498
4499         begin = jiffies;
4500         do {
4501                 busy = _move_all_busy(dev, slave, type, 0);
4502                 if (time_after(jiffies, begin + 5 * HZ))
4503                         break;
4504                 if (busy)
4505                         cond_resched();
4506         } while (busy);
4507
4508         if (busy)
4509                 busy = _move_all_busy(dev, slave, type, 1);
4510
4511         return busy;
4512 }
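
/*
 * Tear down every QP owned by the slave.  Each QP is first detached
 * from its multicast groups, then unwound state by state: RES_QP_HW is
 * moved to reset via 2RST_QP and its CQ/SRQ/MTT references are
 * dropped, RES_QP_MAPPED frees the ICM mapping, and RES_QP_RESERVED
 * releases the QP number range and the tracker entry.
 */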
4513 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4514 {
4515         struct mlx4_priv *priv = mlx4_priv(dev);
4516         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4517         struct list_head *qp_list =
4518                 &tracker->slave_list[slave].res_list[RES_QP];
4519         struct res_qp *qp;
4520         struct res_qp *tmp;
4521         int state;
4522         u64 in_param;
4523         int qpn;
4524         int err;
4525
4526         err = move_all_busy(dev, slave, RES_QP);
4527         if (err)
4528                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4529                           slave);
4530
4531         spin_lock_irq(mlx4_tlock(dev));
4532         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4533                 spin_unlock_irq(mlx4_tlock(dev));
4534                 if (qp->com.owner == slave) {
4535                         qpn = qp->com.res_id;
4536                         detach_qp(dev, slave, qp);
4537                         state = qp->com.from_state;
4538                         while (state != 0) {
4539                                 switch (state) {
4540                                 case RES_QP_RESERVED:
4541                                         spin_lock_irq(mlx4_tlock(dev));
4542                                         rb_erase(&qp->com.node,
4543                                                  &tracker->res_tree[RES_QP]);
4544                                         list_del(&qp->com.list);
4545                                         spin_unlock_irq(mlx4_tlock(dev));
4546                                         if (!valid_reserved(dev, slave, qpn)) {
4547                                                 __mlx4_qp_release_range(dev, qpn, 1);
4548                                                 mlx4_release_resource(dev, slave,
4549                                                                       RES_QP, 1, 0);
4550                                         }
4551                                         kfree(qp);
4552                                         state = 0;
4553                                         break;
4554                                 case RES_QP_MAPPED:
4555                                         if (!valid_reserved(dev, slave, qpn))
4556                                                 __mlx4_qp_free_icm(dev, qpn);
4557                                         state = RES_QP_RESERVED;
4558                                         break;
4559                                 case RES_QP_HW:
4560                                         in_param = slave;
4561                                         err = mlx4_cmd(dev, in_param,
4562                                                        qp->local_qpn, 2,
4563                                                        MLX4_CMD_2RST_QP,
4564                                                        MLX4_CMD_TIME_CLASS_A,
4565                                                        MLX4_CMD_NATIVE);
4566                                         if (err)
4567                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4568                                                          slave, qp->local_qpn);
4569                                         atomic_dec(&qp->rcq->ref_count);
4570                                         atomic_dec(&qp->scq->ref_count);
4571                                         atomic_dec(&qp->mtt->ref_count);
4572                                         if (qp->srq)
4573                                                 atomic_dec(&qp->srq->ref_count);
4574                                         state = RES_QP_MAPPED;
4575                                         break;
4576                                 default:
4577                                         state = 0;
4578                                 }
4579                         }
4580                 }
4581                 spin_lock_irq(mlx4_tlock(dev));
4582         }
4583         spin_unlock_irq(mlx4_tlock(dev));
4584 }
4585
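/*
 * Tear down every SRQ owned by the slave: RES_SRQ_HW is returned to SW
 * ownership via HW2SW_SRQ and its MTT/CQ references are dropped, then
 * RES_SRQ_ALLOCATED frees the ICM and the tracker entry.
 */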
4586 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4587 {
4588         struct mlx4_priv *priv = mlx4_priv(dev);
4589         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4590         struct list_head *srq_list =
4591                 &tracker->slave_list[slave].res_list[RES_SRQ];
4592         struct res_srq *srq;
4593         struct res_srq *tmp;
4594         int state;
4595         u64 in_param;
4597         int srqn;
4598         int err;
4599
4600         err = move_all_busy(dev, slave, RES_SRQ);
4601         if (err)
4602                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4603                           slave);
4604
4605         spin_lock_irq(mlx4_tlock(dev));
4606         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4607                 spin_unlock_irq(mlx4_tlock(dev));
4608                 if (srq->com.owner == slave) {
4609                         srqn = srq->com.res_id;
4610                         state = srq->com.from_state;
4611                         while (state != 0) {
4612                                 switch (state) {
4613                                 case RES_SRQ_ALLOCATED:
4614                                         __mlx4_srq_free_icm(dev, srqn);
4615                                         spin_lock_irq(mlx4_tlock(dev));
4616                                         rb_erase(&srq->com.node,
4617                                                  &tracker->res_tree[RES_SRQ]);
4618                                         list_del(&srq->com.list);
4619                                         spin_unlock_irq(mlx4_tlock(dev));
4620                                         mlx4_release_resource(dev, slave,
4621                                                               RES_SRQ, 1, 0);
4622                                         kfree(srq);
4623                                         state = 0;
4624                                         break;
4625
4626                                 case RES_SRQ_HW:
4627                                         in_param = slave;
4628                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4629                                                        MLX4_CMD_HW2SW_SRQ,
4630                                                        MLX4_CMD_TIME_CLASS_A,
4631                                                        MLX4_CMD_NATIVE);
4632                                         if (err)
4633                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4634                                                          slave, srqn);
4635
4636                                         atomic_dec(&srq->mtt->ref_count);
4637                                         if (srq->cq)
4638                                                 atomic_dec(&srq->cq->ref_count);
4639                                         state = RES_SRQ_ALLOCATED;
4640                                         break;
4641
4642                                 default:
4643                                         state = 0;
4644                                 }
4645                         }
4646                 }
4647                 spin_lock_irq(mlx4_tlock(dev));
4648         }
4649         spin_unlock_irq(mlx4_tlock(dev));
4650 }
4651
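/*
 * Tear down every CQ owned by the slave, skipping CQs still referenced
 * by a QP or SRQ: RES_CQ_HW is returned to SW ownership via HW2SW_CQ,
 * then RES_CQ_ALLOCATED frees the ICM and the tracker entry.
 */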
4652 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4653 {
4654         struct mlx4_priv *priv = mlx4_priv(dev);
4655         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4656         struct list_head *cq_list =
4657                 &tracker->slave_list[slave].res_list[RES_CQ];
4658         struct res_cq *cq;
4659         struct res_cq *tmp;
4660         int state;
4661         u64 in_param;
4663         int cqn;
4664         int err;
4665
4666         err = move_all_busy(dev, slave, RES_CQ);
4667         if (err)
4668                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4669                           slave);
4670
4671         spin_lock_irq(mlx4_tlock(dev));
4672         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4673                 spin_unlock_irq(mlx4_tlock(dev));
4674                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4675                         cqn = cq->com.res_id;
4676                         state = cq->com.from_state;
4677                         while (state != 0) {
4678                                 switch (state) {
4679                                 case RES_CQ_ALLOCATED:
4680                                         __mlx4_cq_free_icm(dev, cqn);
4681                                         spin_lock_irq(mlx4_tlock(dev));
4682                                         rb_erase(&cq->com.node,
4683                                                  &tracker->res_tree[RES_CQ]);
4684                                         list_del(&cq->com.list);
4685                                         spin_unlock_irq(mlx4_tlock(dev));
4686                                         mlx4_release_resource(dev, slave,
4687                                                               RES_CQ, 1, 0);
4688                                         kfree(cq);
4689                                         state = 0;
4690                                         break;
4691
4692                                 case RES_CQ_HW:
4693                                         in_param = slave;
4694                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4695                                                        MLX4_CMD_HW2SW_CQ,
4696                                                        MLX4_CMD_TIME_CLASS_A,
4697                                                        MLX4_CMD_NATIVE);
4698                                         if (err)
4699                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4700                                                          slave, cqn);
4701                                         atomic_dec(&cq->mtt->ref_count);
4702                                         state = RES_CQ_ALLOCATED;
4703                                         break;
4704
4705                                 default:
4706                                         state = 0;
4707                                 }
4708                         }
4709                 }
4710                 spin_lock_irq(mlx4_tlock(dev));
4711         }
4712         spin_unlock_irq(mlx4_tlock(dev));
4713 }
4714
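/*
 * Tear down every MPT (memory region) owned by the slave: RES_MPT_HW
 * is returned to SW ownership via HW2SW_MPT, RES_MPT_MAPPED frees the
 * ICM, and RES_MPT_RESERVED releases the key and the tracker entry.
 */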
4715 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4716 {
4717         struct mlx4_priv *priv = mlx4_priv(dev);
4718         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4719         struct list_head *mpt_list =
4720                 &tracker->slave_list[slave].res_list[RES_MPT];
4721         struct res_mpt *mpt;
4722         struct res_mpt *tmp;
4723         int state;
4724         u64 in_param;
4726         int mptn;
4727         int err;
4728
4729         err = move_all_busy(dev, slave, RES_MPT);
4730         if (err)
4731                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4732                           slave);
4733
4734         spin_lock_irq(mlx4_tlock(dev));
4735         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4736                 spin_unlock_irq(mlx4_tlock(dev));
4737                 if (mpt->com.owner == slave) {
4738                         mptn = mpt->com.res_id;
4739                         state = mpt->com.from_state;
4740                         while (state != 0) {
4741                                 switch (state) {
4742                                 case RES_MPT_RESERVED:
4743                                         __mlx4_mpt_release(dev, mpt->key);
4744                                         spin_lock_irq(mlx4_tlock(dev));
4745                                         rb_erase(&mpt->com.node,
4746                                                  &tracker->res_tree[RES_MPT]);
4747                                         list_del(&mpt->com.list);
4748                                         spin_unlock_irq(mlx4_tlock(dev));
4749                                         mlx4_release_resource(dev, slave,
4750                                                               RES_MPT, 1, 0);
4751                                         kfree(mpt);
4752                                         state = 0;
4753                                         break;
4754
4755                                 case RES_MPT_MAPPED:
4756                                         __mlx4_mpt_free_icm(dev, mpt->key);
4757                                         state = RES_MPT_RESERVED;
4758                                         break;
4759
4760                                 case RES_MPT_HW:
4761                                         in_param = slave;
4762                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4763                                                      MLX4_CMD_HW2SW_MPT,
4764                                                      MLX4_CMD_TIME_CLASS_A,
4765                                                      MLX4_CMD_NATIVE);
4766                                         if (err)
4767                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4768                                                          slave, mptn);
4769                                         if (mpt->mtt)
4770                                                 atomic_dec(&mpt->mtt->ref_count);
4771                                         state = RES_MPT_MAPPED;
4772                                         break;
4773                                 default:
4774                                         state = 0;
4775                                 }
4776                         }
4777                 }
4778                 spin_lock_irq(mlx4_tlock(dev));
4779         }
4780         spin_unlock_irq(mlx4_tlock(dev));
4781 }
4782
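/*
 * Release every MTT range owned by the slave and return its quota.
 * MTTs carry no HW state, so freeing the allocated range is all that
 * is needed.
 */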
4783 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4784 {
4785         struct mlx4_priv *priv = mlx4_priv(dev);
4786         struct mlx4_resource_tracker *tracker =
4787                 &priv->mfunc.master.res_tracker;
4788         struct list_head *mtt_list =
4789                 &tracker->slave_list[slave].res_list[RES_MTT];
4790         struct res_mtt *mtt;
4791         struct res_mtt *tmp;
4792         int state;
4794         int base;
4795         int err;
4796
4797         err = move_all_busy(dev, slave, RES_MTT);
4798         if (err)
4799                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4800                           slave);
4801
4802         spin_lock_irq(mlx4_tlock(dev));
4803         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4804                 spin_unlock_irq(mlx4_tlock(dev));
4805                 if (mtt->com.owner == slave) {
4806                         base = mtt->com.res_id;
4807                         state = mtt->com.from_state;
4808                         while (state != 0) {
4809                                 switch (state) {
4810                                 case RES_MTT_ALLOCATED:
4811                                         __mlx4_free_mtt_range(dev, base,
4812                                                               mtt->order);
4813                                         spin_lock_irq(mlx4_tlock(dev));
4814                                         rb_erase(&mtt->com.node,
4815                                                  &tracker->res_tree[RES_MTT]);
4816                                         list_del(&mtt->com.list);
4817                                         spin_unlock_irq(mlx4_tlock(dev));
4818                                         mlx4_release_resource(dev, slave, RES_MTT,
4819                                                               1 << mtt->order, 0);
4820                                         kfree(mtt);
4821                                         state = 0;
4822                                         break;
4823
4824                                 default:
4825                                         state = 0;
4826                                 }
4827                         }
4828                 }
4829                 spin_lock_irq(mlx4_tlock(dev));
4830         }
4831         spin_unlock_irq(mlx4_tlock(dev));
4832 }
4833
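/*
 * Detach every flow steering rule owned by the slave via the firmware
 * DETACH command and remove it from the resource tracker.
 */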
4834 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4835 {
4836         struct mlx4_priv *priv = mlx4_priv(dev);
4837         struct mlx4_resource_tracker *tracker =
4838                 &priv->mfunc.master.res_tracker;
4839         struct list_head *fs_rule_list =
4840                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4841         struct res_fs_rule *fs_rule;
4842         struct res_fs_rule *tmp;
4843         int state;
4844         u64 base;
4845         int err;
4846
4847         err = move_all_busy(dev, slave, RES_FS_RULE);
4848         if (err)
4849                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4850                           slave);
4851
4852         spin_lock_irq(mlx4_tlock(dev));
4853         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4854                 spin_unlock_irq(mlx4_tlock(dev));
4855                 if (fs_rule->com.owner == slave) {
4856                         base = fs_rule->com.res_id;
4857                         state = fs_rule->com.from_state;
4858                         while (state != 0) {
4859                                 switch (state) {
4860                                 case RES_FS_RULE_ALLOCATED:
4861                                         /* detach rule */
4862                                         err = mlx4_cmd(dev, base, 0, 0,
4863                                                        MLX4_QP_FLOW_STEERING_DETACH,
4864                                                        MLX4_CMD_TIME_CLASS_A,
4865                                                        MLX4_CMD_NATIVE);
4866
4867                                         spin_lock_irq(mlx4_tlock(dev));
4868                                         rb_erase(&fs_rule->com.node,
4869                                                  &tracker->res_tree[RES_FS_RULE]);
4870                                         list_del(&fs_rule->com.list);
4871                                         spin_unlock_irq(mlx4_tlock(dev));
4872                                         kfree(fs_rule);
4873                                         state = 0;
4874                                         break;
4875
4876                                 default:
4877                                         state = 0;
4878                                 }
4879                         }
4880                 }
4881                 spin_lock_irq(mlx4_tlock(dev));
4882         }
4883         spin_unlock_irq(mlx4_tlock(dev));
4884 }
4885
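/*
 * Tear down every EQ owned by the slave: RES_EQ_HW is returned to SW
 * ownership via HW2SW_EQ (the EQ number is the low 10 bits of the
 * resource id) and its MTT reference dropped, then RES_EQ_RESERVED
 * frees the tracker entry.
 */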
4886 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4887 {
4888         struct mlx4_priv *priv = mlx4_priv(dev);
4889         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4890         struct list_head *eq_list =
4891                 &tracker->slave_list[slave].res_list[RES_EQ];
4892         struct res_eq *eq;
4893         struct res_eq *tmp;
4894         int err;
4895         int state;
4897         int eqn;
4898
4899         err = move_all_busy(dev, slave, RES_EQ);
4900         if (err)
4901                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4902                           slave);
4903
4904         spin_lock_irq(mlx4_tlock(dev));
4905         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4906                 spin_unlock_irq(mlx4_tlock(dev));
4907                 if (eq->com.owner == slave) {
4908                         eqn = eq->com.res_id;
4909                         state = eq->com.from_state;
4910                         while (state != 0) {
4911                                 switch (state) {
4912                                 case RES_EQ_RESERVED:
4913                                         spin_lock_irq(mlx4_tlock(dev));
4914                                         rb_erase(&eq->com.node,
4915                                                  &tracker->res_tree[RES_EQ]);
4916                                         list_del(&eq->com.list);
4917                                         spin_unlock_irq(mlx4_tlock(dev));
4918                                         kfree(eq);
4919                                         state = 0;
4920                                         break;
4921
4922                                 case RES_EQ_HW:
4923                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4924                                                        1, MLX4_CMD_HW2SW_EQ,
4925                                                        MLX4_CMD_TIME_CLASS_A,
4926                                                        MLX4_CMD_NATIVE);
4927                                         if (err)
4928                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4929                                                          slave, eqn & 0x3ff);
4930                                         atomic_dec(&eq->mtt->ref_count);
4931                                         state = RES_EQ_RESERVED;
4932                                         break;
4933
4934                                 default:
4935                                         state = 0;
4936                                 }
4937                         }
4938                 }
4939                 spin_lock_irq(mlx4_tlock(dev));
4940         }
4941         spin_unlock_irq(mlx4_tlock(dev));
4942 }
4943
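/*
 * Free every counter owned by the slave and return its quota.  Unlike
 * the QP/CQ/SRQ teardown paths, no firmware command is needed here, so
 * the whole pass runs under the tracker lock.
 */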
4944 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4945 {
4946         struct mlx4_priv *priv = mlx4_priv(dev);
4947         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4948         struct list_head *counter_list =
4949                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4950         struct res_counter *counter;
4951         struct res_counter *tmp;
4952         int err;
4953         int index;
4954
4955         err = move_all_busy(dev, slave, RES_COUNTER);
4956         if (err)
4957                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4958                           slave);
4959
4960         spin_lock_irq(mlx4_tlock(dev));
4961         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4962                 if (counter->com.owner == slave) {
4963                         index = counter->com.res_id;
4964                         rb_erase(&counter->com.node,
4965                                  &tracker->res_tree[RES_COUNTER]);
4966                         list_del(&counter->com.list);
4967                         kfree(counter);
4968                         __mlx4_counter_free(dev, index);
4969                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4970                 }
4971         }
4972         spin_unlock_irq(mlx4_tlock(dev));
4973 }
4974
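/*
 * Free every XRC domain owned by the slave and remove it from the
 * resource tracker.
 */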
4975 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4976 {
4977         struct mlx4_priv *priv = mlx4_priv(dev);
4978         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4979         struct list_head *xrcdn_list =
4980                 &tracker->slave_list[slave].res_list[RES_XRCD];
4981         struct res_xrcdn *xrcd;
4982         struct res_xrcdn *tmp;
4983         int err;
4984         int xrcdn;
4985
4986         err = move_all_busy(dev, slave, RES_XRCD);
4987         if (err)
4988                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4989                           slave);
4990
4991         spin_lock_irq(mlx4_tlock(dev));
4992         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4993                 if (xrcd->com.owner == slave) {
4994                         xrcdn = xrcd->com.res_id;
4995                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4996                         list_del(&xrcd->com.list);
4997                         kfree(xrcd);
4998                         __mlx4_xrcd_free(dev, xrcdn);
4999                 }
5000         }
5001         spin_unlock_irq(mlx4_tlock(dev));
5002 }
5003
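/*
 * Release everything a slave (VF) owns.  RoCE GIDs are reset first;
 * the per-type cleanup then runs under the slave's tracker mutex,
 * roughly in dependency order (flow rules before the QPs they point
 * at, QPs before the CQs, SRQs and MTTs they reference).
 */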
5004 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5005 {
5006         struct mlx4_priv *priv = mlx4_priv(dev);
5007         mlx4_reset_roce_gids(dev, slave);
5008         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5009         rem_slave_vlans(dev, slave);
5010         rem_slave_macs(dev, slave);
5011         rem_slave_fs_rule(dev, slave);
5012         rem_slave_qps(dev, slave);
5013         rem_slave_srqs(dev, slave);
5014         rem_slave_cqs(dev, slave);
5015         rem_slave_mrs(dev, slave);
5016         rem_slave_eqs(dev, slave);
5017         rem_slave_mtts(dev, slave);
5018         rem_slave_counters(dev, slave);
5019         rem_slave_xrcdns(dev, slave);
5020         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5021 }
5022
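/*
 * Deferred work that applies an administratively set VLAN (VST), or a
 * return to VGT, to every eligible QP of a VF via UPDATE_QP.  It must
 * run on the PF.  QPs that never reached INIT2RTR, reserved QPs, RSS
 * QPs and QPs on other ports are skipped.  If all updates succeed, the
 * previously registered VLAN, if any, is unregistered.
 */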
5023 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5024 {
5025         struct mlx4_vf_immed_vlan_work *work =
5026                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5027         struct mlx4_cmd_mailbox *mailbox;
5028         struct mlx4_update_qp_context *upd_context;
5029         struct mlx4_dev *dev = &work->priv->dev;
5030         struct mlx4_resource_tracker *tracker =
5031                 &work->priv->mfunc.master.res_tracker;
5032         struct list_head *qp_list =
5033                 &tracker->slave_list[work->slave].res_list[RES_QP];
5034         struct res_qp *qp;
5035         struct res_qp *tmp;
5036         u64 qp_path_mask_vlan_ctrl =
5037                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5038                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5039                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5040                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5041                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5042                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5043
5044         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5045                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5046                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5047                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5048                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5049                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5050                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5051
5052         int err;
5053         int port, errors = 0;
5054         u8 vlan_control;
5055
5056         if (mlx4_is_slave(dev)) {
5057                 mlx4_warn(dev, "Trying to update QP in slave %d\n",
5058                           work->slave);
5059                 goto out;
5060         }
5061
5062         mailbox = mlx4_alloc_cmd_mailbox(dev);
5063         if (IS_ERR(mailbox))
5064                 goto out;
5065         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5066                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5067                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5068                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5069                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5070                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5071                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5072         else if (!work->vlan_id)
5073                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5074                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5075         else
5076                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5077                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5078                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5079
5080         upd_context = mailbox->buf;
5081         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5082
5083         spin_lock_irq(mlx4_tlock(dev));
5084         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5085                 spin_unlock_irq(mlx4_tlock(dev));
5086                 if (qp->com.owner == work->slave) {
5087                         if (qp->com.from_state != RES_QP_HW ||
5088                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
5089                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5090                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5091                                 spin_lock_irq(mlx4_tlock(dev));
5092                                 continue;
5093                         }
5094                         port = (qp->sched_queue >> 6 & 1) + 1;
5095                         if (port != work->port) {
5096                                 spin_lock_irq(mlx4_tlock(dev));
5097                                 continue;
5098                         }
5099                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5100                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5101                         else
5102                                 upd_context->primary_addr_path_mask =
5103                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5104                         if (work->vlan_id == MLX4_VGT) {
5105                                 upd_context->qp_context.param3 = qp->param3;
5106                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5107                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5108                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5109                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5110                                 upd_context->qp_context.pri_path.feup = qp->feup;
5111                                 upd_context->qp_context.pri_path.sched_queue =
5112                                         qp->sched_queue;
5113                         } else {
5114                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5115                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5116                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5117                                 upd_context->qp_context.pri_path.fvl_rx =
5118                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5119                                 upd_context->qp_context.pri_path.fl =
5120                                         qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5121                                 upd_context->qp_context.pri_path.feup =
5122                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5123                                 upd_context->qp_context.pri_path.sched_queue =
5124                                         qp->sched_queue & 0xC7;
5125                                 upd_context->qp_context.pri_path.sched_queue |=
5126                                         ((work->qos & 0x7) << 3);
5127                                 upd_context->qp_mask |=
5128                                         cpu_to_be64(1ULL <<
5129                                                     MLX4_UPD_QP_MASK_QOS_VPP);
5130                                 upd_context->qp_context.qos_vport =
5131                                         work->qos_vport;
5132                         }
5133
5134                         err = mlx4_cmd(dev, mailbox->dma,
5135                                        qp->local_qpn & 0xffffff,
5136                                        0, MLX4_CMD_UPDATE_QP,
5137                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5138                         if (err) {
5139                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5140                                           work->slave, port, qp->local_qpn, err);
5141                                 errors++;
5142                         }
5143                 }
5144                 spin_lock_irq(mlx4_tlock(dev));
5145         }
5146         spin_unlock_irq(mlx4_tlock(dev));
5147         mlx4_free_cmd_mailbox(dev, mailbox);
5148
5149         if (errors)
5150                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5151                          errors, work->slave, work->port);
5152
5153         /* Unregister the previous vlan_id, if one was set, provided no
5154          * errors occurred while updating the QPs.
5155          */
5156         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5157             NO_INDX != work->orig_vlan_ix)
5158                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5159                                        work->orig_vlan_id);
5160 out:
5161         kfree(work);
5162         return;
5163 }