/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT       2
#define MLX4_VF_COUNTERS_PER_PORT       1

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

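/*
 * Tracked resources of each type live in a per-type red-black tree
 * keyed by res_id; the two helpers below are the usual rbtree
 * lookup/insert pair over struct res_common.
 */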
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
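/* Charge @count instances of @res_type to @slave (per @port when
 * port > 0).  A slave may never exceed quota[slave]; consumption up
 * to guaranteed[slave] is drawn from the reserved pool, and anything
 * beyond that must fit in the shared free pool without eating into
 * the reservations still owed to other slaves.
 */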
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

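/* Undo a previous mlx4_grant_resource() charge; whatever drops back
 * below the slave's guarantee is returned to the reserved pool.
 */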
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

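/* Default quota policy: every function is guaranteed an equal slice
 * of half the instances, and its quota is that guarantee plus up to
 * half of the total.  The PF additionally absorbs the reserved MTTs.
 */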
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

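/* Number of VFs that can each be guaranteed a counter per port once
 * the sink counter and the PF's per-port counters are set aside.
 */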
static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
{
        /* reduce the sink counter */
        return (dev->caps.max_counters - 1 -
                (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
                / MLX4_MAX_PORTS;
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
        int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->guaranteed[t] =
                                                MLX4_PF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else if (t <= max_vfs_guarantee_counter)
                                        res_alloc->guaranteed[t] =
                                                MLX4_VF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
                                res_alloc->res_free -= res_alloc->guaranteed[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

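/* Translate the virtual pkey index a slave wrote into its QP context
 * to the physical index.  Byte 64 of the mailbox is the primary-path
 * sched_queue (bit 6 selects the port) and byte 35 is the pkey index;
 * the QP context itself starts at mailbox offset 8.
 */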
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

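/* Rebase the GID index in a slave's QP context onto the slave's slice
 * of the physical GID table on Ethernet (RoCE) ports; on IB ports the
 * slave id itself is used as the index.
 */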
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port);

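/* Apply the PF-administered vport policy (VST vlan tagging, QoS,
 * spoof-check MAC, link state) to a QP context the slave is handing
 * to the firmware; struct res_qp keeps the original values so they
 * can be restored if the vport reverts to VGT.
 */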
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port);

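/* The slave's QPC names an explicit counter index: verify the slave
 * owns that counter, and bind the counter to the port on first use.
 */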
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
                                   int counter_index)
{
        struct res_common *r;
        struct res_counter *counter;
        int ret = 0;

        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
                return ret;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, counter_index, RES_COUNTER);
        if (!r || r->owner != slave) {
                ret = -EINVAL;
        } else {
                counter = container_of(r, struct res_counter, com);
                if (!counter->port)
                        counter->port = port;
        }
        spin_unlock_irq(mlx4_tlock(dev));
        return ret;
}

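/* The slave's QPC points at the sink counter: reuse a counter the
 * slave already owns on this port, else try to allocate one.  Running
 * out (-ENOSPC) is not fatal; the QP simply stays on the sink
 * counter.
 */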
static int handle_unexisting_counter(struct mlx4_dev *dev,
                                     struct mlx4_qp_context *qpc, u8 slave,
                                     int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (port == counter->port) {
                        qpc->pri_path.counter_index = counter->com.res_id;
                        spin_unlock_irq(mlx4_tlock(dev));
                        return 0;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        /* No existing counter, need to allocate a new counter */
        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
                                port);
        if (err == -ENOENT) {
                err = 0;
        } else if (err && err != -ENOSPC) {
                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
                         __func__, slave, err);
        } else {
                qpc->pri_path.counter_index = counter_idx;
                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
                         __func__, slave, qpc->pri_path.counter_index);
                err = 0;
        }

        return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port)
{
        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
                return handle_existing_counter(dev, slave, port,
                                               qpc->pri_path.counter_index);

        return handle_unexisting_counter(dev, qpc, slave, port);
}

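/* Constructors for the per-type tracker entries.  Each entry starts
 * in its initial (reserved/allocated) state and is later linked into
 * the per-type rbtree and the owning slave's res_list by
 * add_res_range().
 */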
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id, extra);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

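/* Sum the stats of every counter @slave owns on @port into @data.
 * The counter ids are snapshotted under the tracker lock first,
 * because querying them sends a firmware command and may sleep.
 */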
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
                          struct mlx4_counter *data)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        int *counters_arr;
        int i = 0, err = 0;

        memset(data, 0, sizeof(*data));

        counters_arr = kmalloc_array(dev->caps.max_counters,
                                     sizeof(*counters_arr), GFP_KERNEL);
        if (!counters_arr)
                return -ENOMEM;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (counter->port == port) {
                        counters_arr[i] = (int)tmp->res_id;
                        i++;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
        counters_arr[i] = -1;

        i = 0;

        while (counters_arr[i] != -1) {
                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
                                             0);
                if (err) {
                        memset(data, 0, sizeof(*data));
                        goto table_changed;
                }
                i++;
        }

table_changed:
        kfree(counters_arr);
        return 0;
}

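/* Create tracker entries for the ids [base, base + count) owned by
 * @slave and publish them in the rbtree and on the slave's list;
 * the insertion is all-or-nothing under the tracker lock.
 */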
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i)
                rb_erase(&res_arr[i]->node, root);

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

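/* remove_*_ok(): a tracked resource may only be removed from its
 * initial (reserved/allocated) state; anything busy or still
 * referenced is -EBUSY, any other state is -EPERM.
 */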
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

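/* Inverse of add_res_range(): verify that the whole id range is owned
 * by @slave and removable before deleting any of it.
 */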
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

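/* Begin a QP state transition: validate the requested move against
 * the current state (RESERVED <-> MAPPED <-> HW), then park the entry
 * in RES_QP_BUSY with from_state/to_state recording the transition
 * until the caller completes or aborts the move.
 */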
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

1487 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1488                                 enum res_mpt_states state, struct res_mpt **mpt)
1489 {
1490         struct mlx4_priv *priv = mlx4_priv(dev);
1491         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1492         struct res_mpt *r;
1493         int err = 0;
1494
1495         spin_lock_irq(mlx4_tlock(dev));
1496         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1497         if (!r)
1498                 err = -ENOENT;
1499         else if (r->com.owner != slave)
1500                 err = -EPERM;
1501         else {
1502                 switch (state) {
1503                 case RES_MPT_BUSY:
1504                         err = -EINVAL;
1505                         break;
1506
1507                 case RES_MPT_RESERVED:
1508                         if (r->com.state != RES_MPT_MAPPED)
1509                                 err = -EINVAL;
1510                         break;
1511
1512                 case RES_MPT_MAPPED:
1513                         if (r->com.state != RES_MPT_RESERVED &&
1514                             r->com.state != RES_MPT_HW)
1515                                 err = -EINVAL;
1516                         break;
1517
1518                 case RES_MPT_HW:
1519                         if (r->com.state != RES_MPT_MAPPED)
1520                                 err = -EINVAL;
1521                         break;
1522                 default:
1523                         err = -EINVAL;
1524                 }
1525
1526                 if (!err) {
1527                         r->com.from_state = r->com.state;
1528                         r->com.to_state = state;
1529                         r->com.state = RES_MPT_BUSY;
1530                         if (mpt)
1531                                 *mpt = r;
1532                 }
1533         }
1534
1535         spin_unlock_irq(mlx4_tlock(dev));
1536
1537         return err;
1538 }
1539
1540 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1541                                 enum res_eq_states state, struct res_eq **eq)
1542 {
1543         struct mlx4_priv *priv = mlx4_priv(dev);
1544         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1545         struct res_eq *r;
1546         int err = 0;
1547
1548         spin_lock_irq(mlx4_tlock(dev));
1549         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1550         if (!r)
1551                 err = -ENOENT;
1552         else if (r->com.owner != slave)
1553                 err = -EPERM;
1554         else {
1555                 switch (state) {
1556                 case RES_EQ_BUSY:
1557                         err = -EINVAL;
1558                         break;
1559
1560                 case RES_EQ_RESERVED:
1561                         if (r->com.state != RES_EQ_HW)
1562                                 err = -EINVAL;
1563                         break;
1564
1565                 case RES_EQ_HW:
1566                         if (r->com.state != RES_EQ_RESERVED)
1567                                 err = -EINVAL;
1568                         break;
1569
1570                 default:
1571                         err = -EINVAL;
1572                 }
1573
1574                 if (!err) {
1575                         r->com.from_state = r->com.state;
1576                         r->com.to_state = state;
1577                         r->com.state = RES_EQ_BUSY;
1578                         if (eq)
1579                                 *eq = r;
1580                 }
1581         }
1582
1583         spin_unlock_irq(mlx4_tlock(dev));
1584
1585         return err;
1586 }
1587
1588 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1589                                 enum res_cq_states state, struct res_cq **cq)
1590 {
1591         struct mlx4_priv *priv = mlx4_priv(dev);
1592         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1593         struct res_cq *r;
1594         int err;
1595
1596         spin_lock_irq(mlx4_tlock(dev));
1597         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1598         if (!r) {
1599                 err = -ENOENT;
1600         } else if (r->com.owner != slave) {
1601                 err = -EPERM;
1602         } else if (state == RES_CQ_ALLOCATED) {
1603                 if (r->com.state != RES_CQ_HW)
1604                         err = -EINVAL;
1605                 else if (atomic_read(&r->ref_count))
1606                         err = -EBUSY;
1607                 else
1608                         err = 0;
1609         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1610                 err = -EINVAL;
1611         } else {
1612                 err = 0;
1613         }
1614
1615         if (!err) {
1616                 r->com.from_state = r->com.state;
1617                 r->com.to_state = state;
1618                 r->com.state = RES_CQ_BUSY;
1619                 if (cq)
1620                         *cq = r;
1621         }
1622
1623         spin_unlock_irq(mlx4_tlock(dev));
1624
1625         return err;
1626 }
1627
1628 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1629                                  enum res_srq_states state, struct res_srq **srq)
1630 {
1631         struct mlx4_priv *priv = mlx4_priv(dev);
1632         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1633         struct res_srq *r;
1634         int err = 0;
1635
1636         spin_lock_irq(mlx4_tlock(dev));
1637         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1638         if (!r) {
1639                 err = -ENOENT;
1640         } else if (r->com.owner != slave) {
1641                 err = -EPERM;
1642         } else if (state == RES_SRQ_ALLOCATED) {
1643                 if (r->com.state != RES_SRQ_HW)
1644                         err = -EINVAL;
1645                 else if (atomic_read(&r->ref_count))
1646                         err = -EBUSY;
1647         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1648                 err = -EINVAL;
1649         }
1650
1651         if (!err) {
1652                 r->com.from_state = r->com.state;
1653                 r->com.to_state = state;
1654                 r->com.state = RES_SRQ_BUSY;
1655                 if (srq)
1656                         *srq = r;
1657         }
1658
1659         spin_unlock_irq(mlx4_tlock(dev));
1660
1661         return err;
1662 }
1663
1664 static void res_abort_move(struct mlx4_dev *dev, int slave,
1665                            enum mlx4_resource type, int id)
1666 {
1667         struct mlx4_priv *priv = mlx4_priv(dev);
1668         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1669         struct res_common *r;
1670
1671         spin_lock_irq(mlx4_tlock(dev));
1672         r = res_tracker_lookup(&tracker->res_tree[type], id);
1673         if (r && (r->owner == slave))
1674                 r->state = r->from_state;
1675         spin_unlock_irq(mlx4_tlock(dev));
1676 }
1677
1678 static void res_end_move(struct mlx4_dev *dev, int slave,
1679                          enum mlx4_resource type, int id)
1680 {
1681         struct mlx4_priv *priv = mlx4_priv(dev);
1682         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1683         struct res_common *r;
1684
1685         spin_lock_irq(mlx4_tlock(dev));
1686         r = res_tracker_lookup(&tracker->res_tree[type], id);
1687         if (r && (r->owner == slave))
1688                 r->state = r->to_state;
1689         spin_unlock_irq(mlx4_tlock(dev));
1690 }
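
/*
 * Illustrative use of the move protocol above (a sketch, not a new call
 * site; real users such as mpt_alloc_res() and mlx4_SW2HW_MPT_wrapper()
 * appear later in this file):
 *
 *	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
 *	if (err)
 *		return err;			(entry left in its old state)
 *	err = ...issue the firmware command...;
 *	if (err)
 *		res_abort_move(dev, slave, RES_MPT, id);  (restore from_state)
 *	else
 *		res_end_move(dev, slave, RES_MPT, id);    (commit to_state)
 *
 * Between start and end/abort the entry is parked in the matching *_BUSY
 * state, so concurrent movers and rem_res_range() back off.
 */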
1691
1692 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1693 {
1694         return mlx4_is_qp_reserved(dev, qpn) &&
1695                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1696 }
1697
1698 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1699 {
1700         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1701 }
1702
1703 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1704                         u64 in_param, u64 *out_param)
1705 {
1706         int err;
1707         int count;
1708         int align;
1709         int base;
1710         int qpn;
1711         u8 flags;
1712
1713         switch (op) {
1714         case RES_OP_RESERVE:
1715                 count = get_param_l(&in_param) & 0xffffff;
1716                 /* Turn off all unsupported QP allocation flags that the
1717                  * slave tries to set.
1718                  */
1719                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1720                 align = get_param_h(&in_param);
1721                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1722                 if (err)
1723                         return err;
1724
1725                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1726                 if (err) {
1727                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1728                         return err;
1729                 }
1730
1731                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1732                 if (err) {
1733                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1734                         __mlx4_qp_release_range(dev, base, count);
1735                         return err;
1736                 }
1737                 set_param_l(out_param, base);
1738                 break;
1739         case RES_OP_MAP_ICM:
1740                 qpn = get_param_l(&in_param) & 0x7fffff;
1741                 if (valid_reserved(dev, slave, qpn)) {
1742                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1743                         if (err)
1744                                 return err;
1745                 }
1746
1747                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1748                                            NULL, 1);
1749                 if (err)
1750                         return err;
1751
1752                 if (!fw_reserved(dev, qpn)) {
1753                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1754                         if (err) {
1755                                 res_abort_move(dev, slave, RES_QP, qpn);
1756                                 return err;
1757                         }
1758                 }
1759
1760                 res_end_move(dev, slave, RES_QP, qpn);
1761                 break;
1762
1763         default:
1764                 err = -EINVAL;
1765                 break;
1766         }
1767         return err;
1768 }
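
/*
 * Layout of in_param for the RES_OP_RESERVE case above, reconstructed from
 * the get_param_l()/get_param_h() accessors (informational note):
 *
 *	bits  0..23	number of QPs to reserve
 *	bits 24..31	allocation flags, masked by dev->caps.alloc_res_qp_mask
 *	bits 32..63	alignment of the reserved range
 *
 * For example, a hypothetical request for 8 QPs aligned to 8 with no flags
 * would pass in_param == ((u64)8 << 32) | 8 and get the base QPN back in
 * the low 32 bits of out_param.
 */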
1769
1770 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1771                          u64 in_param, u64 *out_param)
1772 {
1773         int err = -EINVAL;
1774         int base;
1775         int order;
1776
1777         if (op != RES_OP_RESERVE_AND_MAP)
1778                 return err;
1779
1780         order = get_param_l(&in_param);
1781
1782         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1783         if (err)
1784                 return err;
1785
1786         base = __mlx4_alloc_mtt_range(dev, order);
1787         if (base == -1) {
1788                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1789                 return -ENOMEM;
1790         }
1791
1792         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1793         if (err) {
1794                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1795                 __mlx4_free_mtt_range(dev, base, order);
1796         } else {
1797                 set_param_l(out_param, base);
1798         }
1799
1800         return err;
1801 }
1802
1803 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1804                          u64 in_param, u64 *out_param)
1805 {
1806         int err = -EINVAL;
1807         int index;
1808         int id;
1809         struct res_mpt *mpt;
1810
1811         switch (op) {
1812         case RES_OP_RESERVE:
1813                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1814                 if (err)
1815                         break;
1816
1817                 index = __mlx4_mpt_reserve(dev);
1818                 if (index == -1) {
1819                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1820                         break;
1821                 }
1822                 id = index & mpt_mask(dev);
1823
1824                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1825                 if (err) {
1826                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1827                         __mlx4_mpt_release(dev, index);
1828                         break;
1829                 }
1830                 set_param_l(out_param, index);
1831                 break;
1832         case RES_OP_MAP_ICM:
1833                 index = get_param_l(&in_param);
1834                 id = index & mpt_mask(dev);
1835                 err = mr_res_start_move_to(dev, slave, id,
1836                                            RES_MPT_MAPPED, &mpt);
1837                 if (err)
1838                         return err;
1839
1840                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1841                 if (err) {
1842                         res_abort_move(dev, slave, RES_MPT, id);
1843                         return err;
1844                 }
1845
1846                 res_end_move(dev, slave, RES_MPT, id);
1847                 break;
1848         }
1849         return err;
1850 }
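
/*
 * Naming note for mpt_alloc_res()/mpt_free_res(): @index is the full MPT
 * key as handed to the slave (including the key tag bits), while
 * @id = index & mpt_mask(dev) is the tracker handle.  The full index is
 * passed to add_res_range() as @extra and lands in mpt->key, which is how
 * mpt_free_res() and the MPT command wrappers below recover it.
 */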
1851
1852 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1853                         u64 in_param, u64 *out_param)
1854 {
1855         int cqn;
1856         int err;
1857
1858         switch (op) {
1859         case RES_OP_RESERVE_AND_MAP:
1860                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1861                 if (err)
1862                         break;
1863
1864                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1865                 if (err) {
1866                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1867                         break;
1868                 }
1869
1870                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1871                 if (err) {
1872                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1873                         __mlx4_cq_free_icm(dev, cqn);
1874                         break;
1875                 }
1876
1877                 set_param_l(out_param, cqn);
1878                 break;
1879
1880         default:
1881                 err = -EINVAL;
1882         }
1883
1884         return err;
1885 }
1886
1887 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1888                          u64 in_param, u64 *out_param)
1889 {
1890         int srqn;
1891         int err;
1892
1893         switch (op) {
1894         case RES_OP_RESERVE_AND_MAP:
1895                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1896                 if (err)
1897                         break;
1898
1899                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1900                 if (err) {
1901                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1902                         break;
1903                 }
1904
1905                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1906                 if (err) {
1907                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1908                         __mlx4_srq_free_icm(dev, srqn);
1909                         break;
1910                 }
1911
1912                 set_param_l(out_param, srqn);
1913                 break;
1914
1915         default:
1916                 err = -EINVAL;
1917         }
1918
1919         return err;
1920 }
1921
1922 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1923                                      u8 smac_index, u64 *mac)
1924 {
1925         struct mlx4_priv *priv = mlx4_priv(dev);
1926         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1927         struct list_head *mac_list =
1928                 &tracker->slave_list[slave].res_list[RES_MAC];
1929         struct mac_res *res, *tmp;
1930
1931         list_for_each_entry_safe(res, tmp, mac_list, list) {
1932                 if (res->smac_index == smac_index && res->port == (u8) port) {
1933                         *mac = res->mac;
1934                         return 0;
1935                 }
1936         }
1937         return -ENOENT;
1938 }
1939
1940 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1941 {
1942         struct mlx4_priv *priv = mlx4_priv(dev);
1943         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1944         struct list_head *mac_list =
1945                 &tracker->slave_list[slave].res_list[RES_MAC];
1946         struct mac_res *res, *tmp;
1947
1948         list_for_each_entry_safe(res, tmp, mac_list, list) {
1949                 if (res->mac == mac && res->port == (u8) port) {
1950                         /* mac found. update ref count */
1951                         ++res->ref_count;
1952                         return 0;
1953                 }
1954         }
1955
1956         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1957                 return -EINVAL;
1958         res = kzalloc(sizeof(*res), GFP_KERNEL);
1959         if (!res) {
1960                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1961                 return -ENOMEM;
1962         }
1963         res->mac = mac;
1964         res->port = (u8) port;
1965         res->smac_index = smac_index;
1966         res->ref_count = 1;
1967         list_add_tail(&res->list,
1968                       &tracker->slave_list[slave].res_list[RES_MAC]);
1969         return 0;
1970 }
1971
1972 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1973                                int port)
1974 {
1975         struct mlx4_priv *priv = mlx4_priv(dev);
1976         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1977         struct list_head *mac_list =
1978                 &tracker->slave_list[slave].res_list[RES_MAC];
1979         struct mac_res *res, *tmp;
1980
1981         list_for_each_entry_safe(res, tmp, mac_list, list) {
1982                 if (res->mac == mac && res->port == (u8) port) {
1983                         if (!--res->ref_count) {
1984                                 list_del(&res->list);
1985                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1986                                 kfree(res);
1987                         }
1988                         break;
1989                 }
1990         }
1991 }
1992
1993 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1994 {
1995         struct mlx4_priv *priv = mlx4_priv(dev);
1996         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1997         struct list_head *mac_list =
1998                 &tracker->slave_list[slave].res_list[RES_MAC];
1999         struct mac_res *res, *tmp;
2000         int i;
2001
2002         list_for_each_entry_safe(res, tmp, mac_list, list) {
2003                 list_del(&res->list);
2004                 /* dereference the MAC the number of times the slave referenced it */
2005                 for (i = 0; i < res->ref_count; i++)
2006                         __mlx4_unregister_mac(dev, res->port, res->mac);
2007                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2008                 kfree(res);
2009         }
2010 }
2011
2012 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2013                          u64 in_param, u64 *out_param, int in_port)
2014 {
2015         int err = -EINVAL;
2016         int port;
2017         u64 mac;
2018         u8 smac_index;
2019
2020         if (op != RES_OP_RESERVE_AND_MAP)
2021                 return err;
2022
2023         port = !in_port ? get_param_l(out_param) : in_port;
2024         port = mlx4_slave_convert_port(dev, slave, port);
2026
2027         if (port < 0)
2028                 return -EINVAL;
2029         mac = in_param;
2030
2031         err = __mlx4_register_mac(dev, port, mac);
2032         if (err >= 0) {
2033                 smac_index = err;
2034                 set_param_l(out_param, err);
2035                 err = 0;
2036         }
2037
2038         if (!err) {
2039                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2040                 if (err)
2041                         __mlx4_unregister_mac(dev, port, mac);
2042         }
2043         return err;
2044 }
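
/*
 * Return-value convention used in mac_alloc_res(): on success
 * __mlx4_register_mac() returns the non-negative SMAC index, which is both
 * reported to the slave through out_param and cached per slave by
 * mac_add_to_slave(), so mac_find_smac_ix_in_slave() can later map an SMAC
 * index back to its MAC address.
 */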
2045
2046 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2047                              int port, int vlan_index)
2048 {
2049         struct mlx4_priv *priv = mlx4_priv(dev);
2050         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2051         struct list_head *vlan_list =
2052                 &tracker->slave_list[slave].res_list[RES_VLAN];
2053         struct vlan_res *res, *tmp;
2054
2055         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2056                 if (res->vlan == vlan && res->port == (u8) port) {
2057                         /* vlan found. update ref count */
2058                         ++res->ref_count;
2059                         return 0;
2060                 }
2061         }
2062
2063         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2064                 return -EINVAL;
2065         res = kzalloc(sizeof(*res), GFP_KERNEL);
2066         if (!res) {
2067                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2068                 return -ENOMEM;
2069         }
2070         res->vlan = vlan;
2071         res->port = (u8) port;
2072         res->vlan_index = vlan_index;
2073         res->ref_count = 1;
2074         list_add_tail(&res->list,
2075                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2076         return 0;
2077 }
2078
2080 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2081                                 int port)
2082 {
2083         struct mlx4_priv *priv = mlx4_priv(dev);
2084         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2085         struct list_head *vlan_list =
2086                 &tracker->slave_list[slave].res_list[RES_VLAN];
2087         struct vlan_res *res, *tmp;
2088
2089         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2090                 if (res->vlan == vlan && res->port == (u8) port) {
2091                         if (!--res->ref_count) {
2092                                 list_del(&res->list);
2093                                 mlx4_release_resource(dev, slave, RES_VLAN,
2094                                                       1, port);
2095                                 kfree(res);
2096                         }
2097                         break;
2098                 }
2099         }
2100 }
2101
2102 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2103 {
2104         struct mlx4_priv *priv = mlx4_priv(dev);
2105         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2106         struct list_head *vlan_list =
2107                 &tracker->slave_list[slave].res_list[RES_VLAN];
2108         struct vlan_res *res, *tmp;
2109         int i;
2110
2111         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2112                 list_del(&res->list);
2113                 /* dereference the VLAN the number of times the slave referenced it */
2114                 for (i = 0; i < res->ref_count; i++)
2115                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2116                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2117                 kfree(res);
2118         }
2119 }
2120
2121 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2122                           u64 in_param, u64 *out_param, int in_port)
2123 {
2124         struct mlx4_priv *priv = mlx4_priv(dev);
2125         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2126         int err;
2127         u16 vlan;
2128         int vlan_index;
2129         int port;
2130
2131         port = !in_port ? get_param_l(out_param) : in_port;
2132
2133         if (!port || op != RES_OP_RESERVE_AND_MAP)
2134                 return -EINVAL;
2135
2136         port = mlx4_slave_convert_port(dev, slave, port);
2138
2139         if (port < 0)
2140                 return -EINVAL;
2141         /* Upstream kernels made reg/unreg vlan a NOP; continue that behaviour here. */
2142         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2143                 slave_state[slave].old_vlan_api = true;
2144                 return 0;
2145         }
2146
2147         vlan = (u16) in_param;
2148
2149         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2150         if (!err) {
2151                 set_param_l(out_param, (u32) vlan_index);
2152                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2153                 if (err)
2154                         __mlx4_unregister_vlan(dev, port, vlan);
2155         }
2156         return err;
2157 }
2158
2159 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2160                              u64 in_param, u64 *out_param, int port)
2161 {
2162         u32 index;
2163         int err;
2164
2165         if (op != RES_OP_RESERVE)
2166                 return -EINVAL;
2167
2168         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2169         if (err)
2170                 return err;
2171
2172         err = __mlx4_counter_alloc(dev, &index);
2173         if (err) {
2174                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2175                 return err;
2176         }
2177
2178         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2179         if (err) {
2180                 __mlx4_counter_free(dev, index);
2181                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2182         } else {
2183                 set_param_l(out_param, index);
2184         }
2185
2186         return err;
2187 }
2188
2189 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2190                            u64 in_param, u64 *out_param)
2191 {
2192         u32 xrcdn;
2193         int err;
2194
2195         if (op != RES_OP_RESERVE)
2196                 return -EINVAL;
2197
2198         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2199         if (err)
2200                 return err;
2201
2202         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2203         if (err)
2204                 __mlx4_xrcd_free(dev, xrcdn);
2205         else
2206                 set_param_l(out_param, xrcdn);
2207
2208         return err;
2209 }
2210
2211 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2212                            struct mlx4_vhcr *vhcr,
2213                            struct mlx4_cmd_mailbox *inbox,
2214                            struct mlx4_cmd_mailbox *outbox,
2215                            struct mlx4_cmd_info *cmd)
2216 {
2217         int err;
2218         int alop = vhcr->op_modifier;
2219
2220         switch (vhcr->in_modifier & 0xFF) {
2221         case RES_QP:
2222                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2223                                    vhcr->in_param, &vhcr->out_param);
2224                 break;
2225
2226         case RES_MTT:
2227                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2228                                     vhcr->in_param, &vhcr->out_param);
2229                 break;
2230
2231         case RES_MPT:
2232                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2233                                     vhcr->in_param, &vhcr->out_param);
2234                 break;
2235
2236         case RES_CQ:
2237                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2238                                    vhcr->in_param, &vhcr->out_param);
2239                 break;
2240
2241         case RES_SRQ:
2242                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2243                                     vhcr->in_param, &vhcr->out_param);
2244                 break;
2245
2246         case RES_MAC:
2247                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2248                                     vhcr->in_param, &vhcr->out_param,
2249                                     (vhcr->in_modifier >> 8) & 0xFF);
2250                 break;
2251
2252         case RES_VLAN:
2253                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2254                                      vhcr->in_param, &vhcr->out_param,
2255                                      (vhcr->in_modifier >> 8) & 0xFF);
2256                 break;
2257
2258         case RES_COUNTER:
2259                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2260                                         vhcr->in_param, &vhcr->out_param, 0);
2261                 break;
2262
2263         case RES_XRCD:
2264                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2265                                       vhcr->in_param, &vhcr->out_param);
2266                 break;
2267
2268         default:
2269                 err = -EINVAL;
2270                 break;
2271         }
2272
2273         return err;
2274 }
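
/*
 * Decoding of the ALLOC_RES command words as dispatched above
 * (informational; FREE_RES below uses the same encoding):
 *
 *	vhcr->in_modifier	bits 0..7  resource type (RES_QP, RES_MAC, ...)
 *				bits 8..15 port, used by the MAC and VLAN cases
 *	vhcr->op_modifier	allocation op: RES_OP_RESERVE,
 *				RES_OP_RESERVE_AND_MAP or RES_OP_MAP_ICM
 *	vhcr->in_param		resource-specific arguments and results,
 *	vhcr->out_param		packed with get/set_param_l() (low 32 bits)
 *				and get/set_param_h() (high 32 bits)
 */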
2275
2276 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2277                        u64 in_param)
2278 {
2279         int err;
2280         int count;
2281         int base;
2282         int qpn;
2283
2284         switch (op) {
2285         case RES_OP_RESERVE:
2286                 base = get_param_l(&in_param) & 0x7fffff;
2287                 count = get_param_h(&in_param);
2288                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2289                 if (err)
2290                         break;
2291                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2292                 __mlx4_qp_release_range(dev, base, count);
2293                 break;
2294         case RES_OP_MAP_ICM:
2295                 qpn = get_param_l(&in_param) & 0x7fffff;
2296                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2297                                            NULL, 0);
2298                 if (err)
2299                         return err;
2300
2301                 if (!fw_reserved(dev, qpn))
2302                         __mlx4_qp_free_icm(dev, qpn);
2303
2304                 res_end_move(dev, slave, RES_QP, qpn);
2305
2306                 if (valid_reserved(dev, slave, qpn))
2307                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2308                 break;
2309         default:
2310                 err = -EINVAL;
2311                 break;
2312         }
2313         return err;
2314 }
2315
2316 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2317                         u64 in_param, u64 *out_param)
2318 {
2319         int err = -EINVAL;
2320         int base;
2321         int order;
2322
2323         if (op != RES_OP_RESERVE_AND_MAP)
2324                 return err;
2325
2326         base = get_param_l(&in_param);
2327         order = get_param_h(&in_param);
2328         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2329         if (!err) {
2330                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2331                 __mlx4_free_mtt_range(dev, base, order);
2332         }
2333         return err;
2334 }
2335
2336 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2337                         u64 in_param)
2338 {
2339         int err = -EINVAL;
2340         int index;
2341         int id;
2342         struct res_mpt *mpt;
2343
2344         switch (op) {
2345         case RES_OP_RESERVE:
2346                 index = get_param_l(&in_param);
2347                 id = index & mpt_mask(dev);
2348                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2349                 if (err)
2350                         break;
2351                 index = mpt->key;
2352                 put_res(dev, slave, id, RES_MPT);
2353
2354                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2355                 if (err)
2356                         break;
2357                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2358                 __mlx4_mpt_release(dev, index);
2359                 break;
2360         case RES_OP_MAP_ICM:
2361                 index = get_param_l(&in_param);
2362                 id = index & mpt_mask(dev);
2363                 err = mr_res_start_move_to(dev, slave, id,
2364                                            RES_MPT_RESERVED, &mpt);
2365                 if (err)
2366                         return err;
2367
2368                 __mlx4_mpt_free_icm(dev, mpt->key);
2369                 res_end_move(dev, slave, RES_MPT, id);
2370                 break;
2372         default:
2373                 err = -EINVAL;
2374                 break;
2375         }
2376         return err;
2377 }
2378
2379 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2380                        u64 in_param, u64 *out_param)
2381 {
2382         int cqn;
2383         int err;
2384
2385         switch (op) {
2386         case RES_OP_RESERVE_AND_MAP:
2387                 cqn = get_param_l(&in_param);
2388                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2389                 if (err)
2390                         break;
2391
2392                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2393                 __mlx4_cq_free_icm(dev, cqn);
2394                 break;
2395
2396         default:
2397                 err = -EINVAL;
2398                 break;
2399         }
2400
2401         return err;
2402 }
2403
2404 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2405                         u64 in_param, u64 *out_param)
2406 {
2407         int srqn;
2408         int err;
2409
2410         switch (op) {
2411         case RES_OP_RESERVE_AND_MAP:
2412                 srqn = get_param_l(&in_param);
2413                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2414                 if (err)
2415                         break;
2416
2417                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2418                 __mlx4_srq_free_icm(dev, srqn);
2419                 break;
2420
2421         default:
2422                 err = -EINVAL;
2423                 break;
2424         }
2425
2426         return err;
2427 }
2428
2429 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2430                             u64 in_param, u64 *out_param, int in_port)
2431 {
2432         int port;
2433         int err = 0;
2434
2435         switch (op) {
2436         case RES_OP_RESERVE_AND_MAP:
2437                 port = !in_port ? get_param_l(out_param) : in_port;
2438                 port = mlx4_slave_convert_port(dev, slave, port);
2440
2441                 if (port < 0)
2442                         return -EINVAL;
2443                 mac_del_from_slave(dev, slave, in_param, port);
2444                 __mlx4_unregister_mac(dev, port, in_param);
2445                 break;
2446         default:
2447                 err = -EINVAL;
2448                 break;
2449         }
2450
2451         return err;
2453 }
2454
2455 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2456                             u64 in_param, u64 *out_param, int port)
2457 {
2458         struct mlx4_priv *priv = mlx4_priv(dev);
2459         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2460         int err = 0;
2461
2462         port = mlx4_slave_convert_port(dev, slave, port);
2464
2465         if (port < 0)
2466                 return -EINVAL;
2467         switch (op) {
2468         case RES_OP_RESERVE_AND_MAP:
2469                 if (slave_state[slave].old_vlan_api)
2470                         return 0;
2471                 if (!port)
2472                         return -EINVAL;
2473                 vlan_del_from_slave(dev, slave, in_param, port);
2474                 __mlx4_unregister_vlan(dev, port, in_param);
2475                 break;
2476         default:
2477                 err = -EINVAL;
2478                 break;
2479         }
2480
2481         return err;
2482 }
2483
2484 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2485                             u64 in_param, u64 *out_param)
2486 {
2487         int index;
2488         int err;
2489
2490         if (op != RES_OP_RESERVE)
2491                 return -EINVAL;
2492
2493         index = get_param_l(&in_param);
2494         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2495                 return 0;
2496
2497         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2498         if (err)
2499                 return err;
2500
2501         __mlx4_counter_free(dev, index);
2502         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2503
2504         return err;
2505 }
2506
2507 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2508                           u64 in_param, u64 *out_param)
2509 {
2510         int xrcdn;
2511         int err;
2512
2513         if (op != RES_OP_RESERVE)
2514                 return -EINVAL;
2515
2516         xrcdn = get_param_l(&in_param);
2517         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2518         if (err)
2519                 return err;
2520
2521         __mlx4_xrcd_free(dev, xrcdn);
2522
2523         return err;
2524 }
2525
2526 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2527                           struct mlx4_vhcr *vhcr,
2528                           struct mlx4_cmd_mailbox *inbox,
2529                           struct mlx4_cmd_mailbox *outbox,
2530                           struct mlx4_cmd_info *cmd)
2531 {
2532         int err = -EINVAL;
2533         int alop = vhcr->op_modifier;
2534
2535         switch (vhcr->in_modifier & 0xFF) {
2536         case RES_QP:
2537                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2538                                   vhcr->in_param);
2539                 break;
2540
2541         case RES_MTT:
2542                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2543                                    vhcr->in_param, &vhcr->out_param);
2544                 break;
2545
2546         case RES_MPT:
2547                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2548                                    vhcr->in_param);
2549                 break;
2550
2551         case RES_CQ:
2552                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2553                                   vhcr->in_param, &vhcr->out_param);
2554                 break;
2555
2556         case RES_SRQ:
2557                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2558                                    vhcr->in_param, &vhcr->out_param);
2559                 break;
2560
2561         case RES_MAC:
2562                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2563                                    vhcr->in_param, &vhcr->out_param,
2564                                    (vhcr->in_modifier >> 8) & 0xFF);
2565                 break;
2566
2567         case RES_VLAN:
2568                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2569                                     vhcr->in_param, &vhcr->out_param,
2570                                     (vhcr->in_modifier >> 8) & 0xFF);
2571                 break;
2572
2573         case RES_COUNTER:
2574                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2575                                        vhcr->in_param, &vhcr->out_param);
2576                 break;
2577
2578         case RES_XRCD:
2579                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2580                                      vhcr->in_param, &vhcr->out_param);
2581                 break;
2582         default:
2583                 break;
2584         }
2585         return err;
2586 }
2587
2588 /* ugly but other choices are uglier */
2589 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2590 {
2591         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2592 }
2593
2594 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2595 {
2596         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2597 }
2598
2599 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2600 {
2601         return be32_to_cpu(mpt->mtt_sz);
2602 }
2603
2604 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2605 {
2606         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2607 }
2608
2609 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2610 {
2611         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2612 }
2613
2614 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2615 {
2616         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2617 }
2618
2619 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2620 {
2621         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2622 }
2623
2624 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2625 {
2626         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2627 }
2628
2629 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2630 {
2631         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2632 }
2633
2634 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2635 {
2636         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2637         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2638         int log_sq_stride = qpc->sq_size_stride & 7;
2639         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2640         int log_rq_stride = qpc->rq_size_stride & 7;
2641         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2642         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2643         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2644         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2645         int sq_size;
2646         int rq_size;
2647         int total_pages;
2648         int total_mem;
2649         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2650
2651         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2652         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2653         total_mem = sq_size + rq_size;
2654         total_pages =
2655                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2656                                    page_shift);
2657
2658         return total_pages;
2659 }
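
/*
 * Worked example for qp_get_mtt_size() (hypothetical numbers):
 * log_sq_size = 10 and log_sq_stride = 2 give an SQ of
 * 1 << (10 + 2 + 4) = 64KB (1024 WQEs of 64 bytes; the "+ 4" is the
 * 16-byte base stride unit).  With an equal RQ, page_shift = 12 and
 * page_offset = 0, total_pages becomes
 * roundup_pow_of_two(128KB >> 12) = 32 MTT entries.  RSS, XRC and
 * SRQ-attached QPs carry no RQ, which is why rq_size is forced to 0 for
 * them.
 */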
2660
2661 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2662                            int size, struct res_mtt *mtt)
2663 {
2664         int res_start = mtt->com.res_id;
2665         int res_size = (1 << mtt->order);
2666
2667         if (start < res_start || start + size > res_start + res_size)
2668                 return -EPERM;
2669         return 0;
2670 }
2671
2672 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2673                            struct mlx4_vhcr *vhcr,
2674                            struct mlx4_cmd_mailbox *inbox,
2675                            struct mlx4_cmd_mailbox *outbox,
2676                            struct mlx4_cmd_info *cmd)
2677 {
2678         int err;
2679         int index = vhcr->in_modifier;
2680         struct res_mtt *mtt;
2681         struct res_mpt *mpt;
2682         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2683         int phys;
2684         int id;
2685         u32 pd;
2686         int pd_slave;
2687
2688         id = index & mpt_mask(dev);
2689         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2690         if (err)
2691                 return err;
2692
2693         /* Disable memory windows for VFs. */
2694         if (!mr_is_region(inbox->buf)) {
2695                 err = -EPERM;
2696                 goto ex_abort;
2697         }
2698
2699         /* PD bits 17..23 must be zero or encode this slave (as slave id + 1). */
2700         pd = mr_get_pd(inbox->buf);
2701         pd_slave = (pd >> 17) & 0x7f;
2702         if (pd_slave != 0 && --pd_slave != slave) {
2703                 err = -EPERM;
2704                 goto ex_abort;
2705         }
2706
2707         if (mr_is_fmr(inbox->buf)) {
2708                 /* FMR and Bind Enable are forbidden in slave devices. */
2709                 if (mr_is_bind_enabled(inbox->buf)) {
2710                         err = -EPERM;
2711                         goto ex_abort;
2712                 }
2713                 /* FMR and Memory Windows are also forbidden. */
2714                 if (!mr_is_region(inbox->buf)) {
2715                         err = -EPERM;
2716                         goto ex_abort;
2717                 }
2718         }
2719
2720         phys = mr_phys_mpt(inbox->buf);
2721         if (!phys) {
2722                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2723                 if (err)
2724                         goto ex_abort;
2725
2726                 err = check_mtt_range(dev, slave, mtt_base,
2727                                       mr_get_mtt_size(inbox->buf), mtt);
2728                 if (err)
2729                         goto ex_put;
2730
2731                 mpt->mtt = mtt;
2732         }
2733
2734         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2735         if (err)
2736                 goto ex_put;
2737
2738         if (!phys) {
2739                 atomic_inc(&mtt->ref_count);
2740                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2741         }
2742
2743         res_end_move(dev, slave, RES_MPT, id);
2744         return 0;
2745
2746 ex_put:
2747         if (!phys)
2748                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2749 ex_abort:
2750         res_abort_move(dev, slave, RES_MPT, id);
2751
2752         return err;
2753 }
2754
2755 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2756                            struct mlx4_vhcr *vhcr,
2757                            struct mlx4_cmd_mailbox *inbox,
2758                            struct mlx4_cmd_mailbox *outbox,
2759                            struct mlx4_cmd_info *cmd)
2760 {
2761         int err;
2762         int index = vhcr->in_modifier;
2763         struct res_mpt *mpt;
2764         int id;
2765
2766         id = index & mpt_mask(dev);
2767         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2768         if (err)
2769                 return err;
2770
2771         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2772         if (err)
2773                 goto ex_abort;
2774
2775         if (mpt->mtt)
2776                 atomic_dec(&mpt->mtt->ref_count);
2777
2778         res_end_move(dev, slave, RES_MPT, id);
2779         return 0;
2780
2781 ex_abort:
2782         res_abort_move(dev, slave, RES_MPT, id);
2783
2784         return err;
2785 }
2786
2787 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2788                            struct mlx4_vhcr *vhcr,
2789                            struct mlx4_cmd_mailbox *inbox,
2790                            struct mlx4_cmd_mailbox *outbox,
2791                            struct mlx4_cmd_info *cmd)
2792 {
2793         int err;
2794         int index = vhcr->in_modifier;
2795         struct res_mpt *mpt;
2796         int id;
2797
2798         id = index & mpt_mask(dev);
2799         err = get_res(dev, slave, id, RES_MPT, &mpt);
2800         if (err)
2801                 return err;
2802
2803         if (mpt->com.from_state == RES_MPT_MAPPED) {
2804                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2805                  * that, the VF must read the MPT. But since the MPT entry memory is not
2806                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2807                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2808                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2809                  * ownership fofollowing the change. The change here allows the VF to
2810                  * ownership following the change. The change here allows the VF to
2811                  */
2812                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2813                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2814                                         mpt->key, NULL);
2815
2816                 if (!mpt_entry || !outbox->buf) {
2817                         err = -EINVAL;
2818                         goto out;
2819                 }
2820
2821                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2822
2823                 err = 0;
2824         } else if (mpt->com.from_state == RES_MPT_HW) {
2825                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2826         } else {
2827                 err = -EBUSY;
2828                 goto out;
2829         }
2830
2832 out:
2833         put_res(dev, slave, id, RES_MPT);
2834         return err;
2835 }
2836
2837 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2838 {
2839         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2840 }
2841
2842 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2843 {
2844         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2845 }
2846
2847 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2848 {
2849         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2850 }
2851
2852 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2853                                   struct mlx4_qp_context *context)
2854 {
2855         u32 qpn = vhcr->in_modifier & 0xffffff;
2856         u32 qkey = 0;
2857
2858         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2859                 return;
2860
2861         /* adjust qkey in qp context */
2862         context->qkey = cpu_to_be32(qkey);
2863 }
2864
2865 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2866                                  struct mlx4_qp_context *qpc,
2867                                  struct mlx4_cmd_mailbox *inbox);
2868
2869 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2870                              struct mlx4_vhcr *vhcr,
2871                              struct mlx4_cmd_mailbox *inbox,
2872                              struct mlx4_cmd_mailbox *outbox,
2873                              struct mlx4_cmd_info *cmd)
2874 {
2875         int err;
2876         int qpn = vhcr->in_modifier & 0x7fffff;
2877         struct res_mtt *mtt;
2878         struct res_qp *qp;
2879         struct mlx4_qp_context *qpc = inbox->buf + 8;
2880         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2881         int mtt_size = qp_get_mtt_size(qpc);
2882         struct res_cq *rcq;
2883         struct res_cq *scq;
2884         int rcqn = qp_get_rcqn(qpc);
2885         int scqn = qp_get_scqn(qpc);
2886         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2887         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2888         struct res_srq *srq;
2889         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2890
2891         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2892         if (err)
2893                 return err;
2894
2895         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2896         if (err)
2897                 return err;
2898         qp->local_qpn = local_qpn;
2899         qp->sched_queue = 0;
2900         qp->param3 = 0;
2901         qp->vlan_control = 0;
2902         qp->fvl_rx = 0;
2903         qp->pri_path_fl = 0;
2904         qp->vlan_index = 0;
2905         qp->feup = 0;
2906         qp->qpc_flags = be32_to_cpu(qpc->flags);
2907
2908         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2909         if (err)
2910                 goto ex_abort;
2911
2912         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2913         if (err)
2914                 goto ex_put_mtt;
2915
2916         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2917         if (err)
2918                 goto ex_put_mtt;
2919
2920         if (scqn != rcqn) {
2921                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2922                 if (err)
2923                         goto ex_put_rcq;
2924         } else
2925                 scq = rcq;
2926
2927         if (use_srq) {
2928                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2929                 if (err)
2930                         goto ex_put_scq;
2931         }
2932
2933         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2934         update_pkey_index(dev, slave, inbox);
2935         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2936         if (err)
2937                 goto ex_put_srq;
2938         atomic_inc(&mtt->ref_count);
2939         qp->mtt = mtt;
2940         atomic_inc(&rcq->ref_count);
2941         qp->rcq = rcq;
2942         atomic_inc(&scq->ref_count);
2943         qp->scq = scq;
2944
2945         if (scqn != rcqn)
2946                 put_res(dev, slave, scqn, RES_CQ);
2947
2948         if (use_srq) {
2949                 atomic_inc(&srq->ref_count);
2950                 put_res(dev, slave, srqn, RES_SRQ);
2951                 qp->srq = srq;
2952         }
2953         put_res(dev, slave, rcqn, RES_CQ);
2954         put_res(dev, slave, mtt_base, RES_MTT);
2955         res_end_move(dev, slave, RES_QP, qpn);
2956
2957         return 0;
2958
2959 ex_put_srq:
2960         if (use_srq)
2961                 put_res(dev, slave, srqn, RES_SRQ);
2962 ex_put_scq:
2963         if (scqn != rcqn)
2964                 put_res(dev, slave, scqn, RES_CQ);
2965 ex_put_rcq:
2966         put_res(dev, slave, rcqn, RES_CQ);
2967 ex_put_mtt:
2968         put_res(dev, slave, mtt_base, RES_MTT);
2969 ex_abort:
2970         res_abort_move(dev, slave, RES_QP, qpn);
2971
2972         return err;
2973 }
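
/*
 * The atomic_inc() calls on the mtt/rcq/scq/srq ref_count in
 * mlx4_RST2INIT_QP_wrapper() pin those resources for as long as the QP is
 * in the HW state: cq_res_start_move_to() and srq_res_start_move_to()
 * above return -EBUSY for a non-zero ref_count, so a slave cannot free a
 * CQ or SRQ that one of its QPs still references.
 */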
2974
2975 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2976 {
2977         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2978 }
2979
2980 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2981 {
2982         int log_eq_size = eqc->log_eq_size & 0x1f;
2983         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2984
2985         if (log_eq_size + 5 < page_shift)
2986                 return 1;
2987
2988         return 1 << (log_eq_size + 5 - page_shift);
2989 }
2990
2991 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2992 {
2993         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2994 }
2995
2996 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2997 {
2998         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2999         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3000
3001         if (log_cq_size + 5 < page_shift)
3002                 return 1;
3003
3004         return 1 << (log_cq_size + 5 - page_shift);
3005 }
3006
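/*
 * SW2HW_EQ on behalf of a slave: EQs are tracked per slave, so the
 * tracker id packs the slave number above the 10-bit EQ number
 * ((slave << 10) | eqn).  The EQ's MTT range is looked up, range-checked
 * and refcounted before the command is forwarded to firmware, and each
 * step is unwound on failure.
 */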
3007 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3008                           struct mlx4_vhcr *vhcr,
3009                           struct mlx4_cmd_mailbox *inbox,
3010                           struct mlx4_cmd_mailbox *outbox,
3011                           struct mlx4_cmd_info *cmd)
3012 {
3013         int err;
3014         int eqn = vhcr->in_modifier;
3015         int res_id = (slave << 10) | eqn;
3016         struct mlx4_eq_context *eqc = inbox->buf;
3017         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3018         int mtt_size = eq_get_mtt_size(eqc);
3019         struct res_eq *eq;
3020         struct res_mtt *mtt;
3021
3022         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3023         if (err)
3024                 return err;
3025         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3026         if (err)
3027                 goto out_add;
3028
3029         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3030         if (err)
3031                 goto out_move;
3032
3033         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3034         if (err)
3035                 goto out_put;
3036
3037         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3038         if (err)
3039                 goto out_put;
3040
3041         atomic_inc(&mtt->ref_count);
3042         eq->mtt = mtt;
3043         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3044         res_end_move(dev, slave, RES_EQ, res_id);
3045         return 0;
3046
3047 out_put:
3048         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3049 out_move:
3050         res_abort_move(dev, slave, RES_EQ, res_id);
3051 out_add:
3052         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3053         return err;
3054 }
3055
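/*
 * CONFIG_DEV: slaves may only use the query form (op_modifier == 1);
 * changing device configuration is reserved for the PF.
 */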
3056 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3057                             struct mlx4_vhcr *vhcr,
3058                             struct mlx4_cmd_mailbox *inbox,
3059                             struct mlx4_cmd_mailbox *outbox,
3060                             struct mlx4_cmd_info *cmd)
3061 {
3062         int err;
3063         u8 get = vhcr->op_modifier;
3064
3065         if (get != 1)
3066                 return -EPERM;
3067
3068         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3069
3070         return err;
3071 }
3072
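/*
 * Find the MTT resource owned by this slave that fully contains
 * [start, start + len) and mark it busy under the tracker lock.
 * The caller must release it again with put_res().
 */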
3073 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3074                               int len, struct res_mtt **res)
3075 {
3076         struct mlx4_priv *priv = mlx4_priv(dev);
3077         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3078         struct res_mtt *mtt;
3079         int err = -EINVAL;
3080
3081         spin_lock_irq(mlx4_tlock(dev));
3082         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3083                             com.list) {
3084                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3085                         *res = mtt;
3086                         mtt->com.from_state = mtt->com.state;
3087                         mtt->com.state = RES_MTT_BUSY;
3088                         err = 0;
3089                         break;
3090                 }
3091         }
3092         spin_unlock_irq(mlx4_tlock(dev));
3093
3094         return err;
3095 }
3096
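/*
 * Sanity-check a QP state-transition mailbox issued by a slave: clear
 * the master-only FPP bit, reject VF attempts to set a QP rate limit,
 * verify that mgid_index stays within the slave's GID quota on the
 * primary and alternate paths, and only let SMI-enabled VFs create MLX
 * proxy special QPs.
 */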
3097 static int verify_qp_parameters(struct mlx4_dev *dev,
3098                                 struct mlx4_vhcr *vhcr,
3099                                 struct mlx4_cmd_mailbox *inbox,
3100                                 enum qp_transition transition, u8 slave)
3101 {
3102         u32                     qp_type;
3103         u32                     qpn;
3104         struct mlx4_qp_context  *qp_ctx;
3105         enum mlx4_qp_optpar     optpar;
3106         int port;
3107         int num_gids;
3108
3109         qp_ctx  = inbox->buf + 8;
3110         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3111         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3112
3113         if (slave != mlx4_master_func_num(dev)) {
3114                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3115                 /* setting QP rate-limit is disallowed for VFs */
3116                 if (qp_ctx->rate_limit_params)
3117                         return -EPERM;
3118         }
3119
3120         switch (qp_type) {
3121         case MLX4_QP_ST_RC:
3122         case MLX4_QP_ST_XRC:
3123         case MLX4_QP_ST_UC:
3124                 switch (transition) {
3125                 case QP_TRANS_INIT2RTR:
3126                 case QP_TRANS_RTR2RTS:
3127                 case QP_TRANS_RTS2RTS:
3128                 case QP_TRANS_SQD2SQD:
3129                 case QP_TRANS_SQD2RTS:
3130                         if (slave != mlx4_master_func_num(dev)) {
3131                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3132                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3133                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3134                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3135                                         else
3136                                                 num_gids = 1;
3137                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
3138                                                 return -EINVAL;
3139                                 }
3140                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3141                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3142                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3143                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3144                                         else
3145                                                 num_gids = 1;
3146                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
3147                                                 return -EINVAL;
3148                                 }
                             }
3149                         break;
3150                 default:
3151                         break;
3152                 }
3153                 break;
3154
3155         case MLX4_QP_ST_MLX:
3156                 qpn = vhcr->in_modifier & 0x7fffff;
3157                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3158                 if (transition == QP_TRANS_INIT2RTR &&
3159                     slave != mlx4_master_func_num(dev) &&
3160                     mlx4_is_qp_reserved(dev, qpn) &&
3161                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3162                         /* only enabled VFs may create MLX proxy QPs */
3163                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3164                                  __func__, slave, port);
3165                         return -EPERM;
3166                 }
3167                 break;
3168
3169         default:
3170                 break;
3171         }
3172
3173         return 0;
3174 }
3175
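/*
 * WRITE_MTT on behalf of a slave: the page list in the inbox is
 * validated against an MTT range the slave owns, converted to host
 * endianness in place, and written through the SW path
 * (__mlx4_write_mtt) rather than being DMAd to firmware directly.
 */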
3176 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3177                            struct mlx4_vhcr *vhcr,
3178                            struct mlx4_cmd_mailbox *inbox,
3179                            struct mlx4_cmd_mailbox *outbox,
3180                            struct mlx4_cmd_info *cmd)
3181 {
3182         struct mlx4_mtt mtt;
3183         __be64 *page_list = inbox->buf;
3184         u64 *pg_list = (u64 *)page_list;
3185         int i;
3186         struct res_mtt *rmtt = NULL;
3187         int start = be64_to_cpu(page_list[0]);
3188         int npages = vhcr->in_modifier;
3189         int err;
3190
3191         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3192         if (err)
3193                 return err;
3194
3195         /* Call the SW implementation of write_mtt:
3196          * - Prepare a dummy mtt struct
3197          * - Translate inbox contents to simple addresses in host endianness */
3198         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3199                             we don't really use it */
3200         mtt.order = 0;
3201         mtt.page_shift = 0;
3202         for (i = 0; i < npages; ++i)
3203                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3204
3205         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3206                                ((u64 *)page_list + 2));
3207
3208         if (rmtt)
3209                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3210
3211         return err;
3212 }
3213
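/*
 * HW2SW_EQ: take the EQ back from firmware, drop the MTT reference that
 * SW2HW_EQ took, and remove the EQ from the tracker.
 */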
3214 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3215                           struct mlx4_vhcr *vhcr,
3216                           struct mlx4_cmd_mailbox *inbox,
3217                           struct mlx4_cmd_mailbox *outbox,
3218                           struct mlx4_cmd_info *cmd)
3219 {
3220         int eqn = vhcr->in_modifier;
3221         int res_id = eqn | (slave << 10);
3222         struct res_eq *eq;
3223         int err;
3224
3225         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3226         if (err)
3227                 return err;
3228
3229         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3230         if (err)
3231                 goto ex_abort;
3232
3233         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3234         if (err)
3235                 goto ex_put;
3236
3237         atomic_dec(&eq->mtt->ref_count);
3238         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3239         res_end_move(dev, slave, RES_EQ, res_id);
3240         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3241
3242         return 0;
3243
3244 ex_put:
3245         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3246 ex_abort:
3247         res_abort_move(dev, slave, RES_EQ, res_id);
3248
3249         return err;
3250 }
3251
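/*
 * Inject an asynchronous event into a slave's registered event EQ.
 * The slave must be valid, active and not the PF itself.  The EQE is
 * copied into a command mailbox and posted with GEN_EQE, with the slave
 * and EQ number encoded in the in_modifier.
 */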
3252 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3253 {
3254         struct mlx4_priv *priv = mlx4_priv(dev);
3255         struct mlx4_slave_event_eq_info *event_eq;
3256         struct mlx4_cmd_mailbox *mailbox;
3257         u32 in_modifier = 0;
3258         int err;
3259         int res_id;
3260         struct res_eq *req;
3261
3262         if (!priv->mfunc.master.slave_state)
3263                 return -EINVAL;
3264
3265         /* check for slave valid, slave not PF, and slave active */
3266         if (slave < 0 || slave > dev->persist->num_vfs ||
3267             slave == dev->caps.function ||
3268             !priv->mfunc.master.slave_state[slave].active)
3269                 return 0;
3270
3271         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3272
3273         /* Create the event only if the slave is registered */
3274         if (event_eq->eqn < 0)
3275                 return 0;
3276
3277         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3278         res_id = (slave << 10) | event_eq->eqn;
3279         err = get_res(dev, slave, res_id, RES_EQ, &req);
3280         if (err)
3281                 goto unlock;
3282
3283         if (req->com.from_state != RES_EQ_HW) {
3284                 err = -EINVAL;
3285                 goto put;
3286         }
3287
3288         mailbox = mlx4_alloc_cmd_mailbox(dev);
3289         if (IS_ERR(mailbox)) {
3290                 err = PTR_ERR(mailbox);
3291                 goto put;
3292         }
3293
3294         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3295                 ++event_eq->token;
3296                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3297         }
3298
3299         memcpy(mailbox->buf, (u8 *) eqe, 28);
3300
3301         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3302
3303         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3304                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3305                        MLX4_CMD_NATIVE);
3306
3307         put_res(dev, slave, res_id, RES_EQ);
3308         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3309         mlx4_free_cmd_mailbox(dev, mailbox);
3310         return err;
3311
3312 put:
3313         put_res(dev, slave, res_id, RES_EQ);
3314
3315 unlock:
3316         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3317         return err;
3318 }
3319
3320 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3321                           struct mlx4_vhcr *vhcr,
3322                           struct mlx4_cmd_mailbox *inbox,
3323                           struct mlx4_cmd_mailbox *outbox,
3324                           struct mlx4_cmd_info *cmd)
3325 {
3326         int eqn = vhcr->in_modifier;
3327         int res_id = eqn | (slave << 10);
3328         struct res_eq *eq;
3329         int err;
3330
3331         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3332         if (err)
3333                 return err;
3334
3335         if (eq->com.from_state != RES_EQ_HW) {
3336                 err = -EINVAL;
3337                 goto ex_put;
3338         }
3339
3340         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3341
3342 ex_put:
3343         put_res(dev, slave, res_id, RES_EQ);
3344         return err;
3345 }
3346
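/*
 * CQ lifecycle wrappers.  SW2HW moves the CQ to RES_CQ_HW and takes a
 * reference on its MTT range; HW2SW moves it back to RES_CQ_ALLOCATED
 * and drops that reference.  Firmware is only invoked once the resource
 * moves and MTT checks succeed, so failures can be rolled back with
 * res_abort_move().
 */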
3347 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3348                           struct mlx4_vhcr *vhcr,
3349                           struct mlx4_cmd_mailbox *inbox,
3350                           struct mlx4_cmd_mailbox *outbox,
3351                           struct mlx4_cmd_info *cmd)
3352 {
3353         int err;
3354         int cqn = vhcr->in_modifier;
3355         struct mlx4_cq_context *cqc = inbox->buf;
3356         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3357         struct res_cq *cq = NULL;
3358         struct res_mtt *mtt;
3359
3360         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3361         if (err)
3362                 return err;
3363         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3364         if (err)
3365                 goto out_move;
3366         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3367         if (err)
3368                 goto out_put;
3369         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3370         if (err)
3371                 goto out_put;
3372         atomic_inc(&mtt->ref_count);
3373         cq->mtt = mtt;
3374         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3375         res_end_move(dev, slave, RES_CQ, cqn);
3376         return 0;
3377
3378 out_put:
3379         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3380 out_move:
3381         res_abort_move(dev, slave, RES_CQ, cqn);
3382         return err;
3383 }
3384
3385 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3386                           struct mlx4_vhcr *vhcr,
3387                           struct mlx4_cmd_mailbox *inbox,
3388                           struct mlx4_cmd_mailbox *outbox,
3389                           struct mlx4_cmd_info *cmd)
3390 {
3391         int err;
3392         int cqn = vhcr->in_modifier;
3393         struct res_cq *cq = NULL;
3394
3395         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3396         if (err)
3397                 return err;
3398         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3399         if (err)
3400                 goto out_move;
3401         atomic_dec(&cq->mtt->ref_count);
3402         res_end_move(dev, slave, RES_CQ, cqn);
3403         return 0;
3404
3405 out_move:
3406         res_abort_move(dev, slave, RES_CQ, cqn);
3407         return err;
3408 }
3409
3410 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3411                           struct mlx4_vhcr *vhcr,
3412                           struct mlx4_cmd_mailbox *inbox,
3413                           struct mlx4_cmd_mailbox *outbox,
3414                           struct mlx4_cmd_info *cmd)
3415 {
3416         int cqn = vhcr->in_modifier;
3417         struct res_cq *cq;
3418         int err;
3419
3420         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3421         if (err)
3422                 return err;
3423
3424         if (cq->com.from_state != RES_CQ_HW) {
                     err = -EBUSY;
3425                 goto ex_put;
             }
3426
3427         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3428 ex_put:
3429         put_res(dev, slave, cqn, RES_CQ);
3430
3431         return err;
3432 }
3433
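/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize: once firmware accepts
 * the command, the CQ's MTT reference is moved from the original range
 * to the one described by the new CQ context.
 */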
3434 static int handle_resize(struct mlx4_dev *dev, int slave,
3435                          struct mlx4_vhcr *vhcr,
3436                          struct mlx4_cmd_mailbox *inbox,
3437                          struct mlx4_cmd_mailbox *outbox,
3438                          struct mlx4_cmd_info *cmd,
3439                          struct res_cq *cq)
3440 {
3441         int err;
3442         struct res_mtt *orig_mtt;
3443         struct res_mtt *mtt;
3444         struct mlx4_cq_context *cqc = inbox->buf;
3445         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3446
3447         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3448         if (err)
3449                 return err;
3450
3451         if (orig_mtt != cq->mtt) {
3452                 err = -EINVAL;
3453                 goto ex_put;
3454         }
3455
3456         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3457         if (err)
3458                 goto ex_put;
3459
3460         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3461         if (err)
3462                 goto ex_put1;
3463         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3464         if (err)
3465                 goto ex_put1;
3466         atomic_dec(&orig_mtt->ref_count);
3467         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3468         atomic_inc(&mtt->ref_count);
3469         cq->mtt = mtt;
3470         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3471         return 0;
3472
3473 ex_put1:
3474         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3475 ex_put:
3476         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3477
3478         return err;
3480 }
3481
3482 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3483                            struct mlx4_vhcr *vhcr,
3484                            struct mlx4_cmd_mailbox *inbox,
3485                            struct mlx4_cmd_mailbox *outbox,
3486                            struct mlx4_cmd_info *cmd)
3487 {
3488         int cqn = vhcr->in_modifier;
3489         struct res_cq *cq;
3490         int err;
3491
3492         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3493         if (err)
3494                 return err;
3495
3496         if (cq->com.from_state != RES_CQ_HW) {
                     err = -EBUSY;
3497                 goto ex_put;
             }
3498
3499         if (vhcr->op_modifier == 0) {
3500                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3501                 goto ex_put;
3502         }
3503
3504         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3505 ex_put:
3506         put_res(dev, slave, cqn, RES_CQ);
3507
3508         return err;
3509 }
3510
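/*
 * SRQ MTT span: 2^log_srq_size WQEs of 2^(log_rq_stride + 4) bytes
 * each, split into pages of 2^page_shift bytes, with a minimum of one
 * page.
 */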
3511 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3512 {
3513         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3514         int log_rq_stride = srqc->logstride & 7;
3515         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3516
3517         if (log_srq_size + log_rq_stride + 4 < page_shift)
3518                 return 1;
3519
3520         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3521 }
3522
3523 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3524                            struct mlx4_vhcr *vhcr,
3525                            struct mlx4_cmd_mailbox *inbox,
3526                            struct mlx4_cmd_mailbox *outbox,
3527                            struct mlx4_cmd_info *cmd)
3528 {
3529         int err;
3530         int srqn = vhcr->in_modifier;
3531         struct res_mtt *mtt;
3532         struct res_srq *srq = NULL;
3533         struct mlx4_srq_context *srqc = inbox->buf;
3534         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3535
3536         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3537                 return -EINVAL;
3538
3539         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3540         if (err)
3541                 return err;
3542         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3543         if (err)
3544                 goto ex_abort;
3545         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3546                               mtt);
3547         if (err)
3548                 goto ex_put_mtt;
3549
3550         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3551         if (err)
3552                 goto ex_put_mtt;
3553
3554         atomic_inc(&mtt->ref_count);
3555         srq->mtt = mtt;
3556         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3557         res_end_move(dev, slave, RES_SRQ, srqn);
3558         return 0;
3559
3560 ex_put_mtt:
3561         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3562 ex_abort:
3563         res_abort_move(dev, slave, RES_SRQ, srqn);
3564
3565         return err;
3566 }
3567
3568 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3569                            struct mlx4_vhcr *vhcr,
3570                            struct mlx4_cmd_mailbox *inbox,
3571                            struct mlx4_cmd_mailbox *outbox,
3572                            struct mlx4_cmd_info *cmd)
3573 {
3574         int err;
3575         int srqn = vhcr->in_modifier;
3576         struct res_srq *srq = NULL;
3577
3578         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3579         if (err)
3580                 return err;
3581         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3582         if (err)
3583                 goto ex_abort;
3584         atomic_dec(&srq->mtt->ref_count);
3585         if (srq->cq)
3586                 atomic_dec(&srq->cq->ref_count);
3587         res_end_move(dev, slave, RES_SRQ, srqn);
3588
3589         return 0;
3590
3591 ex_abort:
3592         res_abort_move(dev, slave, RES_SRQ, srqn);
3593
3594         return err;
3595 }
3596
3597 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3598                            struct mlx4_vhcr *vhcr,
3599                            struct mlx4_cmd_mailbox *inbox,
3600                            struct mlx4_cmd_mailbox *outbox,
3601                            struct mlx4_cmd_info *cmd)
3602 {
3603         int err;
3604         int srqn = vhcr->in_modifier;
3605         struct res_srq *srq;
3606
3607         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3608         if (err)
3609                 return err;
3610         if (srq->com.from_state != RES_SRQ_HW) {
3611                 err = -EBUSY;
3612                 goto out;
3613         }
3614         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3615 out:
3616         put_res(dev, slave, srqn, RES_SRQ);
3617         return err;
3618 }
3619
3620 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3621                          struct mlx4_vhcr *vhcr,
3622                          struct mlx4_cmd_mailbox *inbox,
3623                          struct mlx4_cmd_mailbox *outbox,
3624                          struct mlx4_cmd_info *cmd)
3625 {
3626         int err;
3627         int srqn = vhcr->in_modifier;
3628         struct res_srq *srq;
3629
3630         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3631         if (err)
3632                 return err;
3633
3634         if (srq->com.from_state != RES_SRQ_HW) {
3635                 err = -EBUSY;
3636                 goto out;
3637         }
3638
3639         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3640 out:
3641         put_res(dev, slave, srqn, RES_SRQ);
3642         return err;
3643 }
3644
3645 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3646                         struct mlx4_vhcr *vhcr,
3647                         struct mlx4_cmd_mailbox *inbox,
3648                         struct mlx4_cmd_mailbox *outbox,
3649                         struct mlx4_cmd_info *cmd)
3650 {
3651         int err;
3652         int qpn = vhcr->in_modifier & 0x7fffff;
3653         struct res_qp *qp;
3654
3655         err = get_res(dev, slave, qpn, RES_QP, &qp);
3656         if (err)
3657                 return err;
3658         if (qp->com.from_state != RES_QP_HW) {
3659                 err = -EBUSY;
3660                 goto out;
3661         }
3662
3663         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3664 out:
3665         put_res(dev, slave, qpn, RES_QP);
3666         return err;
3667 }
3668
3669 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3670                               struct mlx4_vhcr *vhcr,
3671                               struct mlx4_cmd_mailbox *inbox,
3672                               struct mlx4_cmd_mailbox *outbox,
3673                               struct mlx4_cmd_info *cmd)
3674 {
3675         struct mlx4_qp_context *context = inbox->buf + 8;
3676         adjust_proxy_tun_qkey(dev, vhcr, context);
3677         update_pkey_index(dev, slave, inbox);
3678         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3679 }
3680
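/*
 * Bit 6 of sched_queue selects the physical port.  Translate the port
 * number the slave sees into the real port for the primary path and,
 * when the ALT_ADDR_PATH optpar is set, for the alternate path too.
 */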
3681 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3682                                   struct mlx4_qp_context *qpc,
3683                                   struct mlx4_cmd_mailbox *inbox)
3684 {
3685         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3686         u8 pri_sched_queue;
3687         int port = mlx4_slave_convert_port(
3688                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3689
3690         if (port < 0)
3691                 return -EINVAL;
3692
3693         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3694                           ((port & 1) << 6);
3695
3696         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3697             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3698                 qpc->pri_path.sched_queue = pri_sched_queue;
3699         }
3700
3701         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3702                 port = mlx4_slave_convert_port(
3703                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3704                                 + 1) - 1;
3705                 if (port < 0)
3706                         return -EINVAL;
3707                 qpc->alt_path.sched_queue =
3708                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3709                         (port & 1) << 6;
3710         }
3711         return 0;
3712 }
3713
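/*
 * For Ethernet (RoCE) QPs, verify that the source-MAC index in the QP
 * context resolves to a MAC address actually assigned to this slave.
 */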
3714 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3715                                 struct mlx4_qp_context *qpc,
3716                                 struct mlx4_cmd_mailbox *inbox)
3717 {
3718         u64 mac;
3719         int port;
3720         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3721         u8 sched = *(u8 *)(inbox->buf + 64);
3722         u8 smac_ix;
3723
3724         port = (sched >> 6 & 1) + 1;
3725         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3726                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3727                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3728                         return -ENOENT;
3729         }
3730         return 0;
3731 }
3732
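/*
 * INIT2RTR is where port, GID, pkey and vport (VST) parameters are
 * stamped into the QP context, so the mailbox gets the full
 * verification and translation treatment before reaching firmware.
 * On success, the sched_queue/vlan values the VF asked for are saved
 * in the tracker for possible VST<->VGT transitions later.
 */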
3733 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3734                              struct mlx4_vhcr *vhcr,
3735                              struct mlx4_cmd_mailbox *inbox,
3736                              struct mlx4_cmd_mailbox *outbox,
3737                              struct mlx4_cmd_info *cmd)
3738 {
3739         int err;
3740         struct mlx4_qp_context *qpc = inbox->buf + 8;
3741         int qpn = vhcr->in_modifier & 0x7fffff;
3742         struct res_qp *qp;
3743         u8 orig_sched_queue;
3744         __be32  orig_param3 = qpc->param3;
3745         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3746         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3747         u8 orig_pri_path_fl = qpc->pri_path.fl;
3748         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3749         u8 orig_feup = qpc->pri_path.feup;
3750
3751         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3752         if (err)
3753                 return err;
3754         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3755         if (err)
3756                 return err;
3757
3758         if (roce_verify_mac(dev, slave, qpc, inbox))
3759                 return -EINVAL;
3760
3761         update_pkey_index(dev, slave, inbox);
3762         update_gid(dev, inbox, (u8)slave);
3763         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3764         orig_sched_queue = qpc->pri_path.sched_queue;
3765         err = update_vport_qp_param(dev, inbox, slave, qpn);
3766         if (err)
3767                 return err;
3768
3769         err = get_res(dev, slave, qpn, RES_QP, &qp);
3770         if (err)
3771                 return err;
3772         if (qp->com.from_state != RES_QP_HW) {
3773                 err = -EBUSY;
3774                 goto out;
3775         }
3776
3777         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3778 out:
3779         /* if no error, save sched queue value passed in by VF. This is
3780          * essentially the QOS value provided by the VF. This will be useful
3781          * if we allow dynamic changes from VST back to VGT
3782          */
3783         if (!err) {
3784                 qp->sched_queue = orig_sched_queue;
3785                 qp->param3      = orig_param3;
3786                 qp->vlan_control = orig_vlan_control;
3787                 qp->fvl_rx      =  orig_fvl_rx;
3788                 qp->pri_path_fl = orig_pri_path_fl;
3789                 qp->vlan_index  = orig_vlan_index;
3790                 qp->feup        = orig_feup;
3791         }
3792         put_res(dev, slave, qpn, RES_QP);
3793         return err;
3794 }
3795
3796 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3797                             struct mlx4_vhcr *vhcr,
3798                             struct mlx4_cmd_mailbox *inbox,
3799                             struct mlx4_cmd_mailbox *outbox,
3800                             struct mlx4_cmd_info *cmd)
3801 {
3802         int err;
3803         struct mlx4_qp_context *context = inbox->buf + 8;
3804
3805         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3806         if (err)
3807                 return err;
3808         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3809         if (err)
3810                 return err;
3811
3812         update_pkey_index(dev, slave, inbox);
3813         update_gid(dev, inbox, (u8)slave);
3814         adjust_proxy_tun_qkey(dev, vhcr, context);
3815         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3816 }
3817
3818 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3819                             struct mlx4_vhcr *vhcr,
3820                             struct mlx4_cmd_mailbox *inbox,
3821                             struct mlx4_cmd_mailbox *outbox,
3822                             struct mlx4_cmd_info *cmd)
3823 {
3824         int err;
3825         struct mlx4_qp_context *context = inbox->buf + 8;
3826
3827         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3828         if (err)
3829                 return err;
3830         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3831         if (err)
3832                 return err;
3833
3834         update_pkey_index(dev, slave, inbox);
3835         update_gid(dev, inbox, (u8)slave);
3836         adjust_proxy_tun_qkey(dev, vhcr, context);
3837         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3838 }
3839
3841 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3842                               struct mlx4_vhcr *vhcr,
3843                               struct mlx4_cmd_mailbox *inbox,
3844                               struct mlx4_cmd_mailbox *outbox,
3845                               struct mlx4_cmd_info *cmd)
3846 {
3847         struct mlx4_qp_context *context = inbox->buf + 8;
3848         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3849         if (err)
3850                 return err;
3851         adjust_proxy_tun_qkey(dev, vhcr, context);
3852         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3853 }
3854
3855 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3856                             struct mlx4_vhcr *vhcr,
3857                             struct mlx4_cmd_mailbox *inbox,
3858                             struct mlx4_cmd_mailbox *outbox,
3859                             struct mlx4_cmd_info *cmd)
3860 {
3861         int err;
3862         struct mlx4_qp_context *context = inbox->buf + 8;
3863
3864         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3865         if (err)
3866                 return err;
3867         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3868         if (err)
3869                 return err;
3870
3871         adjust_proxy_tun_qkey(dev, vhcr, context);
3872         update_gid(dev, inbox, (u8)slave);
3873         update_pkey_index(dev, slave, inbox);
3874         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3875 }
3876
3877 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3878                             struct mlx4_vhcr *vhcr,
3879                             struct mlx4_cmd_mailbox *inbox,
3880                             struct mlx4_cmd_mailbox *outbox,
3881                             struct mlx4_cmd_info *cmd)
3882 {
3883         int err;
3884         struct mlx4_qp_context *context = inbox->buf + 8;
3885
3886         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3887         if (err)
3888                 return err;
3889         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3890         if (err)
3891                 return err;
3892
3893         adjust_proxy_tun_qkey(dev, vhcr, context);
3894         update_gid(dev, inbox, (u8)slave);
3895         update_pkey_index(dev, slave, inbox);
3896         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3897 }
3898
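/*
 * Transition to RESET: move the QP back to RES_QP_MAPPED and drop the
 * references on the MTT, CQs and SRQ that were taken at RST2INIT time.
 */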
3899 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3900                          struct mlx4_vhcr *vhcr,
3901                          struct mlx4_cmd_mailbox *inbox,
3902                          struct mlx4_cmd_mailbox *outbox,
3903                          struct mlx4_cmd_info *cmd)
3904 {
3905         int err;
3906         int qpn = vhcr->in_modifier & 0x7fffff;
3907         struct res_qp *qp;
3908
3909         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3910         if (err)
3911                 return err;
3912         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3913         if (err)
3914                 goto ex_abort;
3915
3916         atomic_dec(&qp->mtt->ref_count);
3917         atomic_dec(&qp->rcq->ref_count);
3918         atomic_dec(&qp->scq->ref_count);
3919         if (qp->srq)
3920                 atomic_dec(&qp->srq->ref_count);
3921         res_end_move(dev, slave, RES_QP, qpn);
3922         return 0;
3923
3924 ex_abort:
3925         res_abort_move(dev, slave, RES_QP, qpn);
3926
3927         return err;
3928 }
3929
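/*
 * Multicast/steering attachments made through QP_ATTACH are shadowed in
 * rqp->mcg_list so a later detach can find the registration id, and so
 * detach_qp() can tear everything down when the slave's resources are
 * reclaimed.
 */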
3930 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3931                                 struct res_qp *rqp, u8 *gid)
3932 {
3933         struct res_gid *res;
3934
3935         list_for_each_entry(res, &rqp->mcg_list, list) {
3936                 if (!memcmp(res->gid, gid, 16))
3937                         return res;
3938         }
3939         return NULL;
3940 }
3941
3942 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3943                        u8 *gid, enum mlx4_protocol prot,
3944                        enum mlx4_steer_type steer, u64 reg_id)
3945 {
3946         struct res_gid *res;
3947         int err;
3948
3949         res = kzalloc(sizeof *res, GFP_KERNEL);
3950         if (!res)
3951                 return -ENOMEM;
3952
3953         spin_lock_irq(&rqp->mcg_spl);
3954         if (find_gid(dev, slave, rqp, gid)) {
3955                 kfree(res);
3956                 err = -EEXIST;
3957         } else {
3958                 memcpy(res->gid, gid, 16);
3959                 res->prot = prot;
3960                 res->steer = steer;
3961                 res->reg_id = reg_id;
3962                 list_add_tail(&res->list, &rqp->mcg_list);
3963                 err = 0;
3964         }
3965         spin_unlock_irq(&rqp->mcg_spl);
3966
3967         return err;
3968 }
3969
3970 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3971                        u8 *gid, enum mlx4_protocol prot,
3972                        enum mlx4_steer_type steer, u64 *reg_id)
3973 {
3974         struct res_gid *res;
3975         int err;
3976
3977         spin_lock_irq(&rqp->mcg_spl);
3978         res = find_gid(dev, slave, rqp, gid);
3979         if (!res || res->prot != prot || res->steer != steer)
3980                 err = -EINVAL;
3981         else {
3982                 *reg_id = res->reg_id;
3983                 list_del(&res->list);
3984                 kfree(res);
3985                 err = 0;
3986         }
3987         spin_unlock_irq(&rqp->mcg_spl);
3988
3989         return err;
3990 }
3991
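/*
 * Attach a QP to a multicast/steering group using whichever steering
 * mode the device runs.  The port the slave sees is first converted to
 * the real physical port (for B0 steering, only for Ethernet rules).
 */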
3992 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3993                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3994                      enum mlx4_steer_type type, u64 *reg_id)
3995 {
3996         switch (dev->caps.steering_mode) {
3997         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3998                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3999                 if (port < 0)
4000                         return port;
4001                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4002                                                 block_loopback, prot,
4003                                                 reg_id);
4004         }
4005         case MLX4_STEERING_MODE_B0:
4006                 if (prot == MLX4_PROT_ETH) {
4007                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4008                         if (port < 0)
4009                                 return port;
4010                         gid[5] = port;
4011                 }
4012                 return mlx4_qp_attach_common(dev, qp, gid,
4013                                             block_loopback, prot, type);
4014         default:
4015                 return -EINVAL;
4016         }
4017 }
4018
4019 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4020                      u8 gid[16], enum mlx4_protocol prot,
4021                      enum mlx4_steer_type type, u64 reg_id)
4022 {
4023         switch (dev->caps.steering_mode) {
4024         case MLX4_STEERING_MODE_DEVICE_MANAGED:
4025                 return mlx4_flow_detach(dev, reg_id);
4026         case MLX4_STEERING_MODE_B0:
4027                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4028         default:
4029                 return -EINVAL;
4030         }
4031 }
4032
4033 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4034                             u8 *gid, enum mlx4_protocol prot)
4035 {
4036         int real_port;
4037
4038         if (prot != MLX4_PROT_ETH)
4039                 return 0;
4040
4041         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4042             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4043                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4044                 if (real_port < 0)
4045                         return -EINVAL;
4046                 gid[5] = real_port;
4047         }
4048
4049         return 0;
4050 }
4051
4052 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4053                                struct mlx4_vhcr *vhcr,
4054                                struct mlx4_cmd_mailbox *inbox,
4055                                struct mlx4_cmd_mailbox *outbox,
4056                                struct mlx4_cmd_info *cmd)
4057 {
4058         struct mlx4_qp qp; /* dummy for calling attach/detach */
4059         u8 *gid = inbox->buf;
4060         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4061         int err;
4062         int qpn;
4063         struct res_qp *rqp;
4064         u64 reg_id = 0;
4065         int attach = vhcr->op_modifier;
4066         int block_loopback = vhcr->in_modifier >> 31;
4067         u8 steer_type_mask = 2;
4068         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4069
4070         qpn = vhcr->in_modifier & 0xffffff;
4071         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4072         if (err)
4073                 return err;
4074
4075         qp.qpn = qpn;
4076         if (attach) {
4077                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4078                                 type, &reg_id);
4079                 if (err) {
4080                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4081                         goto ex_put;
4082                 }
4083                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4084                 if (err)
4085                         goto ex_detach;
4086         } else {
4087                 err = mlx4_adjust_port(dev, slave, gid, prot);
4088                 if (err)
4089                         goto ex_put;
4090
4091                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4092                 if (err)
4093                         goto ex_put;
4094
4095                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4096                 if (err)
4097                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
4098                                qpn, reg_id);
4099         }
4100         put_res(dev, slave, qpn, RES_QP);
4101         return err;
4102
4103 ex_detach:
4104         qp_detach(dev, &qp, gid, prot, type, reg_id);
4105 ex_put:
4106         put_res(dev, slave, qpn, RES_QP);
4107         return err;
4108 }
4109
4110 /*
4111  * MAC validation for Flow Steering rules.
4112  * VF can attach rules only with a mac address which is assigned to it.
4113  */
4114 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4115                                    struct list_head *rlist)
4116 {
4117         struct mac_res *res, *tmp;
4118         __be64 be_mac;
4119
4120         /* make sure it isn't a multicast or broadcast MAC */
4121         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4122             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4123                 list_for_each_entry_safe(res, tmp, rlist, list) {
4124                         be_mac = cpu_to_be64(res->mac << 16);
4125                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4126                                 return 0;
4127                 }
4128                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4129                        eth_header->eth.dst_mac, slave);
4130                 return -EINVAL;
4131         }
4132         return 0;
4133 }
4134
4135 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4136                                          struct _rule_hw *eth_header)
4137 {
4138         if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4139             is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4140                 struct mlx4_net_trans_rule_hw_eth *eth =
4141                         (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4142                 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4143                 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4144                         next_rule->rsvd == 0;
4145
4146                 if (last_rule)
4147                         ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4148         }
4149 }
4150
4151 /*
4152  * In case of missing eth header, append eth header with a MAC address
4153  * assigned to the VF.
4154  */
4155 static int add_eth_header(struct mlx4_dev *dev, int slave,
4156                           struct mlx4_cmd_mailbox *inbox,
4157                           struct list_head *rlist, int header_id)
4158 {
4159         struct mac_res *res, *tmp;
4160         u8 port;
4161         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4162         struct mlx4_net_trans_rule_hw_eth *eth_header;
4163         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4164         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4165         __be64 be_mac = 0;
4166         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4167
4168         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4169         port = ctrl->port;
4170         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4171
4172         /* Clear a space in the inbox for eth header */
4173         switch (header_id) {
4174         case MLX4_NET_TRANS_RULE_ID_IPV4:
4175                 ip_header =
4176                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4177                 memmove(ip_header, eth_header,
4178                         sizeof(*ip_header) + sizeof(*l4_header));
4179                 break;
4180         case MLX4_NET_TRANS_RULE_ID_TCP:
4181         case MLX4_NET_TRANS_RULE_ID_UDP:
4182                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4183                             (eth_header + 1);
4184                 memmove(l4_header, eth_header, sizeof(*l4_header));
4185                 break;
4186         default:
4187                 return -EINVAL;
4188         }
4189         list_for_each_entry_safe(res, tmp, rlist, list) {
4190                 if (port == res->port) {
4191                         be_mac = cpu_to_be64(res->mac << 16);
4192                         break;
4193                 }
4194         }
4195         if (!be_mac) {
4196                 pr_err("Failed adding eth header to FS rule: can't find a matching MAC for port %d\n",
4197                        port);
4198                 return -EINVAL;
4199         }
4200
4201         memset(eth_header, 0, sizeof(*eth_header));
4202         eth_header->size = sizeof(*eth_header) >> 2;
4203         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4204         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4205         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4206
4207         return 0;
4209 }
4210
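/*
 * UPDATE_QP from a slave may only change the primary-path MAC index,
 * and the requested smac_index must map to a MAC registered to that
 * slave on the QP's port.
 */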
4211 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4212 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4213                            struct mlx4_vhcr *vhcr,
4214                            struct mlx4_cmd_mailbox *inbox,
4215                            struct mlx4_cmd_mailbox *outbox,
4216                            struct mlx4_cmd_info *cmd_info)
4217 {
4218         int err;
4219         u32 qpn = vhcr->in_modifier & 0xffffff;
4220         struct res_qp *rqp;
4221         u64 mac;
4222         unsigned port;
4223         u64 pri_addr_path_mask;
4224         struct mlx4_update_qp_context *cmd;
4225         int smac_index;
4226
4227         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4228
4229         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4230         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4231             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4232                 return -EPERM;
4233
4234         /* Just change the smac for the QP */
4235         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4236         if (err) {
4237                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4238                 return err;
4239         }
4240
4241         port = (rqp->sched_queue >> 6 & 1) + 1;
4242
4243         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4244                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4245                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4246                                                 smac_index, &mac);
4247
4248                 if (err) {
4249                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4250                                  qpn, smac_index);
4251                         goto err_mac;
4252                 }
4253         }
4254
4255         err = mlx4_cmd(dev, inbox->dma,
4256                        vhcr->in_modifier, 0,
4257                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4258                        MLX4_CMD_NATIVE);
4259         if (err) {
4260                 mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4261                 goto err_mac;
4262         }
4263
4264 err_mac:
4265         put_res(dev, slave, qpn, RES_QP);
4266         return err;
4267 }
4268
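/*
 * Device-managed flow-steering attach on behalf of a slave: translate
 * the port, make sure the rule's L2 header matches a MAC owned by the
 * slave (synthesizing an Ethernet header when the rule lacks one), then
 * execute the attach and record the returned rule id in the tracker so
 * it can be detached or cleaned up later.
 */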
4269 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4270                                          struct mlx4_vhcr *vhcr,
4271                                          struct mlx4_cmd_mailbox *inbox,
4272                                          struct mlx4_cmd_mailbox *outbox,
4273                                          struct mlx4_cmd_info *cmd)
4274 {
4276         struct mlx4_priv *priv = mlx4_priv(dev);
4277         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4278         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4279         int err;
4280         int qpn;
4281         struct res_qp *rqp;
4282         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4283         struct _rule_hw  *rule_header;
4284         int header_id;
4285
4286         if (dev->caps.steering_mode !=
4287             MLX4_STEERING_MODE_DEVICE_MANAGED)
4288                 return -EOPNOTSUPP;
4289
4290         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4291         ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4292         if (ctrl->port <= 0)
4293                 return -EINVAL;
4294         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4295         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4296         if (err) {
4297                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4298                 return err;
4299         }
4300         rule_header = (struct _rule_hw *)(ctrl + 1);
4301         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4302
4303         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4304                 handle_eth_header_mcast_prio(ctrl, rule_header);
4305
4306         if (slave == dev->caps.function)
4307                 goto execute;
4308
4309         switch (header_id) {
4310         case MLX4_NET_TRANS_RULE_ID_ETH:
4311                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4312                         err = -EINVAL;
4313                         goto err_put;
4314                 }
4315                 break;
4316         case MLX4_NET_TRANS_RULE_ID_IB:
4317                 break;
4318         case MLX4_NET_TRANS_RULE_ID_IPV4:
4319         case MLX4_NET_TRANS_RULE_ID_TCP:
4320         case MLX4_NET_TRANS_RULE_ID_UDP:
4321                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4322                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4323                         err = -EINVAL;
4324                         goto err_put;
4325                 }
4326                 vhcr->in_modifier +=
4327                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4328                 break;
4329         default:
4330                 pr_err("Corrupted mailbox\n");
4331                 err = -EINVAL;
4332                 goto err_put;
4333         }
4334
4335 execute:
4336         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4337                            vhcr->in_modifier, 0,
4338                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4339                            MLX4_CMD_NATIVE);
4340         if (err)
4341                 goto err_put;
4342
4343         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4344         if (err) {
4345                 mlx4_err(dev, "Failed to add flow steering resources\n");
4346                 /* detach the rule */
4347                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4348                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4349                          MLX4_CMD_NATIVE);
4350                 goto err_put;
4351         }
4352         atomic_inc(&rqp->ref_count);
4353 err_put:
4354         put_res(dev, slave, qpn, RES_QP);
4355         return err;
4356 }
4357
4358 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4359                                          struct mlx4_vhcr *vhcr,
4360                                          struct mlx4_cmd_mailbox *inbox,
4361                                          struct mlx4_cmd_mailbox *outbox,
4362                                          struct mlx4_cmd_info *cmd)
4363 {
4364         int err;
4365         struct res_qp *rqp;
4366         struct res_fs_rule *rrule;
4367
4368         if (dev->caps.steering_mode !=
4369             MLX4_STEERING_MODE_DEVICE_MANAGED)
4370                 return -EOPNOTSUPP;
4371
4372         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4373         if (err)
4374                 return err;
4375         /* Release the rule from busy state before removal */
4376         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4377         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4378         if (err)
4379                 return err;
4380
4381         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4382         if (err) {
4383                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4384                 goto out;
4385         }
4386
4387         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4388                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4389                        MLX4_CMD_NATIVE);
4390         if (!err)
4391                 atomic_dec(&rqp->ref_count);
4392 out:
4393         put_res(dev, slave, rrule->qpn, RES_QP);
4394         return err;
4395 }
4396
4397 enum {
4398         BUSY_MAX_RETRIES = 10
4399 };
4400
4401 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4402                                struct mlx4_vhcr *vhcr,
4403                                struct mlx4_cmd_mailbox *inbox,
4404                                struct mlx4_cmd_mailbox *outbox,
4405                                struct mlx4_cmd_info *cmd)
4406 {
4407         int err;
4408         int index = vhcr->in_modifier & 0xffff;
4409
4410         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4411         if (err)
4412                 return err;
4413
4414         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4415         put_res(dev, slave, index, RES_COUNTER);
4416         return err;
4417 }
4418
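/*
 * Detach every steering group still shadowed on this QP's mcg_list;
 * called while reclaiming a slave's QP resources.
 */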
4419 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4420 {
4421         struct res_gid *rgid;
4422         struct res_gid *tmp;
4423         struct mlx4_qp qp; /* dummy for calling attach/detach */
4424
4425         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4426                 switch (dev->caps.steering_mode) {
4427                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4428                         mlx4_flow_detach(dev, rgid->reg_id);
4429                         break;
4430                 case MLX4_STEERING_MODE_B0:
4431                         qp.qpn = rqp->local_qpn;
4432                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4433                                                      rgid->prot, rgid->steer);
4434                         break;
4435                 }
4436                 list_del(&rgid->list);
4437                 kfree(rgid);
4438         }
4439 }
4440
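/*
 * Single pass over the slave's resources of @type: mark each idle
 * entry busy and flag it for removal; return the number of entries
 * that were already busy and could not be claimed.
 */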
4441 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4442                           enum mlx4_resource type, int print)
4443 {
4444         struct mlx4_priv *priv = mlx4_priv(dev);
4445         struct mlx4_resource_tracker *tracker =
4446                 &priv->mfunc.master.res_tracker;
4447         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4448         struct res_common *r;
4449         struct res_common *tmp;
4450         int busy;
4451
4452         busy = 0;
4453         spin_lock_irq(mlx4_tlock(dev));
4454         list_for_each_entry_safe(r, tmp, rlist, list) {
4455                 if (r->owner == slave) {
4456                         if (!r->removing) {
4457                                 if (r->state == RES_ANY_BUSY) {
4458                                         if (print)
4459                                                 mlx4_dbg(dev,
4460                                                          "%s id 0x%llx is busy\n",
4461                                                           resource_str(type),
4462                                                           r->res_id);
4463                                         ++busy;
4464                                 } else {
4465                                         r->from_state = r->state;
4466                                         r->state = RES_ANY_BUSY;
4467                                         r->removing = 1;
4468                                 }
4469                         }
4470                 }
4471         }
4472         spin_unlock_irq(mlx4_tlock(dev));
4473
4474         return busy;
4475 }
4476
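/*
 * Retry _move_all_busy() for up to five seconds, rescheduling between
 * passes, then make one final pass that logs whatever is still busy.
 */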
4477 static int move_all_busy(struct mlx4_dev *dev, int slave,
4478                          enum mlx4_resource type)
4479 {
4480         unsigned long begin;
4481         int busy;
4482
4483         begin = jiffies;
4484         do {
4485                 busy = _move_all_busy(dev, slave, type, 0);
4486                 if (time_after(jiffies, begin + 5 * HZ))
4487                         break;
4488                 if (busy)
4489                         cond_resched();
4490         } while (busy);
4491
4492         if (busy)
4493                 busy = _move_all_busy(dev, slave, type, 1);
4494
4495         return busy;
4496 }
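
/*
 * Destroy every QP the slave still owns, unwinding each one from its
 * current state (HW -> MAPPED -> RESERVED) before freeing it and
 * dropping its references on the CQs, SRQ and MTT it used.
 */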
4497 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4498 {
4499         struct mlx4_priv *priv = mlx4_priv(dev);
4500         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4501         struct list_head *qp_list =
4502                 &tracker->slave_list[slave].res_list[RES_QP];
4503         struct res_qp *qp;
4504         struct res_qp *tmp;
4505         int state;
4506         u64 in_param;
4507         int qpn;
4508         int err;
4509
4510         err = move_all_busy(dev, slave, RES_QP);
4511         if (err)
4512                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4513                           slave);
4514
4515         spin_lock_irq(mlx4_tlock(dev));
4516         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4517                 spin_unlock_irq(mlx4_tlock(dev));
4518                 if (qp->com.owner == slave) {
4519                         qpn = qp->com.res_id;
4520                         detach_qp(dev, slave, qp);
4521                         state = qp->com.from_state;
4522                         while (state != 0) {
4523                                 switch (state) {
4524                                 case RES_QP_RESERVED:
4525                                         spin_lock_irq(mlx4_tlock(dev));
4526                                         rb_erase(&qp->com.node,
4527                                                  &tracker->res_tree[RES_QP]);
4528                                         list_del(&qp->com.list);
4529                                         spin_unlock_irq(mlx4_tlock(dev));
4530                                         if (!valid_reserved(dev, slave, qpn)) {
4531                                                 __mlx4_qp_release_range(dev, qpn, 1);
4532                                                 mlx4_release_resource(dev, slave,
4533                                                                       RES_QP, 1, 0);
4534                                         }
4535                                         kfree(qp);
4536                                         state = 0;
4537                                         break;
4538                                 case RES_QP_MAPPED:
4539                                         if (!valid_reserved(dev, slave, qpn))
4540                                                 __mlx4_qp_free_icm(dev, qpn);
4541                                         state = RES_QP_RESERVED;
4542                                         break;
4543                                 case RES_QP_HW:
4544                                         in_param = slave;
4545                                         err = mlx4_cmd(dev, in_param,
4546                                                        qp->local_qpn, 2,
4547                                                        MLX4_CMD_2RST_QP,
4548                                                        MLX4_CMD_TIME_CLASS_A,
4549                                                        MLX4_CMD_NATIVE);
4550                                         if (err)
4551                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4552                                                          slave, qp->local_qpn);
4553                                         atomic_dec(&qp->rcq->ref_count);
4554                                         atomic_dec(&qp->scq->ref_count);
4555                                         atomic_dec(&qp->mtt->ref_count);
4556                                         if (qp->srq)
4557                                                 atomic_dec(&qp->srq->ref_count);
4558                                         state = RES_QP_MAPPED;
4559                                         break;
4560                                 default:
4561                                         state = 0;
4562                                 }
4563                         }
4564                 }
4565                 spin_lock_irq(mlx4_tlock(dev));
4566         }
4567         spin_unlock_irq(mlx4_tlock(dev));
4568 }
4569
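/* Destroy all SRQs owned by the slave, from HW ownership back to free. */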
4570 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4571 {
4572         struct mlx4_priv *priv = mlx4_priv(dev);
4573         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4574         struct list_head *srq_list =
4575                 &tracker->slave_list[slave].res_list[RES_SRQ];
4576         struct res_srq *srq;
4577         struct res_srq *tmp;
4578         int state;
4579         u64 in_param;
4581         int srqn;
4582         int err;
4583
4584         err = move_all_busy(dev, slave, RES_SRQ);
4585         if (err)
4586                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4587                           slave);
4588
4589         spin_lock_irq(mlx4_tlock(dev));
4590         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4591                 spin_unlock_irq(mlx4_tlock(dev));
4592                 if (srq->com.owner == slave) {
4593                         srqn = srq->com.res_id;
4594                         state = srq->com.from_state;
4595                         while (state != 0) {
4596                                 switch (state) {
4597                                 case RES_SRQ_ALLOCATED:
4598                                         __mlx4_srq_free_icm(dev, srqn);
4599                                         spin_lock_irq(mlx4_tlock(dev));
4600                                         rb_erase(&srq->com.node,
4601                                                  &tracker->res_tree[RES_SRQ]);
4602                                         list_del(&srq->com.list);
4603                                         spin_unlock_irq(mlx4_tlock(dev));
4604                                         mlx4_release_resource(dev, slave,
4605                                                               RES_SRQ, 1, 0);
4606                                         kfree(srq);
4607                                         state = 0;
4608                                         break;
4609
4610                                 case RES_SRQ_HW:
4611                                         in_param = slave;
4612                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4613                                                        MLX4_CMD_HW2SW_SRQ,
4614                                                        MLX4_CMD_TIME_CLASS_A,
4615                                                        MLX4_CMD_NATIVE);
4616                                         if (err)
4617                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4618                                                          slave, srqn);
4619
4620                                         atomic_dec(&srq->mtt->ref_count);
4621                                         if (srq->cq)
4622                                                 atomic_dec(&srq->cq->ref_count);
4623                                         state = RES_SRQ_ALLOCATED;
4624                                         break;
4625
4626                                 default:
4627                                         state = 0;
4628                                 }
4629                         }
4630                 }
4631                 spin_lock_irq(mlx4_tlock(dev));
4632         }
4633         spin_unlock_irq(mlx4_tlock(dev));
4634 }
4635
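/*
 * Destroy all CQs owned by the slave that are no longer referenced
 * by any QP.
 */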
4636 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4637 {
4638         struct mlx4_priv *priv = mlx4_priv(dev);
4639         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4640         struct list_head *cq_list =
4641                 &tracker->slave_list[slave].res_list[RES_CQ];
4642         struct res_cq *cq;
4643         struct res_cq *tmp;
4644         int state;
4645         u64 in_param;
4647         int cqn;
4648         int err;
4649
4650         err = move_all_busy(dev, slave, RES_CQ);
4651         if (err)
4652                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4653                           slave);
4654
4655         spin_lock_irq(mlx4_tlock(dev));
4656         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4657                 spin_unlock_irq(mlx4_tlock(dev));
4658                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4659                         cqn = cq->com.res_id;
4660                         state = cq->com.from_state;
4661                         while (state != 0) {
4662                                 switch (state) {
4663                                 case RES_CQ_ALLOCATED:
4664                                         __mlx4_cq_free_icm(dev, cqn);
4665                                         spin_lock_irq(mlx4_tlock(dev));
4666                                         rb_erase(&cq->com.node,
4667                                                  &tracker->res_tree[RES_CQ]);
4668                                         list_del(&cq->com.list);
4669                                         spin_unlock_irq(mlx4_tlock(dev));
4670                                         mlx4_release_resource(dev, slave,
4671                                                               RES_CQ, 1, 0);
4672                                         kfree(cq);
4673                                         state = 0;
4674                                         break;
4675
4676                                 case RES_CQ_HW:
4677                                         in_param = slave;
4678                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4679                                                        MLX4_CMD_HW2SW_CQ,
4680                                                        MLX4_CMD_TIME_CLASS_A,
4681                                                        MLX4_CMD_NATIVE);
4682                                         if (err)
4683                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4684                                                          slave, cqn);
4685                                         atomic_dec(&cq->mtt->ref_count);
4686                                         state = RES_CQ_ALLOCATED;
4687                                         break;
4688
4689                                 default:
4690                                         state = 0;
4691                                 }
4692                         }
4693                 }
4694                 spin_lock_irq(mlx4_tlock(dev));
4695         }
4696         spin_unlock_irq(mlx4_tlock(dev));
4697 }
4698
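/* Destroy all MPTs (memory regions) owned by the slave. */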
4699 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4700 {
4701         struct mlx4_priv *priv = mlx4_priv(dev);
4702         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4703         struct list_head *mpt_list =
4704                 &tracker->slave_list[slave].res_list[RES_MPT];
4705         struct res_mpt *mpt;
4706         struct res_mpt *tmp;
4707         int state;
4708         u64 in_param;
4710         int mptn;
4711         int err;
4712
4713         err = move_all_busy(dev, slave, RES_MPT);
4714         if (err)
4715                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4716                           slave);
4717
4718         spin_lock_irq(mlx4_tlock(dev));
4719         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4720                 spin_unlock_irq(mlx4_tlock(dev));
4721                 if (mpt->com.owner == slave) {
4722                         mptn = mpt->com.res_id;
4723                         state = mpt->com.from_state;
4724                         while (state != 0) {
4725                                 switch (state) {
4726                                 case RES_MPT_RESERVED:
4727                                         __mlx4_mpt_release(dev, mpt->key);
4728                                         spin_lock_irq(mlx4_tlock(dev));
4729                                         rb_erase(&mpt->com.node,
4730                                                  &tracker->res_tree[RES_MPT]);
4731                                         list_del(&mpt->com.list);
4732                                         spin_unlock_irq(mlx4_tlock(dev));
4733                                         mlx4_release_resource(dev, slave,
4734                                                               RES_MPT, 1, 0);
4735                                         kfree(mpt);
4736                                         state = 0;
4737                                         break;
4738
4739                                 case RES_MPT_MAPPED:
4740                                         __mlx4_mpt_free_icm(dev, mpt->key);
4741                                         state = RES_MPT_RESERVED;
4742                                         break;
4743
4744                                 case RES_MPT_HW:
4745                                         in_param = slave;
4746                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4747                                                      MLX4_CMD_HW2SW_MPT,
4748                                                      MLX4_CMD_TIME_CLASS_A,
4749                                                      MLX4_CMD_NATIVE);
4750                                         if (err)
4751                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4752                                                          slave, mptn);
4753                                         if (mpt->mtt)
4754                                                 atomic_dec(&mpt->mtt->ref_count);
4755                                         state = RES_MPT_MAPPED;
4756                                         break;
4757                                 default:
4758                                         state = 0;
4759                                 }
4760                         }
4761                 }
4762                 spin_lock_irq(mlx4_tlock(dev));
4763         }
4764         spin_unlock_irq(mlx4_tlock(dev));
4765 }
4766
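/*
 * Free all MTT ranges still owned by the slave; runs after the QPs,
 * CQs, SRQs, MRs and EQs that referenced them have been torn down.
 */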
4767 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4768 {
4769         struct mlx4_priv *priv = mlx4_priv(dev);
4770         struct mlx4_resource_tracker *tracker =
4771                 &priv->mfunc.master.res_tracker;
4772         struct list_head *mtt_list =
4773                 &tracker->slave_list[slave].res_list[RES_MTT];
4774         struct res_mtt *mtt;
4775         struct res_mtt *tmp;
4776         int state;
4778         int base;
4779         int err;
4780
4781         err = move_all_busy(dev, slave, RES_MTT);
4782         if (err)
4783                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4784                           slave);
4785
4786         spin_lock_irq(mlx4_tlock(dev));
4787         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4788                 spin_unlock_irq(mlx4_tlock(dev));
4789                 if (mtt->com.owner == slave) {
4790                         base = mtt->com.res_id;
4791                         state = mtt->com.from_state;
4792                         while (state != 0) {
4793                                 switch (state) {
4794                                 case RES_MTT_ALLOCATED:
4795                                         __mlx4_free_mtt_range(dev, base,
4796                                                               mtt->order);
4797                                         spin_lock_irq(mlx4_tlock(dev));
4798                                         rb_erase(&mtt->com.node,
4799                                                  &tracker->res_tree[RES_MTT]);
4800                                         list_del(&mtt->com.list);
4801                                         spin_unlock_irq(mlx4_tlock(dev));
4802                                         mlx4_release_resource(dev, slave, RES_MTT,
4803                                                               1 << mtt->order, 0);
4804                                         kfree(mtt);
4805                                         state = 0;
4806                                         break;
4807
4808                                 default:
4809                                         state = 0;
4810                                 }
4811                         }
4812                 }
4813                 spin_lock_irq(mlx4_tlock(dev));
4814         }
4815         spin_unlock_irq(mlx4_tlock(dev));
4816 }
4817
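/*
 * Detach and free all flow steering rules owned by the slave.  The
 * DETACH command is issued best-effort; the tracker entry is freed
 * either way.
 */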
4818 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4819 {
4820         struct mlx4_priv *priv = mlx4_priv(dev);
4821         struct mlx4_resource_tracker *tracker =
4822                 &priv->mfunc.master.res_tracker;
4823         struct list_head *fs_rule_list =
4824                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4825         struct res_fs_rule *fs_rule;
4826         struct res_fs_rule *tmp;
4827         int state;
4828         u64 base;
4829         int err;
4830
4831         err = move_all_busy(dev, slave, RES_FS_RULE);
4832         if (err)
4833                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4834                           slave);
4835
4836         spin_lock_irq(mlx4_tlock(dev));
4837         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4838                 spin_unlock_irq(mlx4_tlock(dev));
4839                 if (fs_rule->com.owner == slave) {
4840                         base = fs_rule->com.res_id;
4841                         state = fs_rule->com.from_state;
4842                         while (state != 0) {
4843                                 switch (state) {
4844                                 case RES_FS_RULE_ALLOCATED:
4845                                         /* detach rule */
4846                                         err = mlx4_cmd(dev, base, 0, 0,
4847                                                        MLX4_QP_FLOW_STEERING_DETACH,
4848                                                        MLX4_CMD_TIME_CLASS_A,
4849                                                        MLX4_CMD_NATIVE);
4850
4851                                         spin_lock_irq(mlx4_tlock(dev));
4852                                         rb_erase(&fs_rule->com.node,
4853                                                  &tracker->res_tree[RES_FS_RULE]);
4854                                         list_del(&fs_rule->com.list);
4855                                         spin_unlock_irq(mlx4_tlock(dev));
4856                                         kfree(fs_rule);
4857                                         state = 0;
4858                                         break;
4859
4860                                 default:
4861                                         state = 0;
4862                                 }
4863                         }
4864                 }
4865                 spin_lock_irq(mlx4_tlock(dev));
4866         }
4867         spin_unlock_irq(mlx4_tlock(dev));
4868 }
4869
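/* Move all EQs owned by the slave back to SW ownership and free them. */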
4870 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4871 {
4872         struct mlx4_priv *priv = mlx4_priv(dev);
4873         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4874         struct list_head *eq_list =
4875                 &tracker->slave_list[slave].res_list[RES_EQ];
4876         struct res_eq *eq;
4877         struct res_eq *tmp;
4878         int err;
4879         int state;
4881         int eqn;
4882
4883         err = move_all_busy(dev, slave, RES_EQ);
4884         if (err)
4885                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4886                           slave);
4887
4888         spin_lock_irq(mlx4_tlock(dev));
4889         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4890                 spin_unlock_irq(mlx4_tlock(dev));
4891                 if (eq->com.owner == slave) {
4892                         eqn = eq->com.res_id;
4893                         state = eq->com.from_state;
4894                         while (state != 0) {
4895                                 switch (state) {
4896                                 case RES_EQ_RESERVED:
4897                                         spin_lock_irq(mlx4_tlock(dev));
4898                                         rb_erase(&eq->com.node,
4899                                                  &tracker->res_tree[RES_EQ]);
4900                                         list_del(&eq->com.list);
4901                                         spin_unlock_irq(mlx4_tlock(dev));
4902                                         kfree(eq);
4903                                         state = 0;
4904                                         break;
4905
4906                                 case RES_EQ_HW:
4907                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4908                                                        1, MLX4_CMD_HW2SW_EQ,
4909                                                        MLX4_CMD_TIME_CLASS_A,
4910                                                        MLX4_CMD_NATIVE);
4911                                         if (err)
4912                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4913                                                          slave, eqn & 0x3ff);
4914                                         atomic_dec(&eq->mtt->ref_count);
4915                                         state = RES_EQ_RESERVED;
4916                                         break;
4917
4918                                 default:
4919                                         state = 0;
4920                                 }
4921                         }
4922                 }
4923                 spin_lock_irq(mlx4_tlock(dev));
4924         }
4925         spin_unlock_irq(mlx4_tlock(dev));
4926 }
4927
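/* Free all counters owned by the slave and return them to the quota. */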
4928 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4929 {
4930         struct mlx4_priv *priv = mlx4_priv(dev);
4931         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4932         struct list_head *counter_list =
4933                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4934         struct res_counter *counter;
4935         struct res_counter *tmp;
4936         int err;
4937         int index;
4938
4939         err = move_all_busy(dev, slave, RES_COUNTER);
4940         if (err)
4941                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4942                           slave);
4943
4944         spin_lock_irq(mlx4_tlock(dev));
4945         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4946                 if (counter->com.owner == slave) {
4947                         index = counter->com.res_id;
4948                         rb_erase(&counter->com.node,
4949                                  &tracker->res_tree[RES_COUNTER]);
4950                         list_del(&counter->com.list);
4951                         kfree(counter);
4952                         __mlx4_counter_free(dev, index);
4953                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4954                 }
4955         }
4956         spin_unlock_irq(mlx4_tlock(dev));
4957 }
4958
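/* Free all XRC domains owned by the slave. */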
4959 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4960 {
4961         struct mlx4_priv *priv = mlx4_priv(dev);
4962         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4963         struct list_head *xrcdn_list =
4964                 &tracker->slave_list[slave].res_list[RES_XRCD];
4965         struct res_xrcdn *xrcd;
4966         struct res_xrcdn *tmp;
4967         int err;
4968         int xrcdn;
4969
4970         err = move_all_busy(dev, slave, RES_XRCD);
4971         if (err)
4972                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4973                           slave);
4974
4975         spin_lock_irq(mlx4_tlock(dev));
4976         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4977                 if (xrcd->com.owner == slave) {
4978                         xrcdn = xrcd->com.res_id;
4979                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4980                         list_del(&xrcd->com.list);
4981                         kfree(xrcd);
4982                         __mlx4_xrcd_free(dev, xrcdn);
4983                 }
4984         }
4985         spin_unlock_irq(mlx4_tlock(dev));
4986 }
4987
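/*
 * Release everything a slave still owns, e.g. on FLR or when the
 * slave goes down.  The order matters: flow rules and QPs go first so
 * that their references on CQs, SRQs and MTTs are dropped before
 * those resources are freed.
 */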
4988 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4989 {
4990         struct mlx4_priv *priv = mlx4_priv(dev);
4991         mlx4_reset_roce_gids(dev, slave);
4992         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4993         rem_slave_vlans(dev, slave);
4994         rem_slave_macs(dev, slave);
4995         rem_slave_fs_rule(dev, slave);
4996         rem_slave_qps(dev, slave);
4997         rem_slave_srqs(dev, slave);
4998         rem_slave_cqs(dev, slave);
4999         rem_slave_mrs(dev, slave);
5000         rem_slave_eqs(dev, slave);
5001         rem_slave_mtts(dev, slave);
5002         rem_slave_counters(dev, slave);
5003         rem_slave_xrcdns(dev, slave);
5004         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5005 }
5006
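/*
 * Deferred work that pushes a new VLAN/QoS configuration into every
 * eligible QP of a VF via UPDATE_QP, so the change takes effect
 * without the VF re-creating its QPs.
 */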
5007 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5008 {
5009         struct mlx4_vf_immed_vlan_work *work =
5010                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5011         struct mlx4_cmd_mailbox *mailbox;
5012         struct mlx4_update_qp_context *upd_context;
5013         struct mlx4_dev *dev = &work->priv->dev;
5014         struct mlx4_resource_tracker *tracker =
5015                 &work->priv->mfunc.master.res_tracker;
5016         struct list_head *qp_list =
5017                 &tracker->slave_list[work->slave].res_list[RES_QP];
5018         struct res_qp *qp;
5019         struct res_qp *tmp;
5020         u64 qp_path_mask_vlan_ctrl =
5021                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5022                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5023                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5024                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5025                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5026                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5027
5028         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5029                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5030                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5031                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5032                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5033                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5034                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5035
5036         int err;
5037         int port, errors = 0;
5038         u8 vlan_control;
5039
5040         if (mlx4_is_slave(dev)) {
5041                 mlx4_warn(dev, "Trying to update QP on slave %d\n",
5042                           work->slave);
5043                 goto out;
5044         }
5045
5046         mailbox = mlx4_alloc_cmd_mailbox(dev);
5047         if (IS_ERR(mailbox))
5048                 goto out;
5049         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5050                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5051                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5052                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5053                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5054                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5055                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5056         else if (!work->vlan_id) /* block tagged frames only */
5057                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5058                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5059         else /* block guest-tagged TX and untagged/prio-tagged RX */
5060                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5061                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5062                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5063
5064         upd_context = mailbox->buf;
5065         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5066
5067         spin_lock_irq(mlx4_tlock(dev));
5068         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5069                 spin_unlock_irq(mlx4_tlock(dev));
5070                 if (qp->com.owner == work->slave) {
5071                         if (qp->com.from_state != RES_QP_HW ||
5072                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
5073                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5074                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5075                                 spin_lock_irq(mlx4_tlock(dev));
5076                                 continue;
5077                         }
5078                         port = (qp->sched_queue >> 6 & 1) + 1;
5079                         if (port != work->port) {
5080                                 spin_lock_irq(mlx4_tlock(dev));
5081                                 continue;
5082                         }
5083                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5084                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5085                         else
5086                                 upd_context->primary_addr_path_mask =
5087                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5088                         if (work->vlan_id == MLX4_VGT) {
5089                                 upd_context->qp_context.param3 = qp->param3;
5090                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5091                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5092                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5093                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5094                                 upd_context->qp_context.pri_path.feup = qp->feup;
5095                                 upd_context->qp_context.pri_path.sched_queue =
5096                                         qp->sched_queue;
5097                         } else {
5098                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5099                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5100                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5101                                 upd_context->qp_context.pri_path.fvl_rx =
5102                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5103                                 upd_context->qp_context.pri_path.fl =
5104                                         qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5105                                 upd_context->qp_context.pri_path.feup =
5106                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5107                                 upd_context->qp_context.pri_path.sched_queue =
5108                                         qp->sched_queue & 0xC7;
5109                                 upd_context->qp_context.pri_path.sched_queue |=
5110                                         ((work->qos & 0x7) << 3);
5111                                 upd_context->qp_mask |=
5112                                         cpu_to_be64(1ULL <<
5113                                                     MLX4_UPD_QP_MASK_QOS_VPP);
5114                                 upd_context->qp_context.qos_vport =
5115                                         work->qos_vport;
5116                         }
5117
5118                         err = mlx4_cmd(dev, mailbox->dma,
5119                                        qp->local_qpn & 0xffffff,
5120                                        0, MLX4_CMD_UPDATE_QP,
5121                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5122                         if (err) {
5123                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5124                                           work->slave, port, qp->local_qpn, err);
5125                                 errors++;
5126                         }
5127                 }
5128                 spin_lock_irq(mlx4_tlock(dev));
5129         }
5130         spin_unlock_irq(mlx4_tlock(dev));
5131         mlx4_free_cmd_mailbox(dev, mailbox);
5132
5133         if (errors)
5134                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5135                          errors, work->slave, work->port);
5136
5137         /* Unregister the previous vlan_id, if needed, provided there were
5138          * no errors while updating the QPs.
5139          */
5140         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5141             NO_INDX != work->orig_vlan_ix)
5142                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5143                                        work->orig_vlan_id);
5144 out:
5145         kfree(work);
5146         return;
5147 }