/* drivers/net/ethernet/mellanox/mlx4/resource_tracker.c */

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
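
/*
 * Each resource type has its own rb-tree, keyed by res_id, so lookups
 * and ownership checks are O(log n).  As a sketch, a caller that wants
 * to check whether a slave owns a given CQ would do something like this
 * (hypothetical "cqn", "tracker" and "owned" variables):
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
 *	owned = r && r->owner == slave;
 *	spin_unlock_irq(mlx4_tlock(dev));
 *
 * The trees are only ever walked under mlx4_tlock(), as done throughout
 * this file.
 */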

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;

        if (slave > dev->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave])
                goto out;

        if (allocated + count <= guaranteed) {
                err = 0;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                if (free - from_free > reserved)
                        err = 0;
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
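
/*
 * Worked example of the accounting above (numbers are illustrative
 * only): with quota = 100, guaranteed = 10, allocated = 8 and a request
 * of count = 5, the first 2 instances still fit in the guaranteed
 * portion, so from_free = 5 - (10 - 8) = 3.  The request is granted
 * only if taking 3 from the shared pool still leaves free > reserved,
 * where "reserved" is the sum of all functions' guarantees; this keeps
 * every function able to reach its guaranteed minimum.
 */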

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];

        if (slave > dev->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);
        if (port > 0) {
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
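
/*
 * Example with illustrative numbers: for num_instances = 1000 QPs and
 * dev->num_vfs = 3 (four functions including the PF), each function is
 * guaranteed 1000 / (2 * 4) = 125 instances and may allocate at most
 * 500 + 125 = 625.  Half of the pool is thus guaranteed across all
 * functions; the other half is shared first-come first-served.
 */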

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->num_vfs + 1) * sizeof(int),
                                                       GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < MLX4_MAX_PORTS; j++)
                                        res_alloc->res_port_rsvd[j] +=
                                                res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}
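
/*
 * VST enforcement in update_vport_qp_param(), in short: when the
 * administrator has set a default vlan for the VF (state.default_vlan
 * != MLX4_VGT), the PF rewrites the VF's paravirtualized QP context so
 * the vlan is forced in hardware.  vlan_control blocks the traffic
 * classes that must not pass, fvl_rx/fl/feup force the vlan and the
 * "eth up" handling, and bits 3..5 of sched_queue carry the default
 * QoS.  The pre-VST values are saved in struct res_qp (see the comment
 * there) so they can be restored if the VF is moved back to VGT.
 */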

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
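
/*
 * get_res()/put_res() act as a per-resource busy lock: get_res() saves
 * the current state and parks the resource in RES_ANY_BUSY, and
 * put_res() restores the saved state.  A sketch of the usual pattern
 * (hypothetical "srq" pointer, error handling elided):
 *
 *	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
 *	if (err)
 *		return err;
 *	... use srq without fear of concurrent state changes ...
 *	put_res(dev, slave, srqn, RES_SRQ);
 */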

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
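
/*
 * Note on add_res_range(): "base" is the first resource id of the range
 * and "extra" is a type-dependent argument (the MTT order, the MPT key,
 * the QPN of a flow-steering rule).  For example, registering a block
 * of eight just-reserved QPs for a slave looks like (hypothetical
 * "base_qpn" variable):
 *
 *	err = add_res_range(dev, slave, base_qpn, 8, RES_QP, 0);
 *
 * On any mid-range failure the whole range is unwound, so a range is
 * tracked either completely or not at all.
 */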

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
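
/*
 * State changes follow a two-phase protocol: the *_res_start_move_to()
 * helpers validate the transition under the tracker lock and park the
 * resource in its BUSY state, then the caller performs the firmware
 * command outside the lock and finishes with res_end_move() on success
 * or res_abort_move() on failure.  A sketch for moving a QP into
 * hardware ownership (issue_fw_command() is a hypothetical stand-in):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = issue_fw_command(...);
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);
 */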

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
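
/*
 * QP allocation from a slave is a two-step FW command flow: the slave
 * first issues RES_OP_RESERVE to reserve a QPN range (accounted against
 * its quota, then registered in the tracker), and later RES_OP_MAP_ICM
 * per QPN to back it with ICM memory before the QP can move to hardware
 * ownership.  Each step is unwound in reverse order on failure, so the
 * quota, the QP bitmap and the tracker state stay consistent.
 */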
1509
1510 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1511                          u64 in_param, u64 *out_param)
1512 {
1513         int err = -EINVAL;
1514         int base;
1515         int order;
1516
1517         if (op != RES_OP_RESERVE_AND_MAP)
1518                 return err;
1519
1520         order = get_param_l(&in_param);
1521
1522         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1523         if (err)
1524                 return err;
1525
1526         base = __mlx4_alloc_mtt_range(dev, order);
1527         if (base == -1) {
1528                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1529                 return -ENOMEM;
1530         }
1531
1532         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1533         if (err) {
1534                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1535                 __mlx4_free_mtt_range(dev, base, order);
1536         } else {
1537                 set_param_l(out_param, base);
1538         }
1539
1540         return err;
1541 }
1542
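/*
 * For MPTs, the firmware 'index' is what the slave passes around,
 * while the tracker keys entries by id = index & mpt_mask(dev).  The
 * reserved index is remembered as the MPT key so the later ICM
 * alloc/free and release calls can recover it.
 */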
1543 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1544                          u64 in_param, u64 *out_param)
1545 {
1546         int err = -EINVAL;
1547         int index;
1548         int id;
1549         struct res_mpt *mpt;
1550
1551         switch (op) {
1552         case RES_OP_RESERVE:
1553                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1554                 if (err)
1555                         break;
1556
1557                 index = __mlx4_mpt_reserve(dev);
1558                 if (index == -1) {
1559                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1560                         break;
1561                 }
1562                 id = index & mpt_mask(dev);
1563
1564                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1565                 if (err) {
1566                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1567                         __mlx4_mpt_release(dev, index);
1568                         break;
1569                 }
1570                 set_param_l(out_param, index);
1571                 break;
1572         case RES_OP_MAP_ICM:
1573                 index = get_param_l(&in_param);
1574                 id = index & mpt_mask(dev);
1575                 err = mr_res_start_move_to(dev, slave, id,
1576                                            RES_MPT_MAPPED, &mpt);
1577                 if (err)
1578                         return err;
1579
1580                 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1581                 if (err) {
1582                         res_abort_move(dev, slave, RES_MPT, id);
1583                         return err;
1584                 }
1585
1586                 res_end_move(dev, slave, RES_MPT, id);
1587                 break;
1588         }
1589         return err;
1590 }
1591
1592 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1593                         u64 in_param, u64 *out_param)
1594 {
1595         int cqn;
1596         int err;
1597
1598         switch (op) {
1599         case RES_OP_RESERVE_AND_MAP:
1600                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1601                 if (err)
1602                         break;
1603
1604                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1605                 if (err) {
1606                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1607                         break;
1608                 }
1609
1610                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1611                 if (err) {
1612                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1613                         __mlx4_cq_free_icm(dev, cqn);
1614                         break;
1615                 }
1616
1617                 set_param_l(out_param, cqn);
1618                 break;
1619
1620         default:
1621                 err = -EINVAL;
1622         }
1623
1624         return err;
1625 }
1626
1627 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1628                          u64 in_param, u64 *out_param)
1629 {
1630         int srqn;
1631         int err;
1632
1633         switch (op) {
1634         case RES_OP_RESERVE_AND_MAP:
1635                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1636                 if (err)
1637                         break;
1638
1639                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1640                 if (err) {
1641                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1642                         break;
1643                 }
1644
1645                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1646                 if (err) {
1647                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1648                         __mlx4_srq_free_icm(dev, srqn);
1649                         break;
1650                 }
1651
1652                 set_param_l(out_param, srqn);
1653                 break;
1654
1655         default:
1656                 err = -EINVAL;
1657         }
1658
1659         return err;
1660 }
1661
1662 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1663 {
1664         struct mlx4_priv *priv = mlx4_priv(dev);
1665         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1666         struct mac_res *res;
1667
1668         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1669                 return -EINVAL;
1670         res = kzalloc(sizeof(*res), GFP_KERNEL);
1671         if (!res) {
1672                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1673                 return -ENOMEM;
1674         }
1675         res->mac = mac;
1676         res->port = (u8) port;
1677         list_add_tail(&res->list,
1678                       &tracker->slave_list[slave].res_list[RES_MAC]);
1679         return 0;
1680 }
1681
1682 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1683                                int port)
1684 {
1685         struct mlx4_priv *priv = mlx4_priv(dev);
1686         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1687         struct list_head *mac_list =
1688                 &tracker->slave_list[slave].res_list[RES_MAC];
1689         struct mac_res *res, *tmp;
1690
1691         list_for_each_entry_safe(res, tmp, mac_list, list) {
1692                 if (res->mac == mac && res->port == (u8) port) {
1693                         list_del(&res->list);
1694                         mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1695                         kfree(res);
1696                         break;
1697                 }
1698         }
1699 }
1700
1701 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1702 {
1703         struct mlx4_priv *priv = mlx4_priv(dev);
1704         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1705         struct list_head *mac_list =
1706                 &tracker->slave_list[slave].res_list[RES_MAC];
1707         struct mac_res *res, *tmp;
1708
1709         list_for_each_entry_safe(res, tmp, mac_list, list) {
1710                 list_del(&res->list);
1711                 __mlx4_unregister_mac(dev, res->port, res->mac);
1712                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1713                 kfree(res);
1714         }
1715 }
1716
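/*
 * __mlx4_register_mac() returns the allocated MAC table index (>= 0)
 * on success, so a non-negative return is written back to the slave
 * via set_param_l() and then treated as success.
 */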
1717 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718                          u64 in_param, u64 *out_param, int in_port)
1719 {
1720         int err = -EINVAL;
1721         int port;
1722         u64 mac;
1723
1724         if (op != RES_OP_RESERVE_AND_MAP)
1725                 return err;
1726
1727         port = !in_port ? get_param_l(out_param) : in_port;
1728         mac = in_param;
1729
1730         err = __mlx4_register_mac(dev, port, mac);
1731         if (err >= 0) {
1732                 set_param_l(out_param, err);
1733                 err = 0;
1734         }
1735
1736         if (!err) {
1737                 err = mac_add_to_slave(dev, slave, mac, port);
1738                 if (err)
1739                         __mlx4_unregister_mac(dev, port, mac);
1740         }
1741         return err;
1742 }
1743
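/*
 * VLANs are reference counted per slave: registering the same
 * (vlan, port) pair again only bumps ref_count, and the entry (and its
 * quota) is released only once the count drops back to zero.
 */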
1744 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1745                              int port, int vlan_index)
1746 {
1747         struct mlx4_priv *priv = mlx4_priv(dev);
1748         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1749         struct list_head *vlan_list =
1750                 &tracker->slave_list[slave].res_list[RES_VLAN];
1751         struct vlan_res *res, *tmp;
1752
1753         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1754                 if (res->vlan == vlan && res->port == (u8) port) {
1755                         /* vlan already registered; just bump the ref count */
1756                         ++res->ref_count;
1757                         return 0;
1758                 }
1759         }
1760
1761         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1762                 return -EINVAL;
1763         res = kzalloc(sizeof(*res), GFP_KERNEL);
1764         if (!res) {
1765                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1766                 return -ENOMEM;
1767         }
1768         res->vlan = vlan;
1769         res->port = (u8) port;
1770         res->vlan_index = vlan_index;
1771         res->ref_count = 1;
1772         list_add_tail(&res->list,
1773                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1774         return 0;
1775 }
1776
1778 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1779                                 int port)
1780 {
1781         struct mlx4_priv *priv = mlx4_priv(dev);
1782         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1783         struct list_head *vlan_list =
1784                 &tracker->slave_list[slave].res_list[RES_VLAN];
1785         struct vlan_res *res, *tmp;
1786
1787         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1788                 if (res->vlan == vlan && res->port == (u8) port) {
1789                         if (!--res->ref_count) {
1790                                 list_del(&res->list);
1791                                 mlx4_release_resource(dev, slave, RES_VLAN,
1792                                                       1, port);
1793                                 kfree(res);
1794                         }
1795                         break;
1796                 }
1797         }
1798 }
1799
1800 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1801 {
1802         struct mlx4_priv *priv = mlx4_priv(dev);
1803         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1804         struct list_head *vlan_list =
1805                 &tracker->slave_list[slave].res_list[RES_VLAN];
1806         struct vlan_res *res, *tmp;
1807         int i;
1808
1809         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1810                 list_del(&res->list);
1811                 /* drop the vlan once for every reference the slave held */
1812                 for (i = 0; i < res->ref_count; i++)
1813                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1814                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1815                 kfree(res);
1816         }
1817 }
1818
1819 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1820                           u64 in_param, u64 *out_param, int in_port)
1821 {
1822         struct mlx4_priv *priv = mlx4_priv(dev);
1823         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1824         int err;
1825         u16 vlan;
1826         int vlan_index;
1827         int port;
1828
1829         port = !in_port ? get_param_l(out_param) : in_port;
1830
1831         if (!port || op != RES_OP_RESERVE_AND_MAP)
1832                 return -EINVAL;
1833
1834         /* upstream kernels had a NOP for reg/unreg vlan; preserve that here. */
1835         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1836                 slave_state[slave].old_vlan_api = true;
1837                 return 0;
1838         }
1839
1840         vlan = (u16) in_param;
1841
1842         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1843         if (!err) {
1844                 set_param_l(out_param, (u32) vlan_index);
1845                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1846                 if (err)
1847                         __mlx4_unregister_vlan(dev, port, vlan);
1848         }
1849         return err;
1850 }
1851
1852 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1853                              u64 in_param, u64 *out_param)
1854 {
1855         u32 index;
1856         int err;
1857
1858         if (op != RES_OP_RESERVE)
1859                 return -EINVAL;
1860
1861         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1862         if (err)
1863                 return err;
1864
1865         err = __mlx4_counter_alloc(dev, &index);
1866         if (err) {
1867                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1868                 return err;
1869         }
1870
1871         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1872         if (err) {
1873                 __mlx4_counter_free(dev, index);
1874                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1875         } else {
1876                 set_param_l(out_param, index);
1877         }
1878
1879         return err;
1880 }
1881
1882 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1883                            u64 in_param, u64 *out_param)
1884 {
1885         u32 xrcdn;
1886         int err;
1887
1888         if (op != RES_OP_RESERVE)
1889                 return -EINVAL;
1890
1891         err = __mlx4_xrcd_alloc(dev, &xrcdn);
1892         if (err)
1893                 return err;
1894
1895         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1896         if (err)
1897                 __mlx4_xrcd_free(dev, xrcdn);
1898         else
1899                 set_param_l(out_param, xrcdn);
1900
1901         return err;
1902 }
1903
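/*
 * ALLOC_RES dispatch: the low byte of vhcr->in_modifier selects the
 * resource type, bits 8-15 carry the port for MAC/VLAN requests, and
 * vhcr->op_modifier picks the operation (e.g. RES_OP_RESERVE).  The
 * result, if any, is returned through vhcr->out_param.
 */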
1904 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1905                            struct mlx4_vhcr *vhcr,
1906                            struct mlx4_cmd_mailbox *inbox,
1907                            struct mlx4_cmd_mailbox *outbox,
1908                            struct mlx4_cmd_info *cmd)
1909 {
1910         int err;
1911         int alop = vhcr->op_modifier;
1912
1913         switch (vhcr->in_modifier & 0xFF) {
1914         case RES_QP:
1915                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1916                                    vhcr->in_param, &vhcr->out_param);
1917                 break;
1918
1919         case RES_MTT:
1920                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1921                                     vhcr->in_param, &vhcr->out_param);
1922                 break;
1923
1924         case RES_MPT:
1925                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1926                                     vhcr->in_param, &vhcr->out_param);
1927                 break;
1928
1929         case RES_CQ:
1930                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1931                                    vhcr->in_param, &vhcr->out_param);
1932                 break;
1933
1934         case RES_SRQ:
1935                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1936                                     vhcr->in_param, &vhcr->out_param);
1937                 break;
1938
1939         case RES_MAC:
1940                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1941                                     vhcr->in_param, &vhcr->out_param,
1942                                     (vhcr->in_modifier >> 8) & 0xFF);
1943                 break;
1944
1945         case RES_VLAN:
1946                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1947                                      vhcr->in_param, &vhcr->out_param,
1948                                      (vhcr->in_modifier >> 8) & 0xFF);
1949                 break;
1950
1951         case RES_COUNTER:
1952                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1953                                         vhcr->in_param, &vhcr->out_param);
1954                 break;
1955
1956         case RES_XRCD:
1957                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1958                                       vhcr->in_param, &vhcr->out_param);
1959                 break;
1960
1961         default:
1962                 err = -EINVAL;
1963                 break;
1964         }
1965
1966         return err;
1967 }
1968
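/*
 * The free path mirrors qp_alloc_res(): RES_OP_MAP_ICM moves the QP
 * back to RES_QP_RESERVED and frees its ICM (unless firmware-owned),
 * while RES_OP_RESERVE returns the whole range and its quota.
 */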
1969 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1970                        u64 in_param)
1971 {
1972         int err;
1973         int count;
1974         int base;
1975         int qpn;
1976
1977         switch (op) {
1978         case RES_OP_RESERVE:
1979                 base = get_param_l(&in_param) & 0x7fffff;
1980                 count = get_param_h(&in_param);
1981                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1982                 if (err)
1983                         break;
1984                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1985                 __mlx4_qp_release_range(dev, base, count);
1986                 break;
1987         case RES_OP_MAP_ICM:
1988                 qpn = get_param_l(&in_param) & 0x7fffff;
1989                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1990                                            NULL, 0);
1991                 if (err)
1992                         return err;
1993
1994                 if (!fw_reserved(dev, qpn))
1995                         __mlx4_qp_free_icm(dev, qpn);
1996
1997                 res_end_move(dev, slave, RES_QP, qpn);
1998
1999                 if (valid_reserved(dev, slave, qpn))
2000                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2001                 break;
2002         default:
2003                 err = -EINVAL;
2004                 break;
2005         }
2006         return err;
2007 }
2008
2009 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2010                         u64 in_param, u64 *out_param)
2011 {
2012         int err = -EINVAL;
2013         int base;
2014         int order;
2015
2016         if (op != RES_OP_RESERVE_AND_MAP)
2017                 return err;
2018
2019         base = get_param_l(&in_param);
2020         order = get_param_h(&in_param);
2021         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2022         if (!err) {
2023                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2024                 __mlx4_free_mtt_range(dev, base, order);
2025         }
2026         return err;
2027 }
2028
2029 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2030                         u64 in_param)
2031 {
2032         int err = -EINVAL;
2033         int index;
2034         int id;
2035         struct res_mpt *mpt;
2036
2037         switch (op) {
2038         case RES_OP_RESERVE:
2039                 index = get_param_l(&in_param);
2040                 id = index & mpt_mask(dev);
2041                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2042                 if (err)
2043                         break;
2044                 index = mpt->key;
2045                 put_res(dev, slave, id, RES_MPT);
2046
2047                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2048                 if (err)
2049                         break;
2050                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2051                 __mlx4_mpt_release(dev, index);
2052                 break;
2053         case RES_OP_MAP_ICM:
2054                 index = get_param_l(&in_param);
2055                 id = index & mpt_mask(dev);
2056                 err = mr_res_start_move_to(dev, slave, id,
2057                                            RES_MPT_RESERVED, &mpt);
2058                 if (err)
2059                         return err;
2060
2061                 __mlx4_mpt_free_icm(dev, mpt->key);
2062                 res_end_move(dev, slave, RES_MPT, id);
2063                 break;
2065         default:
2066                 err = -EINVAL;
2067                 break;
2068         }
2069         return err;
2070 }
2071
2072 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2073                        u64 in_param, u64 *out_param)
2074 {
2075         int cqn;
2076         int err;
2077
2078         switch (op) {
2079         case RES_OP_RESERVE_AND_MAP:
2080                 cqn = get_param_l(&in_param);
2081                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2082                 if (err)
2083                         break;
2084
2085                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2086                 __mlx4_cq_free_icm(dev, cqn);
2087                 break;
2088
2089         default:
2090                 err = -EINVAL;
2091                 break;
2092         }
2093
2094         return err;
2095 }
2096
2097 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2098                         u64 in_param, u64 *out_param)
2099 {
2100         int srqn;
2101         int err;
2102
2103         switch (op) {
2104         case RES_OP_RESERVE_AND_MAP:
2105                 srqn = get_param_l(&in_param);
2106                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2107                 if (err)
2108                         break;
2109
2110                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2111                 __mlx4_srq_free_icm(dev, srqn);
2112                 break;
2113
2114         default:
2115                 err = -EINVAL;
2116                 break;
2117         }
2118
2119         return err;
2120 }
2121
2122 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2123                             u64 in_param, u64 *out_param, int in_port)
2124 {
2125         int port;
2126         int err = 0;
2127
2128         switch (op) {
2129         case RES_OP_RESERVE_AND_MAP:
2130                 port = !in_port ? get_param_l(out_param) : in_port;
2131                 mac_del_from_slave(dev, slave, in_param, port);
2132                 __mlx4_unregister_mac(dev, port, in_param);
2133                 break;
2134         default:
2135                 err = -EINVAL;
2136                 break;
2137         }
2138
2139         return err;
2141 }
2142
2143 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2144                             u64 in_param, u64 *out_param, int port)
2145 {
2146         struct mlx4_priv *priv = mlx4_priv(dev);
2147         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2148         int err = 0;
2149
2150         switch (op) {
2151         case RES_OP_RESERVE_AND_MAP:
2152                 if (slave_state[slave].old_vlan_api)
2153                         return 0;
2154                 if (!port)
2155                         return -EINVAL;
2156                 vlan_del_from_slave(dev, slave, in_param, port);
2157                 __mlx4_unregister_vlan(dev, port, in_param);
2158                 break;
2159         default:
2160                 err = -EINVAL;
2161                 break;
2162         }
2163
2164         return err;
2165 }
2166
2167 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2168                             u64 in_param, u64 *out_param)
2169 {
2170         int index;
2171         int err;
2172
2173         if (op != RES_OP_RESERVE)
2174                 return -EINVAL;
2175
2176         index = get_param_l(&in_param);
2177         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2178         if (err)
2179                 return err;
2180
2181         __mlx4_counter_free(dev, index);
2182         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2183
2184         return err;
2185 }
2186
2187 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2188                           u64 in_param, u64 *out_param)
2189 {
2190         int xrcdn;
2191         int err;
2192
2193         if (op != RES_OP_RESERVE)
2194                 return -EINVAL;
2195
2196         xrcdn = get_param_l(&in_param);
2197         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2198         if (err)
2199                 return err;
2200
2201         __mlx4_xrcd_free(dev, xrcdn);
2202
2203         return err;
2204 }
2205
2206 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2207                           struct mlx4_vhcr *vhcr,
2208                           struct mlx4_cmd_mailbox *inbox,
2209                           struct mlx4_cmd_mailbox *outbox,
2210                           struct mlx4_cmd_info *cmd)
2211 {
2212         int err = -EINVAL;
2213         int alop = vhcr->op_modifier;
2214
2215         switch (vhcr->in_modifier & 0xFF) {
2216         case RES_QP:
2217                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2218                                   vhcr->in_param);
2219                 break;
2220
2221         case RES_MTT:
2222                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2223                                    vhcr->in_param, &vhcr->out_param);
2224                 break;
2225
2226         case RES_MPT:
2227                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2228                                    vhcr->in_param);
2229                 break;
2230
2231         case RES_CQ:
2232                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2233                                   vhcr->in_param, &vhcr->out_param);
2234                 break;
2235
2236         case RES_SRQ:
2237                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2238                                    vhcr->in_param, &vhcr->out_param);
2239                 break;
2240
2241         case RES_MAC:
2242                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2243                                    vhcr->in_param, &vhcr->out_param,
2244                                    (vhcr->in_modifier >> 8) & 0xFF);
2245                 break;
2246
2247         case RES_VLAN:
2248                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2249                                     vhcr->in_param, &vhcr->out_param,
2250                                     (vhcr->in_modifier >> 8) & 0xFF);
2251                 break;
2252
2253         case RES_COUNTER:
2254                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2255                                        vhcr->in_param, &vhcr->out_param);
2256                 break;
2257
2258         case RES_XRCD:
2259                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2260                                      vhcr->in_param, &vhcr->out_param);
2261                 break;

2262         default:
2263                 break;
2264         }
2265         return err;
2266 }
2267
2268 /* ugly but other choices are uglier */
2269 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2270 {
2271         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2272 }
2273
2274 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2275 {
2276         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2277 }
2278
2279 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2280 {
2281         return be32_to_cpu(mpt->mtt_sz);
2282 }
2283
2284 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2285 {
2286         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2287 }
2288
2289 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2290 {
2291         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2292 }
2293
2294 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2295 {
2296         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2297 }
2298
2299 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2300 {
2301         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2302 }
2303
2304 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2305 {
2306         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2307 }
2308
2309 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2310 {
2311         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2312 }
2313
2314 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2315 {
2316         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2317         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2318         int log_sq_stride = qpc->sq_size_stride & 7;
2319         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2320         int log_rq_stride = qpc->rq_size_stride & 7;
2321         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2322         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2323         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2324         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2325         int sq_size;
2326         int rq_size;
2327         int total_pages;
2328         int total_mem;
2329         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2330
2331         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2332         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2333         total_mem = sq_size + rq_size;
2334         total_pages =
2335                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2336                                    page_shift);
2337
2338         return total_pages;
2339 }
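
/*
 * Example of the sizing above, assuming a 4 KB page (page_shift = 12),
 * log_sq_size = 10, log_sq_stride = 4, page_offset = 0 and an attached
 * SRQ (so rq_size = 0): sq_size = 1 << (10 + 4 + 4) = 256 KB, hence
 * total_pages = roundup_pow_of_two(262144 >> 12) = 64 MTT entries.
 */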
2340
2341 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2342                            int size, struct res_mtt *mtt)
2343 {
2344         int res_start = mtt->com.res_id;
2345         int res_size = (1 << mtt->order);
2346
2347         if (start < res_start || start + size > res_start + res_size)
2348                 return -EPERM;
2349         return 0;
2350 }
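
/*
 * I.e. the window [start, start + size) must lie entirely inside the
 * registered range [res_start, res_start + (1 << mtt->order)); anything
 * else is a slave overreach and is refused with -EPERM.
 */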
2351
2352 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2353                            struct mlx4_vhcr *vhcr,
2354                            struct mlx4_cmd_mailbox *inbox,
2355                            struct mlx4_cmd_mailbox *outbox,
2356                            struct mlx4_cmd_info *cmd)
2357 {
2358         int err;
2359         int index = vhcr->in_modifier;
2360         struct res_mtt *mtt;
2361         struct res_mpt *mpt;
2362         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2363         int phys;
2364         int id;
2365         u32 pd;
2366         int pd_slave;
2367
2368         id = index & mpt_mask(dev);
2369         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2370         if (err)
2371                 return err;
2372
2373         /* Disable memory windows for VFs. */
2374         if (!mr_is_region(inbox->buf)) {
2375                 err = -EPERM;
2376                 goto ex_abort;
2377         }
2378
2379         /* Make sure that the PD bits related to the slave id are zeros. */
2380         pd = mr_get_pd(inbox->buf);
2381         pd_slave = (pd >> 17) & 0x7f;
2382         if (pd_slave != 0 && pd_slave != slave) {
2383                 err = -EPERM;
2384                 goto ex_abort;
2385         }
2386
2387         if (mr_is_fmr(inbox->buf)) {
2388                 /* FMR and Bind Enable are forbidden in slave devices. */
2389                 if (mr_is_bind_enabled(inbox->buf)) {
2390                         err = -EPERM;
2391                         goto ex_abort;
2392                 }
2393                 /* FMR and Memory Windows are also forbidden. */
2394                 if (!mr_is_region(inbox->buf)) {
2395                         err = -EPERM;
2396                         goto ex_abort;
2397                 }
2398         }
2399
2400         phys = mr_phys_mpt(inbox->buf);
2401         if (!phys) {
2402                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2403                 if (err)
2404                         goto ex_abort;
2405
2406                 err = check_mtt_range(dev, slave, mtt_base,
2407                                       mr_get_mtt_size(inbox->buf), mtt);
2408                 if (err)
2409                         goto ex_put;
2410
2411                 mpt->mtt = mtt;
2412         }
2413
2414         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2415         if (err)
2416                 goto ex_put;
2417
2418         if (!phys) {
2419                 atomic_inc(&mtt->ref_count);
2420                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2421         }
2422
2423         res_end_move(dev, slave, RES_MPT, id);
2424         return 0;
2425
2426 ex_put:
2427         if (!phys)
2428                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2429 ex_abort:
2430         res_abort_move(dev, slave, RES_MPT, id);
2431
2432         return err;
2433 }
2434
2435 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2436                            struct mlx4_vhcr *vhcr,
2437                            struct mlx4_cmd_mailbox *inbox,
2438                            struct mlx4_cmd_mailbox *outbox,
2439                            struct mlx4_cmd_info *cmd)
2440 {
2441         int err;
2442         int index = vhcr->in_modifier;
2443         struct res_mpt *mpt;
2444         int id;
2445
2446         id = index & mpt_mask(dev);
2447         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2448         if (err)
2449                 return err;
2450
2451         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2452         if (err)
2453                 goto ex_abort;
2454
2455         if (mpt->mtt)
2456                 atomic_dec(&mpt->mtt->ref_count);
2457
2458         res_end_move(dev, slave, RES_MPT, id);
2459         return 0;
2460
2461 ex_abort:
2462         res_abort_move(dev, slave, RES_MPT, id);
2463
2464         return err;
2465 }
2466
2467 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2468                            struct mlx4_vhcr *vhcr,
2469                            struct mlx4_cmd_mailbox *inbox,
2470                            struct mlx4_cmd_mailbox *outbox,
2471                            struct mlx4_cmd_info *cmd)
2472 {
2473         int err;
2474         int index = vhcr->in_modifier;
2475         struct res_mpt *mpt;
2476         int id;
2477
2478         id = index & mpt_mask(dev);
2479         err = get_res(dev, slave, id, RES_MPT, &mpt);
2480         if (err)
2481                 return err;
2482
2483         if (mpt->com.from_state != RES_MPT_HW) {
2484                 err = -EBUSY;
2485                 goto out;
2486         }
2487
2488         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2489
2490 out:
2491         put_res(dev, slave, id, RES_MPT);
2492         return err;
2493 }
2494
2495 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2496 {
2497         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2498 }
2499
2500 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2501 {
2502         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2503 }
2504
2505 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2506 {
2507         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2508 }
2509
2510 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2511                                   struct mlx4_qp_context *context)
2512 {
2513         u32 qpn = vhcr->in_modifier & 0xffffff;
2514         u32 qkey = 0;
2515
2516         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2517                 return;
2518
2519         /* adjust qkey in qp context */
2520         context->qkey = cpu_to_be32(qkey);
2521 }
2522
2523 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2524                              struct mlx4_vhcr *vhcr,
2525                              struct mlx4_cmd_mailbox *inbox,
2526                              struct mlx4_cmd_mailbox *outbox,
2527                              struct mlx4_cmd_info *cmd)
2528 {
2529         int err;
2530         int qpn = vhcr->in_modifier & 0x7fffff;
2531         struct res_mtt *mtt;
2532         struct res_qp *qp;
2533         struct mlx4_qp_context *qpc = inbox->buf + 8;
2534         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2535         int mtt_size = qp_get_mtt_size(qpc);
2536         struct res_cq *rcq;
2537         struct res_cq *scq;
2538         int rcqn = qp_get_rcqn(qpc);
2539         int scqn = qp_get_scqn(qpc);
2540         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2541         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2542         struct res_srq *srq;
2543         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2544
2545         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2546         if (err)
2547                 return err;
2548         qp->local_qpn = local_qpn;
2549         qp->sched_queue = 0;
2550         qp->param3 = 0;
2551         qp->vlan_control = 0;
2552         qp->fvl_rx = 0;
2553         qp->pri_path_fl = 0;
2554         qp->vlan_index = 0;
2555         qp->feup = 0;
2556         qp->qpc_flags = be32_to_cpu(qpc->flags);
2557
2558         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2559         if (err)
2560                 goto ex_abort;
2561
2562         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2563         if (err)
2564                 goto ex_put_mtt;
2565
2566         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2567         if (err)
2568                 goto ex_put_mtt;
2569
2570         if (scqn != rcqn) {
2571                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2572                 if (err)
2573                         goto ex_put_rcq;
2574         } else {
2575                 scq = rcq;
             }
2576
2577         if (use_srq) {
2578                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2579                 if (err)
2580                         goto ex_put_scq;
2581         }
2582
2583         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2584         update_pkey_index(dev, slave, inbox);
2585         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2586         if (err)
2587                 goto ex_put_srq;
2588         atomic_inc(&mtt->ref_count);
2589         qp->mtt = mtt;
2590         atomic_inc(&rcq->ref_count);
2591         qp->rcq = rcq;
2592         atomic_inc(&scq->ref_count);
2593         qp->scq = scq;
2594
2595         if (scqn != rcqn)
2596                 put_res(dev, slave, scqn, RES_CQ);
2597
2598         if (use_srq) {
2599                 atomic_inc(&srq->ref_count);
2600                 put_res(dev, slave, srqn, RES_SRQ);
2601                 qp->srq = srq;
2602         }
2603         put_res(dev, slave, rcqn, RES_CQ);
2604         put_res(dev, slave, mtt_base, RES_MTT);
2605         res_end_move(dev, slave, RES_QP, qpn);
2606
2607         return 0;
2608
2609 ex_put_srq:
2610         if (use_srq)
2611                 put_res(dev, slave, srqn, RES_SRQ);
2612 ex_put_scq:
2613         if (scqn != rcqn)
2614                 put_res(dev, slave, scqn, RES_CQ);
2615 ex_put_rcq:
2616         put_res(dev, slave, rcqn, RES_CQ);
2617 ex_put_mtt:
2618         put_res(dev, slave, mtt_base, RES_MTT);
2619 ex_abort:
2620         res_abort_move(dev, slave, RES_QP, qpn);
2621
2622         return err;
2623 }
2624
2625 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2626 {
2627         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2628 }
2629
2630 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2631 {
2632         int log_eq_size = eqc->log_eq_size & 0x1f;
2633         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2634
2635         if (log_eq_size + 5 < page_shift)
2636                 return 1;
2637
2638         return 1 << (log_eq_size + 5 - page_shift);
2639 }
2640
2641 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2642 {
2643         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2644 }
2645
2646 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2647 {
2648         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2649         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2650
2651         if (log_cq_size + 5 < page_shift)
2652                 return 1;
2653
2654         return 1 << (log_cq_size + 5 - page_shift);
2655 }
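
/*
 * Both helpers above assume 32-byte (1 << 5) EQEs/CQEs, hence the
 * "+ 5": e.g. a CQ with log_cq_size = 16 on 4 KB pages needs
 * 1 << (16 + 5 - 12) = 512 MTT entries, with a floor of one entry when
 * the whole queue fits in a single page.
 */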
2656
2657 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2658                           struct mlx4_vhcr *vhcr,
2659                           struct mlx4_cmd_mailbox *inbox,
2660                           struct mlx4_cmd_mailbox *outbox,
2661                           struct mlx4_cmd_info *cmd)
2662 {
2663         int err;
2664         int eqn = vhcr->in_modifier;
2665         int res_id = (slave << 8) | eqn;
2666         struct mlx4_eq_context *eqc = inbox->buf;
2667         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2668         int mtt_size = eq_get_mtt_size(eqc);
2669         struct res_eq *eq;
2670         struct res_mtt *mtt;
2671
2672         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2673         if (err)
2674                 return err;
2675         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2676         if (err)
2677                 goto out_add;
2678
2679         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2680         if (err)
2681                 goto out_move;
2682
2683         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2684         if (err)
2685                 goto out_put;
2686
2687         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2688         if (err)
2689                 goto out_put;
2690
2691         atomic_inc(&mtt->ref_count);
2692         eq->mtt = mtt;
2693         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2694         res_end_move(dev, slave, RES_EQ, res_id);
2695         return 0;
2696
2697 out_put:
2698         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2699 out_move:
2700         res_abort_move(dev, slave, RES_EQ, res_id);
2701 out_add:
2702         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2703         return err;
2704 }
2705
2706 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2707                               int len, struct res_mtt **res)
2708 {
2709         struct mlx4_priv *priv = mlx4_priv(dev);
2710         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2711         struct res_mtt *mtt;
2712         int err = -EINVAL;
2713
2714         spin_lock_irq(mlx4_tlock(dev));
2715         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2716                             com.list) {
2717                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2718                         *res = mtt;
2719                         mtt->com.from_state = mtt->com.state;
2720                         mtt->com.state = RES_MTT_BUSY;
2721                         err = 0;
2722                         break;
2723                 }
2724         }
2725         spin_unlock_irq(mlx4_tlock(dev));
2726
2727         return err;
2728 }
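
/*
 * The matching MTT is returned in RES_MTT_BUSY state, taken under the
 * tracker lock; the caller (mlx4_WRITE_MTT_wrapper) must drop it again
 * with put_res() once the write completes.
 */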
2729
2730 static int verify_qp_parameters(struct mlx4_dev *dev,
2731                                 struct mlx4_cmd_mailbox *inbox,
2732                                 enum qp_transition transition, u8 slave)
2733 {
2734         u32                     qp_type;
2735         struct mlx4_qp_context  *qp_ctx;
2736         enum mlx4_qp_optpar     optpar;
2737
2738         qp_ctx  = inbox->buf + 8;
2739         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2740         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2741
2742         switch (qp_type) {
2743         case MLX4_QP_ST_RC:
2744         case MLX4_QP_ST_UC:
2745                 switch (transition) {
2746                 case QP_TRANS_INIT2RTR:
2747                 case QP_TRANS_RTR2RTS:
2748                 case QP_TRANS_RTS2RTS:
2749                 case QP_TRANS_SQD2SQD:
2750                 case QP_TRANS_SQD2RTS:
2751                         if (slave != mlx4_master_func_num(dev)) {
2752                                 /* slaves have only gid index 0 */
2753                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2754                                         if (qp_ctx->pri_path.mgid_index)
2755                                                 return -EINVAL;
2756                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2757                                         if (qp_ctx->alt_path.mgid_index)
2758                                                 return -EINVAL;
                            }
2759                         break;
2760                 default:
2761                         break;
2762                 }
2763
2764                 break;
2765         default:
2766                 break;
2767         }
2768
2769         return 0;
2770 }
2771
2772 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2773                            struct mlx4_vhcr *vhcr,
2774                            struct mlx4_cmd_mailbox *inbox,
2775                            struct mlx4_cmd_mailbox *outbox,
2776                            struct mlx4_cmd_info *cmd)
2777 {
2778         struct mlx4_mtt mtt;
2779         __be64 *page_list = inbox->buf;
2780         u64 *pg_list = (u64 *)page_list;
2781         int i;
2782         struct res_mtt *rmtt = NULL;
2783         int start = be64_to_cpu(page_list[0]);
2784         int npages = vhcr->in_modifier;
2785         int err;
2786
2787         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2788         if (err)
2789                 return err;
2790
2791         /* Call the SW implementation of write_mtt:
2792          * - Prepare a dummy mtt struct
2793          * - Translate inbox contents to simple addresses in host endianness */
2794         mtt.offset = 0;  /* TBD: this is broken, but it is not handled since
2795                             the offset is not actually used */
2796         mtt.order = 0;
2797         mtt.page_shift = 0;
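	/*
	 * Mailbox layout as consumed here: page_list[0] holds the
	 * starting MTT offset, page_list[1] is skipped, and entries
	 * 2..npages+1 hold the page addresses; bit 0 of each address is
	 * the MTT "present" flag and is masked off before the SW write.
	 */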
2798         for (i = 0; i < npages; ++i)
2799                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2800
2801         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2802                                ((u64 *)page_list + 2));
2803
2804         if (rmtt)
2805                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2806
2807         return err;
2808 }
2809
2810 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2811                           struct mlx4_vhcr *vhcr,
2812                           struct mlx4_cmd_mailbox *inbox,
2813                           struct mlx4_cmd_mailbox *outbox,
2814                           struct mlx4_cmd_info *cmd)
2815 {
2816         int eqn = vhcr->in_modifier;
2817         int res_id = eqn | (slave << 8);
2818         struct res_eq *eq;
2819         int err;
2820
2821         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2822         if (err)
2823                 return err;
2824
2825         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2826         if (err)
2827                 goto ex_abort;
2828
2829         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2830         if (err)
2831                 goto ex_put;
2832
2833         atomic_dec(&eq->mtt->ref_count);
2834         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2835         res_end_move(dev, slave, RES_EQ, res_id);
2836         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2837
2838         return 0;
2839
2840 ex_put:
2841         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2842 ex_abort:
2843         res_abort_move(dev, slave, RES_EQ, res_id);
2844
2845         return err;
2846 }
2847
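/*
 * mlx4_GEN_EQE() injects an event into a slave: it looks up the EQ the
 * slave registered for this event type, copies the 28-byte EQE body
 * into a mailbox (bumping the command token for MLX4_EVENT_TYPE_CMD
 * events), and posts GEN_EQE with the slave in bits 0-7 and the EQN in
 * bits 16-23 of the in_modifier.
 */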
2848 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2849 {
2850         struct mlx4_priv *priv = mlx4_priv(dev);
2851         struct mlx4_slave_event_eq_info *event_eq;
2852         struct mlx4_cmd_mailbox *mailbox;
2853         u32 in_modifier = 0;
2854         int err;
2855         int res_id;
2856         struct res_eq *req;
2857
2858         if (!priv->mfunc.master.slave_state)
2859                 return -EINVAL;
2860
2861         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2862
2863         /* Create the event only if the slave is registered */
2864         if (event_eq->eqn < 0)
2865                 return 0;
2866
2867         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2868         res_id = (slave << 8) | event_eq->eqn;
2869         err = get_res(dev, slave, res_id, RES_EQ, &req);
2870         if (err)
2871                 goto unlock;
2872
2873         if (req->com.from_state != RES_EQ_HW) {
2874                 err = -EINVAL;
2875                 goto put;
2876         }
2877
2878         mailbox = mlx4_alloc_cmd_mailbox(dev);
2879         if (IS_ERR(mailbox)) {
2880                 err = PTR_ERR(mailbox);
2881                 goto put;
2882         }
2883
2884         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2885                 ++event_eq->token;
2886                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2887         }
2888
2889         memcpy(mailbox->buf, (u8 *) eqe, 28);
2890
2891         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2892
2893         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2894                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2895                        MLX4_CMD_NATIVE);
2896
2897         put_res(dev, slave, res_id, RES_EQ);
2898         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2899         mlx4_free_cmd_mailbox(dev, mailbox);
2900         return err;
2901
2902 put:
2903         put_res(dev, slave, res_id, RES_EQ);
2904
2905 unlock:
2906         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2907         return err;
2908 }
2909
2910 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2911                           struct mlx4_vhcr *vhcr,
2912                           struct mlx4_cmd_mailbox *inbox,
2913                           struct mlx4_cmd_mailbox *outbox,
2914                           struct mlx4_cmd_info *cmd)
2915 {
2916         int eqn = vhcr->in_modifier;
2917         int res_id = eqn | (slave << 8);
2918         struct res_eq *eq;
2919         int err;
2920
2921         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2922         if (err)
2923                 return err;
2924
2925         if (eq->com.from_state != RES_EQ_HW) {
2926                 err = -EINVAL;
2927                 goto ex_put;
2928         }
2929
2930         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2931
2932 ex_put:
2933         put_res(dev, slave, res_id, RES_EQ);
2934         return err;
2935 }
2936
2937 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2938                           struct mlx4_vhcr *vhcr,
2939                           struct mlx4_cmd_mailbox *inbox,
2940                           struct mlx4_cmd_mailbox *outbox,
2941                           struct mlx4_cmd_info *cmd)
2942 {
2943         int err;
2944         int cqn = vhcr->in_modifier;
2945         struct mlx4_cq_context *cqc = inbox->buf;
2946         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2947         struct res_cq *cq;
2948         struct res_mtt *mtt;
2949
2950         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2951         if (err)
2952                 return err;
2953         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2954         if (err)
2955                 goto out_move;
2956         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2957         if (err)
2958                 goto out_put;
2959         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2960         if (err)
2961                 goto out_put;
2962         atomic_inc(&mtt->ref_count);
2963         cq->mtt = mtt;
2964         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2965         res_end_move(dev, slave, RES_CQ, cqn);
2966         return 0;
2967
2968 out_put:
2969         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2970 out_move:
2971         res_abort_move(dev, slave, RES_CQ, cqn);
2972         return err;
2973 }
2974
2975 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2976                           struct mlx4_vhcr *vhcr,
2977                           struct mlx4_cmd_mailbox *inbox,
2978                           struct mlx4_cmd_mailbox *outbox,
2979                           struct mlx4_cmd_info *cmd)
2980 {
2981         int err;
2982         int cqn = vhcr->in_modifier;
2983         struct res_cq *cq;
2984
2985         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2986         if (err)
2987                 return err;
2988         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2989         if (err)
2990                 goto out_move;
2991         atomic_dec(&cq->mtt->ref_count);
2992         res_end_move(dev, slave, RES_CQ, cqn);
2993         return 0;
2994
2995 out_move:
2996         res_abort_move(dev, slave, RES_CQ, cqn);
2997         return err;
2998 }
2999
3000 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3001                           struct mlx4_vhcr *vhcr,
3002                           struct mlx4_cmd_mailbox *inbox,
3003                           struct mlx4_cmd_mailbox *outbox,
3004                           struct mlx4_cmd_info *cmd)
3005 {
3006         int cqn = vhcr->in_modifier;
3007         struct res_cq *cq;
3008         int err;
3009
3010         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3011         if (err)
3012                 return err;
3013
3014         if (cq->com.from_state != RES_CQ_HW) {
3015                 err = -EBUSY;
                     goto ex_put;
             }
3016
3017         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3018 ex_put:
3019         put_res(dev, slave, cqn, RES_CQ);
3020
3021         return err;
3022 }
3023
3024 static int handle_resize(struct mlx4_dev *dev, int slave,
3025                          struct mlx4_vhcr *vhcr,
3026                          struct mlx4_cmd_mailbox *inbox,
3027                          struct mlx4_cmd_mailbox *outbox,
3028                          struct mlx4_cmd_info *cmd,
3029                          struct res_cq *cq)
3030 {
3031         int err;
3032         struct res_mtt *orig_mtt;
3033         struct res_mtt *mtt;
3034         struct mlx4_cq_context *cqc = inbox->buf;
3035         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3036
3037         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3038         if (err)
3039                 return err;
3040
3041         if (orig_mtt != cq->mtt) {
3042                 err = -EINVAL;
3043                 goto ex_put;
3044         }
3045
3046         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3047         if (err)
3048                 goto ex_put;
3049
3050         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3051         if (err)
3052                 goto ex_put1;
3053         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3054         if (err)
3055                 goto ex_put1;
3056         atomic_dec(&orig_mtt->ref_count);
3057         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3058         atomic_inc(&mtt->ref_count);
3059         cq->mtt = mtt;
3060         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3061         return 0;
3062
3063 ex_put1:
3064         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3065 ex_put:
3066         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3067
3068         return err;
3070 }
3071
3072 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3073                            struct mlx4_vhcr *vhcr,
3074                            struct mlx4_cmd_mailbox *inbox,
3075                            struct mlx4_cmd_mailbox *outbox,
3076                            struct mlx4_cmd_info *cmd)
3077 {
3078         int cqn = vhcr->in_modifier;
3079         struct res_cq *cq;
3080         int err;
3081
3082         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3083         if (err)
3084                 return err;
3085
3086         if (cq->com.from_state != RES_CQ_HW) {
3087                 err = -EBUSY;
                     goto ex_put;
             }
3088
3089         if (vhcr->op_modifier == 0) {
3090                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3091                 goto ex_put;
3092         }
3093
3094         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3095 ex_put:
3096         put_res(dev, slave, cqn, RES_CQ);
3097
3098         return err;
3099 }
3100
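/*
 * Number of MTT entries spanned by an SRQ, derived from the log sizes
 * in its context (the +4 accounts for the 16-byte stride unit); at
 * least one entry is reported when the SRQ fits in a single page.
 */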
3101 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3102 {
3103         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3104         int log_rq_stride = srqc->logstride & 7;
3105         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3106
3107         if (log_srq_size + log_rq_stride + 4 < page_shift)
3108                 return 1;
3109
3110         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3111 }
3112
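/*
 * SW2HW_SRQ: the SRQ number embedded in the context must match the
 * command modifier, and the backing MTT range must belong to this
 * slave and be large enough. On success the MTT's reference count is
 * raised so it cannot be freed while the SRQ is in hardware ownership.
 */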
3113 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3114                            struct mlx4_vhcr *vhcr,
3115                            struct mlx4_cmd_mailbox *inbox,
3116                            struct mlx4_cmd_mailbox *outbox,
3117                            struct mlx4_cmd_info *cmd)
3118 {
3119         int err;
3120         int srqn = vhcr->in_modifier;
3121         struct res_mtt *mtt;
3122         struct res_srq *srq;
3123         struct mlx4_srq_context *srqc = inbox->buf;
3124         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3125
3126         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3127                 return -EINVAL;
3128
3129         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3130         if (err)
3131                 return err;
3132         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3133         if (err)
3134                 goto ex_abort;
3135         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3136                               mtt);
3137         if (err)
3138                 goto ex_put_mtt;
3139
3140         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3141         if (err)
3142                 goto ex_put_mtt;
3143
3144         atomic_inc(&mtt->ref_count);
3145         srq->mtt = mtt;
3146         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3147         res_end_move(dev, slave, RES_SRQ, srqn);
3148         return 0;
3149
3150 ex_put_mtt:
3151         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3152 ex_abort:
3153         res_abort_move(dev, slave, RES_SRQ, srqn);
3154
3155         return err;
3156 }
3157
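/*
 * HW2SW_SRQ moves the SRQ back to the ALLOCATED state and drops the
 * references taken at SW2HW time on the backing MTT and, if one is
 * attached, the CQ.
 */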
3158 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3159                            struct mlx4_vhcr *vhcr,
3160                            struct mlx4_cmd_mailbox *inbox,
3161                            struct mlx4_cmd_mailbox *outbox,
3162                            struct mlx4_cmd_info *cmd)
3163 {
3164         int err;
3165         int srqn = vhcr->in_modifier;
3166         struct res_srq *srq;
3167
3168         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3169         if (err)
3170                 return err;
3171         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3172         if (err)
3173                 goto ex_abort;
3174         atomic_dec(&srq->mtt->ref_count);
3175         if (srq->cq)
3176                 atomic_dec(&srq->cq->ref_count);
3177         res_end_move(dev, slave, RES_SRQ, srqn);
3178
3179         return 0;
3180
3181 ex_abort:
3182         res_abort_move(dev, slave, RES_SRQ, srqn);
3183
3184         return err;
3185 }
3186
3187 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3188                            struct mlx4_vhcr *vhcr,
3189                            struct mlx4_cmd_mailbox *inbox,
3190                            struct mlx4_cmd_mailbox *outbox,
3191                            struct mlx4_cmd_info *cmd)
3192 {
3193         int err;
3194         int srqn = vhcr->in_modifier;
3195         struct res_srq *srq;
3196
3197         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3198         if (err)
3199                 return err;
3200         if (srq->com.from_state != RES_SRQ_HW) {
3201                 err = -EBUSY;
3202                 goto out;
3203         }
3204         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3205 out:
3206         put_res(dev, slave, srqn, RES_SRQ);
3207         return err;
3208 }
3209
3210 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3211                          struct mlx4_vhcr *vhcr,
3212                          struct mlx4_cmd_mailbox *inbox,
3213                          struct mlx4_cmd_mailbox *outbox,
3214                          struct mlx4_cmd_info *cmd)
3215 {
3216         int err;
3217         int srqn = vhcr->in_modifier;
3218         struct res_srq *srq;
3219
3220         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3221         if (err)
3222                 return err;
3223
3224         if (srq->com.from_state != RES_SRQ_HW) {
3225                 err = -EBUSY;
3226                 goto out;
3227         }
3228
3229         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3230 out:
3231         put_res(dev, slave, srqn, RES_SRQ);
3232         return err;
3233 }
3234
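/*
 * Generic wrapper for QP commands that are only legal while the QP is
 * in hardware ownership; most of the state-transition wrappers below
 * funnel through here after fixing up the mailbox contents.
 */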
3235 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3236                         struct mlx4_vhcr *vhcr,
3237                         struct mlx4_cmd_mailbox *inbox,
3238                         struct mlx4_cmd_mailbox *outbox,
3239                         struct mlx4_cmd_info *cmd)
3240 {
3241         int err;
3242         int qpn = vhcr->in_modifier & 0x7fffff;
3243         struct res_qp *qp;
3244
3245         err = get_res(dev, slave, qpn, RES_QP, &qp);
3246         if (err)
3247                 return err;
3248         if (qp->com.from_state != RES_QP_HW) {
3249                 err = -EBUSY;
3250                 goto out;
3251         }
3252
3253         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3254 out:
3255         put_res(dev, slave, qpn, RES_QP);
3256         return err;
3257 }
3258
3259 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3260                               struct mlx4_vhcr *vhcr,
3261                               struct mlx4_cmd_mailbox *inbox,
3262                               struct mlx4_cmd_mailbox *outbox,
3263                               struct mlx4_cmd_info *cmd)
3264 {
3265         struct mlx4_qp_context *context = inbox->buf + 8;
3266         adjust_proxy_tun_qkey(dev, vhcr, context);
3267         update_pkey_index(dev, slave, inbox);
3268         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269 }
3270
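/*
 * INIT2RTR is where the master enforces per-VF QP parameters: the pkey
 * index, GID and proxy/tunnel qkey are rewritten and any vport (VST)
 * overrides are applied. The values originally supplied by the VF are
 * saved in the tracker on success so they can be restored later, e.g.
 * on a switch from VST back to VGT.
 */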
3271 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3272                              struct mlx4_vhcr *vhcr,
3273                              struct mlx4_cmd_mailbox *inbox,
3274                              struct mlx4_cmd_mailbox *outbox,
3275                              struct mlx4_cmd_info *cmd)
3276 {
3277         int err;
3278         struct mlx4_qp_context *qpc = inbox->buf + 8;
3279         int qpn = vhcr->in_modifier & 0x7fffff;
3280         struct res_qp *qp;
3281         u8 orig_sched_queue;
3282         __be32  orig_param3 = qpc->param3;
3283         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3284         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3285         u8 orig_pri_path_fl = qpc->pri_path.fl;
3286         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3287         u8 orig_feup = qpc->pri_path.feup;
3288
3289         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3290         if (err)
3291                 return err;
3292
3293         update_pkey_index(dev, slave, inbox);
3294         update_gid(dev, inbox, (u8)slave);
3295         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3296         orig_sched_queue = qpc->pri_path.sched_queue;
3297         err = update_vport_qp_param(dev, inbox, slave, qpn);
3298         if (err)
3299                 return err;
3300
3301         err = get_res(dev, slave, qpn, RES_QP, &qp);
3302         if (err)
3303                 return err;
3304         if (qp->com.from_state != RES_QP_HW) {
3305                 err = -EBUSY;
3306                 goto out;
3307         }
3308
3309         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3310 out:
3311         /* If there was no error, save the sched queue value passed in by the
3312          * VF. This is essentially the QoS value provided by the VF, and will
3313          * be useful if we allow dynamic changes from VST back to VGT.
3314          */
3315         if (!err) {
3316                 qp->sched_queue = orig_sched_queue;
3317                 qp->param3      = orig_param3;
3318                 qp->vlan_control = orig_vlan_control;
3319                 qp->fvl_rx      =  orig_fvl_rx;
3320                 qp->pri_path_fl = orig_pri_path_fl;
3321                 qp->vlan_index  = orig_vlan_index;
3322                 qp->feup        = orig_feup;
3323         }
3324         put_res(dev, slave, qpn, RES_QP);
3325         return err;
3326 }
3327
3328 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3329                             struct mlx4_vhcr *vhcr,
3330                             struct mlx4_cmd_mailbox *inbox,
3331                             struct mlx4_cmd_mailbox *outbox,
3332                             struct mlx4_cmd_info *cmd)
3333 {
3334         int err;
3335         struct mlx4_qp_context *context = inbox->buf + 8;
3336
3337         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3338         if (err)
3339                 return err;
3340
3341         update_pkey_index(dev, slave, inbox);
3342         update_gid(dev, inbox, (u8)slave);
3343         adjust_proxy_tun_qkey(dev, vhcr, context);
3344         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3345 }
3346
3347 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3348                             struct mlx4_vhcr *vhcr,
3349                             struct mlx4_cmd_mailbox *inbox,
3350                             struct mlx4_cmd_mailbox *outbox,
3351                             struct mlx4_cmd_info *cmd)
3352 {
3353         int err;
3354         struct mlx4_qp_context *context = inbox->buf + 8;
3355
3356         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3357         if (err)
3358                 return err;
3359
3360         update_pkey_index(dev, slave, inbox);
3361         update_gid(dev, inbox, (u8)slave);
3362         adjust_proxy_tun_qkey(dev, vhcr, context);
3363         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3364 }
3365
3367 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3368                               struct mlx4_vhcr *vhcr,
3369                               struct mlx4_cmd_mailbox *inbox,
3370                               struct mlx4_cmd_mailbox *outbox,
3371                               struct mlx4_cmd_info *cmd)
3372 {
3373         struct mlx4_qp_context *context = inbox->buf + 8;
3374         adjust_proxy_tun_qkey(dev, vhcr, context);
3375         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3376 }
3377
3378 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3379                             struct mlx4_vhcr *vhcr,
3380                             struct mlx4_cmd_mailbox *inbox,
3381                             struct mlx4_cmd_mailbox *outbox,
3382                             struct mlx4_cmd_info *cmd)
3383 {
3384         int err;
3385         struct mlx4_qp_context *context = inbox->buf + 8;
3386
3387         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3388         if (err)
3389                 return err;
3390
3391         adjust_proxy_tun_qkey(dev, vhcr, context);
3392         update_gid(dev, inbox, (u8)slave);
3393         update_pkey_index(dev, slave, inbox);
3394         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3395 }
3396
3397 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3398                             struct mlx4_vhcr *vhcr,
3399                             struct mlx4_cmd_mailbox *inbox,
3400                             struct mlx4_cmd_mailbox *outbox,
3401                             struct mlx4_cmd_info *cmd)
3402 {
3403         int err;
3404         struct mlx4_qp_context *context = inbox->buf + 8;
3405
3406         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3407         if (err)
3408                 return err;
3409
3410         adjust_proxy_tun_qkey(dev, vhcr, context);
3411         update_gid(dev, inbox, (u8)slave);
3412         update_pkey_index(dev, slave, inbox);
3413         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3414 }
3415
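/*
 * Transition to RESET from any state: on success the QP falls back to
 * the MAPPED state and releases the references it held on its MTT, its
 * receive and send CQs and, if present, its SRQ.
 */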
3416 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3417                          struct mlx4_vhcr *vhcr,
3418                          struct mlx4_cmd_mailbox *inbox,
3419                          struct mlx4_cmd_mailbox *outbox,
3420                          struct mlx4_cmd_info *cmd)
3421 {
3422         int err;
3423         int qpn = vhcr->in_modifier & 0x7fffff;
3424         struct res_qp *qp;
3425
3426         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3427         if (err)
3428                 return err;
3429         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3430         if (err)
3431                 goto ex_abort;
3432
3433         atomic_dec(&qp->mtt->ref_count);
3434         atomic_dec(&qp->rcq->ref_count);
3435         atomic_dec(&qp->scq->ref_count);
3436         if (qp->srq)
3437                 atomic_dec(&qp->srq->ref_count);
3438         res_end_move(dev, slave, RES_QP, qpn);
3439         return 0;
3440
3441 ex_abort:
3442         res_abort_move(dev, slave, RES_QP, qpn);
3443
3444         return err;
3445 }
3446
3447 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3448                                 struct res_qp *rqp, u8 *gid)
3449 {
3450         struct res_gid *res;
3451
3452         list_for_each_entry(res, &rqp->mcg_list, list) {
3453                 if (!memcmp(res->gid, gid, 16))
3454                         return res;
3455         }
3456         return NULL;
3457 }
3458
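/*
 * Record a multicast group the QP has been attached to. The list is
 * maintained under the QP's mcg spinlock and is replayed by
 * detach_qp() when a slave's resources are cleaned up.
 */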
3459 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3460                        u8 *gid, enum mlx4_protocol prot,
3461                        enum mlx4_steer_type steer, u64 reg_id)
3462 {
3463         struct res_gid *res;
3464         int err;
3465
3466         res = kzalloc(sizeof(*res), GFP_KERNEL);
3467         if (!res)
3468                 return -ENOMEM;
3469
3470         spin_lock_irq(&rqp->mcg_spl);
3471         if (find_gid(dev, slave, rqp, gid)) {
3472                 kfree(res);
3473                 err = -EEXIST;
3474         } else {
3475                 memcpy(res->gid, gid, 16);
3476                 res->prot = prot;
3477                 res->steer = steer;
3478                 res->reg_id = reg_id;
3479                 list_add_tail(&res->list, &rqp->mcg_list);
3480                 err = 0;
3481         }
3482         spin_unlock_irq(&rqp->mcg_spl);
3483
3484         return err;
3485 }
3486
3487 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3488                        u8 *gid, enum mlx4_protocol prot,
3489                        enum mlx4_steer_type steer, u64 *reg_id)
3490 {
3491         struct res_gid *res;
3492         int err;
3493
3494         spin_lock_irq(&rqp->mcg_spl);
3495         res = find_gid(dev, slave, rqp, gid);
3496         if (!res || res->prot != prot || res->steer != steer)
3497                 err = -EINVAL;
3498         else {
3499                 *reg_id = res->reg_id;
3500                 list_del(&res->list);
3501                 kfree(res);
3502                 err = 0;
3503         }
3504         spin_unlock_irq(&rqp->mcg_spl);
3505
3506         return err;
3507 }
3508
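/*
 * qp_attach()/qp_detach() dispatch on the device steering mode:
 * device-managed flow steering identifies attachments by reg_id, while
 * B0 steering goes through the common QP attach/detach path.
 */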
3509 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3510                      int block_loopback, enum mlx4_protocol prot,
3511                      enum mlx4_steer_type type, u64 *reg_id)
3512 {
3513         switch (dev->caps.steering_mode) {
3514         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3515                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3516                                                 block_loopback, prot,
3517                                                 reg_id);
3518         case MLX4_STEERING_MODE_B0:
3519                 return mlx4_qp_attach_common(dev, qp, gid,
3520                                             block_loopback, prot, type);
3521         default:
3522                 return -EINVAL;
3523         }
3524 }
3525
3526 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3527                      enum mlx4_protocol prot, enum mlx4_steer_type type,
3528                      u64 reg_id)
3529 {
3530         switch (dev->caps.steering_mode) {
3531         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3532                 return mlx4_flow_detach(dev, reg_id);
3533         case MLX4_STEERING_MODE_B0:
3534                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3535         default:
3536                 return -EINVAL;
3537         }
3538 }
3539
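/*
 * Multicast attach/detach on behalf of a VF: op_modifier selects
 * attach vs. detach, bit 31 of in_modifier requests loopback blocking,
 * and bit 1 of gid[7] carries the steering type. The tracker's per-QP
 * GID list is kept in sync with the hardware so cleanup can undo any
 * attachments the VF leaves behind.
 */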
3540 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3541                                struct mlx4_vhcr *vhcr,
3542                                struct mlx4_cmd_mailbox *inbox,
3543                                struct mlx4_cmd_mailbox *outbox,
3544                                struct mlx4_cmd_info *cmd)
3545 {
3546         struct mlx4_qp qp; /* dummy for calling attach/detach */
3547         u8 *gid = inbox->buf;
3548         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3549         int err;
3550         int qpn;
3551         struct res_qp *rqp;
3552         u64 reg_id = 0;
3553         int attach = vhcr->op_modifier;
3554         int block_loopback = vhcr->in_modifier >> 31;
3555         u8 steer_type_mask = 2;
3556         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3557
3558         qpn = vhcr->in_modifier & 0xffffff;
3559         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3560         if (err)
3561                 return err;
3562
3563         qp.qpn = qpn;
3564         if (attach) {
3565                 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3566                                 type, &reg_id);
3567                 if (err) {
3568                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3569                         goto ex_put;
3570                 }
3571                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3572                 if (err)
3573                         goto ex_detach;
3574         } else {
3575                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3576                 if (err)
3577                         goto ex_put;
3578
3579                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3580                 if (err)
3581                         pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3582                                qpn, reg_id);
3583         }
3584         put_res(dev, slave, qpn, RES_QP);
3585         return err;
3586
3587 ex_detach:
3588         qp_detach(dev, &qp, gid, prot, type, reg_id);
3589 ex_put:
3590         put_res(dev, slave, qpn, RES_QP);
3591         return err;
3592 }
3593
3594 /*
3595  * MAC validation for Flow Steering rules.
3596  * A VF can attach rules only with a MAC address that is assigned to it.
3597  */
3598 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3599                                    struct list_head *rlist)
3600 {
3601         struct mac_res *res, *tmp;
3602         __be64 be_mac;
3603
3604         /* make sure it isn't a multicast or broadcast MAC */
3605         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3606             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3607                 list_for_each_entry_safe(res, tmp, rlist, list) {
3608                         be_mac = cpu_to_be64(res->mac << 16);
3609                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3610                                 return 0;
3611                 }
3612                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3613                        eth_header->eth.dst_mac, slave);
3614                 return -EINVAL;
3615         }
3616         return 0;
3617 }
3618
3619 /*
3620  * In case of a missing eth header, prepend an eth header with a MAC address
3621  * assigned to the VF.
3622  */
3623 static int add_eth_header(struct mlx4_dev *dev, int slave,
3624                           struct mlx4_cmd_mailbox *inbox,
3625                           struct list_head *rlist, int header_id)
3626 {
3627         struct mac_res *res, *tmp;
3628         u8 port;
3629         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3630         struct mlx4_net_trans_rule_hw_eth *eth_header;
3631         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3632         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3633         __be64 be_mac = 0;
3634         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3635
3636         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3637         port = ctrl->port;
3638         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3639
3640         /* Shift the existing headers to make room for the eth header */
3641         switch (header_id) {
3642         case MLX4_NET_TRANS_RULE_ID_IPV4:
3643                 ip_header =
3644                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3645                 memmove(ip_header, eth_header,
3646                         sizeof(*ip_header) + sizeof(*l4_header));
3647                 break;
3648         case MLX4_NET_TRANS_RULE_ID_TCP:
3649         case MLX4_NET_TRANS_RULE_ID_UDP:
3650                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3651                             (eth_header + 1);
3652                 memmove(l4_header, eth_header, sizeof(*l4_header));
3653                 break;
3654         default:
3655                 return -EINVAL;
3656         }
3657         list_for_each_entry_safe(res, tmp, rlist, list) {
3658                 if (port == res->port) {
3659                         be_mac = cpu_to_be64(res->mac << 16);
3660                         break;
3661                 }
3662         }
3663         if (!be_mac) {
3664                 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
3665                        port);
3666                 return -EINVAL;
3667         }
3668
3669         memset(eth_header, 0, sizeof(*eth_header));
3670         eth_header->size = sizeof(*eth_header) >> 2;
3671         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3672         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3673         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3674
3675         return 0;
3677 }
3678
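/*
 * Flow steering rule attach for a VF: the rule's L2 header is
 * validated against the MACs the VF owns (or synthesized via
 * add_eth_header() when the rule carries no L2 header at all), the
 * command is then executed natively, and the returned rule handle is
 * registered as an RES_FS_RULE tied to the QP so it can be detached
 * again at cleanup time.
 */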
3679 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3680                                          struct mlx4_vhcr *vhcr,
3681                                          struct mlx4_cmd_mailbox *inbox,
3682                                          struct mlx4_cmd_mailbox *outbox,
3683                                          struct mlx4_cmd_info *cmd)
3684 {
3686         struct mlx4_priv *priv = mlx4_priv(dev);
3687         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3688         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3689         int err;
3690         int qpn;
3691         struct res_qp *rqp;
3692         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3693         struct _rule_hw  *rule_header;
3694         int header_id;
3695
3696         if (dev->caps.steering_mode !=
3697             MLX4_STEERING_MODE_DEVICE_MANAGED)
3698                 return -EOPNOTSUPP;
3699
3700         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3701         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3702         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3703         if (err) {
3704                 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3705                 return err;
3706         }
3707         rule_header = (struct _rule_hw *)(ctrl + 1);
3708         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3709
3710         switch (header_id) {
3711         case MLX4_NET_TRANS_RULE_ID_ETH:
3712                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3713                         err = -EINVAL;
3714                         goto err_put;
3715                 }
3716                 break;
3717         case MLX4_NET_TRANS_RULE_ID_IB:
3718                 break;
3719         case MLX4_NET_TRANS_RULE_ID_IPV4:
3720         case MLX4_NET_TRANS_RULE_ID_TCP:
3721         case MLX4_NET_TRANS_RULE_ID_UDP:
3722                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3723                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3724                         err = -EINVAL;
3725                         goto err_put;
3726                 }
3727                 vhcr->in_modifier +=
3728                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3729                 break;
3730         default:
3731                 pr_err("Corrupted mailbox.\n");
3732                 err = -EINVAL;
3733                 goto err_put;
3734         }
3735
3736         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3737                            vhcr->in_modifier, 0,
3738                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3739                            MLX4_CMD_NATIVE);
3740         if (err)
3741                 goto err_put;
3742
3743         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3744         if (err) {
3745                 mlx4_err(dev, "Failed to add flow steering resources\n");
3746                 /* detach rule */
3747                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3748                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3749                          MLX4_CMD_NATIVE);
3750                 goto err_put;
3751         }
3752         atomic_inc(&rqp->ref_count);
3753 err_put:
3754         put_res(dev, slave, qpn, RES_QP);
3755         return err;
3756 }
3757
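/*
 * Flow steering rule detach: look up the rule (releasing it from busy
 * state so it can be removed), recover the owning QP, delete the
 * tracker entry and only then detach the rule in firmware, dropping
 * the QP's reference count on success.
 */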
3758 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3759                                          struct mlx4_vhcr *vhcr,
3760                                          struct mlx4_cmd_mailbox *inbox,
3761                                          struct mlx4_cmd_mailbox *outbox,
3762                                          struct mlx4_cmd_info *cmd)
3763 {
3764         int err;
3765         struct res_qp *rqp;
3766         struct res_fs_rule *rrule;
3767
3768         if (dev->caps.steering_mode !=
3769             MLX4_STEERING_MODE_DEVICE_MANAGED)
3770                 return -EOPNOTSUPP;
3771
3772         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3773         if (err)
3774                 return err;
3775         /* Release the rule from busy state before removal */
3776         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3777         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3778         if (err)
3779                 return err;
3780
3781         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3782         if (err) {
3783                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3784                 goto out;
3785         }
3786
3787         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3788                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3789                        MLX4_CMD_NATIVE);
3790         if (!err)
3791                 atomic_dec(&rqp->ref_count);
3792 out:
3793         put_res(dev, slave, rrule->qpn, RES_QP);
3794         return err;
3795 }
3796
3797 enum {
3798         BUSY_MAX_RETRIES = 10
3799 };
3800
3801 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3802                                struct mlx4_vhcr *vhcr,
3803                                struct mlx4_cmd_mailbox *inbox,
3804                                struct mlx4_cmd_mailbox *outbox,
3805                                struct mlx4_cmd_info *cmd)
3806 {
3807         int err;
3808         int index = vhcr->in_modifier & 0xffff;
3809
3810         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3811         if (err)
3812                 return err;
3813
3814         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3815         put_res(dev, slave, index, RES_COUNTER);
3816         return err;
3817 }
3818
3819 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
3820                                               struct mlx4_vhcr *vhcr,
3821                                               struct mlx4_cmd_mailbox *inbox,
3822                                               struct mlx4_cmd_mailbox *outbox,
3823                                               struct mlx4_cmd_info *cmd)
3824 {
3825         return -EPERM;
3826 }
3827
3829 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3830 {
3831         struct res_gid *rgid;
3832         struct res_gid *tmp;
3833         struct mlx4_qp qp; /* dummy for calling attach/detach */
3834
3835         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3836                 switch (dev->caps.steering_mode) {
3837                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3838                         mlx4_flow_detach(dev, rgid->reg_id);
3839                         break;
3840                 case MLX4_STEERING_MODE_B0:
3841                         qp.qpn = rqp->local_qpn;
3842                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3843                                                      rgid->prot, rgid->steer);
3844                         break;
3845                 }
3846                 list_del(&rgid->list);
3847                 kfree(rgid);
3848         }
3849 }
3850
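/*
 * Mark every resource of the given type owned by the slave as
 * busy/removing so that no new command can claim it; returns the
 * number of resources that were already busy and could not be marked.
 */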
3851 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3852                           enum mlx4_resource type, int print)
3853 {
3854         struct mlx4_priv *priv = mlx4_priv(dev);
3855         struct mlx4_resource_tracker *tracker =
3856                 &priv->mfunc.master.res_tracker;
3857         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3858         struct res_common *r;
3859         struct res_common *tmp;
3860         int busy;
3861
3862         busy = 0;
3863         spin_lock_irq(mlx4_tlock(dev));
3864         list_for_each_entry_safe(r, tmp, rlist, list) {
3865                 if (r->owner == slave) {
3866                         if (!r->removing) {
3867                                 if (r->state == RES_ANY_BUSY) {
3868                                         if (print)
3869                                                 mlx4_dbg(dev,
3870                                                          "%s id 0x%llx is busy\n",
3871                                                           ResourceType(type),
3872                                                           r->res_id);
3873                                         ++busy;
3874                                 } else {
3875                                         r->from_state = r->state;
3876                                         r->state = RES_ANY_BUSY;
3877                                         r->removing = 1;
3878                                 }
3879                         }
3880                 }
3881         }
3882         spin_unlock_irq(mlx4_tlock(dev));
3883
3884         return busy;
3885 }
3886
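/*
 * Poll _move_all_busy() for up to five seconds, then make one final
 * pass that logs whichever resources are still busy.
 */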
3887 static int move_all_busy(struct mlx4_dev *dev, int slave,
3888                          enum mlx4_resource type)
3889 {
3890         unsigned long begin;
3891         int busy;
3892
3893         begin = jiffies;
3894         do {
3895                 busy = _move_all_busy(dev, slave, type, 0);
3896                 if (time_after(jiffies, begin + 5 * HZ))
3897                         break;
3898                 if (busy)
3899                         cond_resched();
3900         } while (busy);
3901
3902         if (busy)
3903                 busy = _move_all_busy(dev, slave, type, 1);
3904
3905         return busy;
3906 }
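
/*
 * Force-release all QPs still owned by the slave: detach any multicast
 * attachments, then unwind each QP from its last known state
 * (HW -> MAPPED -> RESERVED), releasing ICM and the QP number range
 * along the way.
 */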
3907 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3908 {
3909         struct mlx4_priv *priv = mlx4_priv(dev);
3910         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3911         struct list_head *qp_list =
3912                 &tracker->slave_list[slave].res_list[RES_QP];
3913         struct res_qp *qp;
3914         struct res_qp *tmp;
3915         int state;
3916         u64 in_param;
3917         int qpn;
3918         int err;
3919
3920         err = move_all_busy(dev, slave, RES_QP);
3921         if (err)
3922                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3923                           "for slave %d\n", slave);
3924
3925         spin_lock_irq(mlx4_tlock(dev));
3926         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3927                 spin_unlock_irq(mlx4_tlock(dev));
3928                 if (qp->com.owner == slave) {
3929                         qpn = qp->com.res_id;
3930                         detach_qp(dev, slave, qp);
3931                         state = qp->com.from_state;
3932                         while (state != 0) {
3933                                 switch (state) {
3934                                 case RES_QP_RESERVED:
3935                                         spin_lock_irq(mlx4_tlock(dev));
3936                                         rb_erase(&qp->com.node,
3937                                                  &tracker->res_tree[RES_QP]);
3938                                         list_del(&qp->com.list);
3939                                         spin_unlock_irq(mlx4_tlock(dev));
3940                                         if (!valid_reserved(dev, slave, qpn)) {
3941                                                 __mlx4_qp_release_range(dev, qpn, 1);
3942                                                 mlx4_release_resource(dev, slave,
3943                                                                       RES_QP, 1, 0);
3944                                         }
3945                                         kfree(qp);
3946                                         state = 0;
3947                                         break;
3948                                 case RES_QP_MAPPED:
3949                                         if (!valid_reserved(dev, slave, qpn))
3950                                                 __mlx4_qp_free_icm(dev, qpn);
3951                                         state = RES_QP_RESERVED;
3952                                         break;
3953                                 case RES_QP_HW:
3954                                         in_param = slave;
3955                                         err = mlx4_cmd(dev, in_param,
3956                                                        qp->local_qpn, 2,
3957                                                        MLX4_CMD_2RST_QP,
3958                                                        MLX4_CMD_TIME_CLASS_A,
3959                                                        MLX4_CMD_NATIVE);
3960                                         if (err)
3961                                                 mlx4_dbg(dev, "rem_slave_qps: failed"
3962                                                          " to move slave %d qpn %d to"
3963                                                          " reset\n", slave,
3964                                                          qp->local_qpn);
3965                                         atomic_dec(&qp->rcq->ref_count);
3966                                         atomic_dec(&qp->scq->ref_count);
3967                                         atomic_dec(&qp->mtt->ref_count);
3968                                         if (qp->srq)
3969                                                 atomic_dec(&qp->srq->ref_count);
3970                                         state = RES_QP_MAPPED;
3971                                         break;
3972                                 default:
3973                                         state = 0;
3974                                 }
3975                         }
3976                 }
3977                 spin_lock_irq(mlx4_tlock(dev));
3978         }
3979         spin_unlock_irq(mlx4_tlock(dev));
3980 }
3981
3982 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3983 {
3984         struct mlx4_priv *priv = mlx4_priv(dev);
3985         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3986         struct list_head *srq_list =
3987                 &tracker->slave_list[slave].res_list[RES_SRQ];
3988         struct res_srq *srq;
3989         struct res_srq *tmp;
3990         int state;
3991         u64 in_param;
3992         LIST_HEAD(tlist);
3993         int srqn;
3994         int err;
3995
3996         err = move_all_busy(dev, slave, RES_SRQ);
3997         if (err)
3998                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3999                           "busy for slave %d\n", slave);
4000
4001         spin_lock_irq(mlx4_tlock(dev));
4002         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4003                 spin_unlock_irq(mlx4_tlock(dev));
4004                 if (srq->com.owner == slave) {
4005                         srqn = srq->com.res_id;
4006                         state = srq->com.from_state;
4007                         while (state != 0) {
4008                                 switch (state) {
4009                                 case RES_SRQ_ALLOCATED:
4010                                         __mlx4_srq_free_icm(dev, srqn);
4011                                         spin_lock_irq(mlx4_tlock(dev));
4012                                         rb_erase(&srq->com.node,
4013                                                  &tracker->res_tree[RES_SRQ]);
4014                                         list_del(&srq->com.list);
4015                                         spin_unlock_irq(mlx4_tlock(dev));
4016                                         mlx4_release_resource(dev, slave,
4017                                                               RES_SRQ, 1, 0);
4018                                         kfree(srq);
4019                                         state = 0;
4020                                         break;
4021
4022                                 case RES_SRQ_HW:
4023                                         in_param = slave;
4024                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4025                                                        MLX4_CMD_HW2SW_SRQ,
4026                                                        MLX4_CMD_TIME_CLASS_A,
4027                                                        MLX4_CMD_NATIVE);
4028                                         if (err)
4029                                                 mlx4_dbg(dev, "rem_slave_srqs: failed"
4030                                                          " to move slave %d srq %d to"
4031                                                          " SW ownership\n",
4032                                                          slave, srqn);
4033
4034                                         atomic_dec(&srq->mtt->ref_count);
4035                                         if (srq->cq)
4036                                                 atomic_dec(&srq->cq->ref_count);
4037                                         state = RES_SRQ_ALLOCATED;
4038                                         break;
4039
4040                                 default:
4041                                         state = 0;
4042                                 }
4043                         }
4044                 }
4045                 spin_lock_irq(mlx4_tlock(dev));
4046         }
4047         spin_unlock_irq(mlx4_tlock(dev));
4048 }
4049
4050 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4051 {
4052         struct mlx4_priv *priv = mlx4_priv(dev);
4053         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4054         struct list_head *cq_list =
4055                 &tracker->slave_list[slave].res_list[RES_CQ];
4056         struct res_cq *cq;
4057         struct res_cq *tmp;
4058         int state;
4059         u64 in_param;
4060         LIST_HEAD(tlist);
4061         int cqn;
4062         int err;
4063
4064         err = move_all_busy(dev, slave, RES_CQ);
4065         if (err)
4066                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
4067                           "busy for slave %d\n", slave);
4068
4069         spin_lock_irq(mlx4_tlock(dev));
4070         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4071                 spin_unlock_irq(mlx4_tlock(dev));
4072                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4073                         cqn = cq->com.res_id;
4074                         state = cq->com.from_state;
4075                         while (state != 0) {
4076                                 switch (state) {
4077                                 case RES_CQ_ALLOCATED:
4078                                         __mlx4_cq_free_icm(dev, cqn);
4079                                         spin_lock_irq(mlx4_tlock(dev));
4080                                         rb_erase(&cq->com.node,
4081                                                  &tracker->res_tree[RES_CQ]);
4082                                         list_del(&cq->com.list);
4083                                         spin_unlock_irq(mlx4_tlock(dev));
4084                                         mlx4_release_resource(dev, slave,
4085                                                               RES_CQ, 1, 0);
4086                                         kfree(cq);
4087                                         state = 0;
4088                                         break;
4089
4090                                 case RES_CQ_HW:
4091                                         in_param = slave;
4092                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4093                                                        MLX4_CMD_HW2SW_CQ,
4094                                                        MLX4_CMD_TIME_CLASS_A,
4095                                                        MLX4_CMD_NATIVE);
4096                                         if (err)
4097                                                 mlx4_dbg(dev, "rem_slave_cqs: failed"
4098                                                          " to move slave %d cq %d to"
4099                                                          " SW ownership\n",
4100                                                          slave, cqn);
4101                                         atomic_dec(&cq->mtt->ref_count);
4102                                         state = RES_CQ_ALLOCATED;
4103                                         break;
4104
4105                                 default:
4106                                         state = 0;
4107                                 }
4108                         }
4109                 }
4110                 spin_lock_irq(mlx4_tlock(dev));
4111         }
4112         spin_unlock_irq(mlx4_tlock(dev));
4113 }
4114
4115 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4116 {
4117         struct mlx4_priv *priv = mlx4_priv(dev);
4118         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4119         struct list_head *mpt_list =
4120                 &tracker->slave_list[slave].res_list[RES_MPT];
4121         struct res_mpt *mpt;
4122         struct res_mpt *tmp;
4123         int state;
4124         u64 in_param;
4125         LIST_HEAD(tlist);
4126         int mptn;
4127         int err;
4128
4129         err = move_all_busy(dev, slave, RES_MPT);
4130         if (err)
4131                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
4132                           "busy for slave %d\n", slave);
4133
4134         spin_lock_irq(mlx4_tlock(dev));
4135         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4136                 spin_unlock_irq(mlx4_tlock(dev));
4137                 if (mpt->com.owner == slave) {
4138                         mptn = mpt->com.res_id;
4139                         state = mpt->com.from_state;
4140                         while (state != 0) {
4141                                 switch (state) {
4142                                 case RES_MPT_RESERVED:
4143                                         __mlx4_mpt_release(dev, mpt->key);
4144                                         spin_lock_irq(mlx4_tlock(dev));
4145                                         rb_erase(&mpt->com.node,
4146                                                  &tracker->res_tree[RES_MPT]);
4147                                         list_del(&mpt->com.list);
4148                                         spin_unlock_irq(mlx4_tlock(dev));
4149                                         mlx4_release_resource(dev, slave,
4150                                                               RES_MPT, 1, 0);
4151                                         kfree(mpt);
4152                                         state = 0;
4153                                         break;
4154
4155                                 case RES_MPT_MAPPED:
4156                                         __mlx4_mpt_free_icm(dev, mpt->key);
4157                                         state = RES_MPT_RESERVED;
4158                                         break;
4159
4160                                 case RES_MPT_HW:
4161                                         in_param = slave;
4162                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4163                                                      MLX4_CMD_HW2SW_MPT,
4164                                                      MLX4_CMD_TIME_CLASS_A,
4165                                                      MLX4_CMD_NATIVE);
4166                                         if (err)
4167                                                 mlx4_dbg(dev, "rem_slave_mrs: failed"
4168                                                          " to move slave %d mpt %d to"
4169                                                          " SW ownership\n",
4170                                                          slave, mptn);
4171                                         if (mpt->mtt)
4172                                                 atomic_dec(&mpt->mtt->ref_count);
4173                                         state = RES_MPT_MAPPED;
4174                                         break;
4175                                 default:
4176                                         state = 0;
4177                                 }
4178                         }
4179                 }
4180                 spin_lock_irq(mlx4_tlock(dev));
4181         }
4182         spin_unlock_irq(mlx4_tlock(dev));
4183 }
4184
4185 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4186 {
4187         struct mlx4_priv *priv = mlx4_priv(dev);
4188         struct mlx4_resource_tracker *tracker =
4189                 &priv->mfunc.master.res_tracker;
4190         struct list_head *mtt_list =
4191                 &tracker->slave_list[slave].res_list[RES_MTT];
4192         struct res_mtt *mtt;
4193         struct res_mtt *tmp;
4194         int state;
4195         LIST_HEAD(tlist);
4196         int base;
4197         int err;
4198
4199         err = move_all_busy(dev, slave, RES_MTT);
4200         if (err)
4201                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
4202                           "busy for slave %d\n", slave);
4203
4204         spin_lock_irq(mlx4_tlock(dev));
4205         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4206                 spin_unlock_irq(mlx4_tlock(dev));
4207                 if (mtt->com.owner == slave) {
4208                         base = mtt->com.res_id;
4209                         state = mtt->com.from_state;
4210                         while (state != 0) {
4211                                 switch (state) {
4212                                 case RES_MTT_ALLOCATED:
4213                                         __mlx4_free_mtt_range(dev, base,
4214                                                               mtt->order);
4215                                         spin_lock_irq(mlx4_tlock(dev));
4216                                         rb_erase(&mtt->com.node,
4217                                                  &tracker->res_tree[RES_MTT]);
4218                                         list_del(&mtt->com.list);
4219                                         spin_unlock_irq(mlx4_tlock(dev));
4220                                         mlx4_release_resource(dev, slave, RES_MTT,
4221                                                               1 << mtt->order, 0);
4222                                         kfree(mtt);
4223                                         state = 0;
4224                                         break;
4225
4226                                 default:
4227                                         state = 0;
4228                                 }
4229                         }
4230                 }
4231                 spin_lock_irq(mlx4_tlock(dev));
4232         }
4233         spin_unlock_irq(mlx4_tlock(dev));
4234 }
4235
4236 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4237 {
4238         struct mlx4_priv *priv = mlx4_priv(dev);
4239         struct mlx4_resource_tracker *tracker =
4240                 &priv->mfunc.master.res_tracker;
4241         struct list_head *fs_rule_list =
4242                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4243         struct res_fs_rule *fs_rule;
4244         struct res_fs_rule *tmp;
4245         int state;
4246         u64 base;
4247         int err;
4248
4249         err = move_all_busy(dev, slave, RES_FS_RULE);
4250         if (err)
4251                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4252                           slave);
4253
4254         spin_lock_irq(mlx4_tlock(dev));
4255         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4256                 spin_unlock_irq(mlx4_tlock(dev));
4257                 if (fs_rule->com.owner == slave) {
4258                         base = fs_rule->com.res_id;
4259                         state = fs_rule->com.from_state;
4260                         while (state != 0) {
4261                                 switch (state) {
4262                                 case RES_FS_RULE_ALLOCATED:
4263                                         /* detach rule */
4264                                         err = mlx4_cmd(dev, base, 0, 0,
4265                                                        MLX4_QP_FLOW_STEERING_DETACH,
4266                                                        MLX4_CMD_TIME_CLASS_A,
4267                                                        MLX4_CMD_NATIVE);
4268
4269                                         spin_lock_irq(mlx4_tlock(dev));
4270                                         rb_erase(&fs_rule->com.node,
4271                                                  &tracker->res_tree[RES_FS_RULE]);
4272                                         list_del(&fs_rule->com.list);
4273                                         spin_unlock_irq(mlx4_tlock(dev));
4274                                         kfree(fs_rule);
4275                                         state = 0;
4276                                         break;
4277
4278                                 default:
4279                                         state = 0;
4280                                 }
4281                         }
4282                 }
4283                 spin_lock_irq(mlx4_tlock(dev));
4284         }
4285         spin_unlock_irq(mlx4_tlock(dev));
4286 }
4287
4288 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4289 {
4290         struct mlx4_priv *priv = mlx4_priv(dev);
4291         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4292         struct list_head *eq_list =
4293                 &tracker->slave_list[slave].res_list[RES_EQ];
4294         struct res_eq *eq;
4295         struct res_eq *tmp;
4296         int err;
4297         int state;
4298         LIST_HEAD(tlist);
4299         int eqn;
4300         struct mlx4_cmd_mailbox *mailbox;
4301
4302         err = move_all_busy(dev, slave, RES_EQ);
4303         if (err)
4304                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
4305                           "busy for slave %d\n", slave);
4306
4307         spin_lock_irq(mlx4_tlock(dev));
4308         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4309                 spin_unlock_irq(mlx4_tlock(dev));
4310                 if (eq->com.owner == slave) {
4311                         eqn = eq->com.res_id;
4312                         state = eq->com.from_state;
4313                         while (state != 0) {
4314                                 switch (state) {
4315                                 case RES_EQ_RESERVED:
4316                                         spin_lock_irq(mlx4_tlock(dev));
4317                                         rb_erase(&eq->com.node,
4318                                                  &tracker->res_tree[RES_EQ]);
4319                                         list_del(&eq->com.list);
4320                                         spin_unlock_irq(mlx4_tlock(dev));
4321                                         kfree(eq);
4322                                         state = 0;
4323                                         break;
4324
4325                                 case RES_EQ_HW:
4326                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
4327                                         if (IS_ERR(mailbox)) {
4328                                                 cond_resched();
4329                                                 continue;
4330                                         }
4331                                         err = mlx4_cmd_box(dev, slave, 0,
4332                                                            eqn & 0xff, 0,
4333                                                            MLX4_CMD_HW2SW_EQ,
4334                                                            MLX4_CMD_TIME_CLASS_A,
4335                                                            MLX4_CMD_NATIVE);
4336                                         if (err)
4337                                                 mlx4_dbg(dev, "rem_slave_eqs: failed"
4338                                                          " to move slave %d eq %d to"
4339                                                          " SW ownership\n", slave, eqn);
4340                                         mlx4_free_cmd_mailbox(dev, mailbox);
4341                                         atomic_dec(&eq->mtt->ref_count);
4342                                         state = RES_EQ_RESERVED;
4343                                         break;
4344
4345                                 default:
4346                                         state = 0;
4347                                 }
4348                         }
4349                 }
4350                 spin_lock_irq(mlx4_tlock(dev));
4351         }
4352         spin_unlock_irq(mlx4_tlock(dev));
4353 }
4354
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *counter_list =
                &tracker->slave_list[slave].res_list[RES_COUNTER];
        struct res_counter *counter;
        struct res_counter *tmp;
        int err;
        int index;

        err = move_all_busy(dev, slave, RES_COUNTER);
        if (err)
                mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
                if (counter->com.owner == slave) {
                        index = counter->com.res_id;
                        rb_erase(&counter->com.node,
                                 &tracker->res_tree[RES_COUNTER]);
                        list_del(&counter->com.list);
                        kfree(counter);
                        __mlx4_counter_free(dev, index);
                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

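/*
 * Release every XRC domain still owned by @slave and return the
 * xrcdn numbers to the allocator.
 */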
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *xrcdn_list =
                &tracker->slave_list[slave].res_list[RES_XRCD];
        struct res_xrcdn *xrcd;
        struct res_xrcdn *tmp;
        int err;
        int xrcdn;

        err = move_all_busy(dev, slave, RES_XRCD);
        if (err)
                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
                if (xrcd->com.owner == slave) {
                        xrcdn = xrcd->com.res_id;
                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
                        list_del(&xrcd->com.list);
                        kfree(xrcd);
                        __mlx4_xrcd_free(dev, xrcdn);
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

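/*
 * Tear down all resources a slave left behind (e.g. when the VF goes
 * away).  Resources are removed in dependency order: steering rules
 * and QPs before the SRQs/CQs they reference, and MTTs only after the
 * QPs/SRQs/CQs/MRs/EQs that hold references on them.
 */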
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
        rem_slave_vlans(dev, slave);
        rem_slave_macs(dev, slave);
        rem_slave_fs_rule(dev, slave);
        rem_slave_qps(dev, slave);
        rem_slave_srqs(dev, slave);
        rem_slave_cqs(dev, slave);
        rem_slave_mrs(dev, slave);
        rem_slave_eqs(dev, slave);
        rem_slave_mtts(dev, slave);
        rem_slave_counters(dev, slave);
        rem_slave_xrcdns(dev, slave);
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

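/*
 * Deferred work, run on the PF: propagate an immediate VST vlan/qos
 * change to all eligible QPs of the given VF using the UPDATE_QP
 * firmware command.
 */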
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
        struct mlx4_vf_immed_vlan_work *work =
                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_update_qp_context *upd_context;
        struct mlx4_dev *dev = &work->priv->dev;
        struct mlx4_resource_tracker *tracker =
                &work->priv->mfunc.master.res_tracker;
        struct list_head *qp_list =
                &tracker->slave_list[work->slave].res_list[RES_QP];
        struct res_qp *qp;
        struct res_qp *tmp;
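        /*
         * Bits of the primary address path that UPDATE_QP is asked to
         * modify.  The vlan-control block bits are applied only to
         * non-RC QPs (see the loop below); the remaining bits are set
         * for every updated QP.
         */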
        u64 qp_path_mask_vlan_ctrl =
                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

        int err;
        int port, errors = 0;
        u8 vlan_control;

        if (mlx4_is_slave(dev)) {
                mlx4_warn(dev, "Trying to update-qp in slave %d\n",
                          work->slave);
                goto out;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto out;
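        /*
         * Choose the vlan filtering policy: when the link is forced
         * down, block all traffic; with no VST vlan configured, block
         * only tagged frames; otherwise block untagged and
         * priority-tagged receive so that only the VST vlan passes.
         */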
        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
        else if (!work->vlan_id)
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
        else
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

        upd_context = mailbox->buf;
        upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (qp->com.owner == work->slave) {
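                        /*
                         * Skip QPs that UPDATE_QP cannot or must not
                         * touch: not yet in HW ownership, never taken
                         * through INIT2RTR, firmware-reserved, or RSS
                         * QPs.
                         */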
                        if (qp->com.from_state != RES_QP_HW ||
                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
                                spin_lock_irq(mlx4_tlock(dev));
                                continue;
                        }
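                        /* bit 6 of sched_queue holds the (0-based) port */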
                        port = (qp->sched_queue >> 6 & 1) + 1;
                        if (port != work->port) {
                                spin_lock_irq(mlx4_tlock(dev));
                                continue;
                        }
                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
                        else
                                upd_context->primary_addr_path_mask =
                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
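                        /*
                         * MLX4_VGT means revert to guest-controlled
                         * tagging: restore the values the VF itself
                         * set (cached at INIT2RTR time).  Otherwise
                         * force the new VST vlan and qos into the
                         * path.
                         */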
                        if (work->vlan_id == MLX4_VGT) {
                                upd_context->qp_context.param3 = qp->param3;
                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
                                upd_context->qp_context.pri_path.feup = qp->feup;
                                upd_context->qp_context.pri_path.sched_queue =
                                        qp->sched_queue;
                        } else {
                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
                                upd_context->qp_context.pri_path.fvl_rx =
                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
                                upd_context->qp_context.pri_path.fl =
                                        qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                                upd_context->qp_context.pri_path.feup =
                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                                upd_context->qp_context.pri_path.sched_queue =
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
                        }

                        err = mlx4_cmd(dev, mailbox->dma,
                                       qp->local_qpn & 0xffffff,
                                       0, MLX4_CMD_UPDATE_QP,
                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                        if (err) {
                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
                                          work->slave, port, qp->local_qpn, err);
                                errors++;
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
        mlx4_free_cmd_mailbox(dev, mailbox);

        if (errors)
                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
                         errors, work->slave, work->port);

        /* Unregister the previous vlan_id, if one was set, provided there
         * were no errors while updating the QPs.
         */
        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
            NO_INDX != work->orig_vlan_ix)
                __mlx4_unregister_vlan(&work->priv->dev, work->port,
                                       work->orig_vlan_id);
out:
        kfree(work);
        return;
}