drivers/infiniband/core/uverbs_cmd.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
        struct lock_class_key   key;
        char                    name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() is used
 *   instead, but similarly the kref reference is grabbed before
 *   rcu_read_unlock() is called.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
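
/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * example of how a command handler is expected to follow the read-side of
 * the scheme above, using the idr_read_pd()/put_pd_read() helpers defined
 * below.  The handler name and its caller are hypothetical; the block is
 * kept under #if 0 so it is never compiled.
 */
#if 0	/* example only */
static int example_handler_using_pd(struct ib_uverbs_file *file, int pd_handle)
{
	struct ib_pd *pd;

	/* takes a kref on the uobject and down_read()s its rwsem */
	pd = idr_read_pd(pd_handle, file->ucontext);
	if (!pd)
		return -EINVAL;	/* not found, wrong context, or not live */

	/* ... operate on the PD while holding its rwsem for reading ... */

	/* up_read() the rwsem and drop the kref */
	put_pd_read(pd);
	return 0;
}
#endif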

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
        uobj->user_handle = user_handle;
        uobj->context     = context;
        kref_init(&uobj->ref);
        init_rwsem(&uobj->mutex);
        lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
        uobj->live        = 0;
}

static void release_uobj(struct kref *kref)
{
        kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
        kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
        up_read(&uobj->mutex);
        put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
        up_write(&uobj->mutex);
        put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);

        ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&ib_uverbs_idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        spin_lock(&ib_uverbs_idr_lock);
        idr_remove(idr, uobj->id);
        spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        rcu_read_lock();
        uobj = idr_find(idr, id);
        if (uobj) {
                if (uobj->context == context)
                        kref_get(&uobj->ref);
                else
                        uobj = NULL;
        }
        rcu_read_unlock();

        return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        if (nested)
                down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
        else
                down_read(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_read(uobj);
                return NULL;
        }

        return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        down_write(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_write(uobj);
                return NULL;
        }

        return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
        struct ib_uobject *uobj;

        uobj = idr_read_uobj(idr, id, context, nested);
        return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
        put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
        put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
        put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
        return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
        put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
        put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
        put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
        *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
        return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
        put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context      cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata                   udata;
        struct ib_ucontext               *ucontext;
        struct file                      *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ib_dev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);
        INIT_LIST_HEAD(&ucontext->xrcd_list);
        INIT_LIST_HEAD(&ucontext->rule_list);
        rcu_read_lock();
        ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        ucontext->umem_tree = RB_ROOT;
        init_rwsem(&ucontext->umem_rwsem);
        ucontext->odp_mrs_count = 0;
        INIT_LIST_HEAD(&ucontext->no_private_counters);

        if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
                ucontext->invalidate_range = NULL;

#endif

        resp.num_comp_vectors = file->device->num_comp_vectors;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                goto err_free;
        resp.async_fd = ret;

        filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_fd;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        ib_uverbs_free_async_event_file(file);
        fput(filp);

err_fd:
        put_unused_fd(resp.async_fd);

err_free:
        put_pid(ucontext->tgid);
        ib_dev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
                                  struct ib_device *ib_dev,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        resp->fw_ver            = attr->fw_ver;
        resp->node_guid         = ib_dev->node_guid;
        resp->sys_image_guid    = attr->sys_image_guid;
        resp->max_mr_size       = attr->max_mr_size;
        resp->page_size_cap     = attr->page_size_cap;
        resp->vendor_id         = attr->vendor_id;
        resp->vendor_part_id    = attr->vendor_part_id;
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
        resp->device_cap_flags  = attr->device_cap_flags;
        resp->max_sge           = attr->max_sge;
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
        resp->max_cqe           = attr->max_cqe;
        resp->max_mr            = attr->max_mr;
        resp->max_pd            = attr->max_pd;
        resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
        resp->max_res_rd_atom   = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
        resp->atomic_cap                = attr->atomic_cap;
        resp->max_ee                    = attr->max_ee;
        resp->max_rdd                   = attr->max_rdd;
        resp->max_mw                    = attr->max_mw;
        resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
        resp->max_mcast_grp             = attr->max_mcast_grp;
        resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah                    = attr->max_ah;
        resp->max_fmr                   = attr->max_fmr;
        resp->max_map_per_fmr           = attr->max_map_per_fmr;
        resp->max_srq                   = attr->max_srq;
        resp->max_srq_wr                = attr->max_srq_wr;
        resp->max_srq_sge               = attr->max_srq_sge;
        resp->max_pkeys                 = attr->max_pkeys;
        resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
        resp->phys_port_cnt             = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state           = attr.state;
        resp.max_mtu         = attr.max_mtu;
        resp.active_mtu      = attr.active_mtu;
        resp.gid_tbl_len     = attr.gid_tbl_len;
        resp.port_cap_flags  = attr.port_cap_flags;
        resp.max_msg_sz      = attr.max_msg_sz;
        resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
        resp.pkey_tbl_len    = attr.pkey_tbl_len;
        resp.lid             = attr.lid;
        resp.sm_lid          = attr.sm_lid;
        resp.lmc             = attr.lmc;
        resp.max_vl_num      = attr.max_vl_num;
        resp.sm_sl           = attr.sm_sl;
        resp.subnet_timeout  = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;
        resp.link_layer      = rdma_port_get_link_layer(ib_dev,
                                                        cmd.port_num);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata                udata;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
        down_write(&uobj->mutex);

        pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device  = ib_dev;
        pd->uobject = uobj;
        pd->local_mr = NULL;
        atomic_set(&pd->usecnt, 0);

        uobj->object = pd;
        ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
        ib_dealloc_pd(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject          *uobj;
        struct ib_pd               *pd;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        pd = uobj->object;

        if (atomic_read(&pd->usecnt)) {
                ret = -EBUSY;
                goto err_put;
        }

        ret = pd->device->dealloc_pd(uobj->object);
        WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
        if (ret)
                goto err_put;

        uobj->live = 0;
        put_uobj_write(uobj);

        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;

err_put:
        put_uobj_write(uobj);
        return ret;
}

struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                            struct inode *inode,
                            struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_udata                 udata;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
        struct fd                       f = {NULL, 0};
        struct inode                   *inode = NULL;
        int                             ret = 0;
        int                             new_xrcd = 0;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof  resp);

        mutex_lock(&file->device->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;
                goto err_tree_mutex_unlock;
        }

        init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

        down_write(&obj->uobject.mutex);

        if (!xrcd) {
                xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->inode   = inode;
                xrcd->device  = ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(file->device, inode, xrcd);
                        if (ret)
                                goto err_insert_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (f.file)
                fdput(f);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;
        up_write(&obj->uobject.mutex);

        mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;

err_copy:
        if (inode) {
                if (new_xrcd)
                        xrcd_table_delete(file->device, inode);
                atomic_dec(&xrcd->usecnt);
        }

err_insert_xrcd:
        idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
        ib_dealloc_xrcd(xrcd);

err:
        put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&file->device->xrcd_tree_mutex);

        return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_close_xrcd cmd;
        struct ib_uobject           *uobj;
        struct ib_xrcd              *xrcd = NULL;
        struct inode                *inode = NULL;
        struct ib_uxrcd_object      *obj;
        int                         live;
        int                         ret = 0;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->device->xrcd_tree_mutex);
        uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
        if (!uobj) {
                ret = -EINVAL;
                goto out;
        }

        xrcd  = uobj->object;
        inode = xrcd->inode;
        obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
        if (atomic_read(&obj->refcnt)) {
                put_uobj_write(uobj);
                ret = -EBUSY;
                goto out;
        }

        if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
                ret = ib_dealloc_xrcd(uobj->object);
                if (!ret)
                        uobj->live = 0;
        }

        live = uobj->live;
        if (inode && ret)
                atomic_inc(&xrcd->usecnt);

        put_uobj_write(uobj);

        if (ret)
                goto out;

        if (inode && !live)
                xrcd_table_delete(file->device, inode);

        idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        ret = in_len;

out:
        mutex_unlock(&file->device->xrcd_tree_mutex);
        return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
        struct inode *inode;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return;

        ib_dealloc_xrcd(xrcd);

        if (inode)
                xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         struct ib_device *ib_dev,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                if (!(pd->device->attrs.device_cap_flags &
                      IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mr;
        ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
        if (ret)
                goto err_unreg;

        memset(&resp, 0, sizeof resp);
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;
        resp.mr_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
        ib_dereg_mr(mr);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_pd                *pd = NULL;
        struct ib_mr                *mr;
        struct ib_pd                *old_pd;
        int                          ret;
        struct ib_uobject           *uobj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof(cmd),
                   (unsigned long) cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                        return -EINVAL;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
                              file->ucontext);

        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        old_pd = mr->pd;
        ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                        cmd.length, cmd.hca_va,
                                        cmd.access_flags, pd, &udata);
        if (!ret) {
                if (cmd.flags & IB_MR_REREG_PD) {
                        atomic_inc(&pd->usecnt);
                        mr->pd = pd;
                        atomic_dec(&old_pd->usecnt);
                }
        } else {
                goto put_uobj_pd;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
        else
                ret = in_len;

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                put_pd_read(pd);

put_uobjs:

        put_uobj_write(mr->uobject);

        return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr             *mr;
        struct ib_uobject        *uobj;
        int                       ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        ret = ib_dereg_mr(mr);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device  = pd->device;
        mw->pd      = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
        if (ret)
                goto err_unalloc;

        memset(&resp, 0, sizeof(resp));
        resp.rkey      = mw->rkey;
        resp.mw_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mw_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
        uverbs_dealloc_mw(mw);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_dealloc_mw cmd;
        struct ib_mw               *mw;
        struct ib_uobject          *uobj;
        int                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mw = uobj->object;

        ret = uverbs_dealloc_mw(mw);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      struct ib_device *ib_dev,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct file                               *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                return ret;
        resp.fd = ret;

        filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
        if (IS_ERR(filp)) {
                put_unused_fd(resp.fd);
                return PTR_ERR(filp);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}

static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
                                       struct ib_device *ib_dev,
                                       struct ib_udata *ucore,
                                       struct ib_udata *uhw,
                                       struct ib_uverbs_ex_create_cq *cmd,
                                       size_t cmd_sz,
                                       int (*cb)(struct ib_uverbs_file *file,
                                                 struct ib_ucq_object *obj,
                                                 struct ib_uverbs_ex_create_cq_resp *resp,
                                                 struct ib_udata *udata,
                                                 void *context),
                                       void *context)
{
        struct ib_ucq_object           *obj;
        struct ib_uverbs_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_cq_init_attr attr = {};

        if (cmd->comp_vector >= file->device->num_comp_vectors)
                return ERR_PTR(-EINVAL);

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
        down_write(&obj->uobject.mutex);

        if (cmd->comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        obj->uverbs_file           = file;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        attr.cqe = cmd->cqe;
        attr.comp_vector = cmd->comp_vector;

        if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
                attr.flags = cmd->flags;

        cq = ib_dev->create_cq(ib_dev, &attr,
                               file->ucontext, uhw);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_file;
        }

        cq->device        = ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file;
        atomic_set(&cq->usecnt, 0);

        obj->uobject.object = cq;
        ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
        if (ret)
                goto err_free;

        memset(&resp, 0, sizeof resp);
        resp.base.cq_handle = obj->uobject.id;
        resp.base.cqe       = cq->cqe;

        resp.response_length = offsetof(typeof(resp), response_length) +
                sizeof(resp.response_length);

        ret = cb(file, obj, &resp, ucore, context);
        if (ret)
                goto err_cb;

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;

        up_write(&obj->uobject.mutex);

        return obj;

err_cb:
        idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
        ib_destroy_cq(cq);

err_file:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, obj);

err:
        put_uobj_write(&obj->uobject);

        return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
                                  struct ib_ucq_object *obj,
                                  struct ib_uverbs_ex_create_cq_resp *resp,
                                  struct ib_udata *ucore, void *context)
{
        if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
                return -EFAULT;

        return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_ex_create_cq   cmd_ex;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata                 ucore;
        struct ib_udata                 uhw;
        struct ib_ucq_object           *obj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

        INIT_UDATA(&uhw, buf + sizeof(cmd),
                   (unsigned long)cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
        cmd_ex.cqe = cmd.cqe;
        cmd_ex.comp_vector = cmd.comp_vector;
        cmd_ex.comp_channel = cmd.comp_channel;

        obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
                        offsetof(typeof(cmd_ex), comp_channel) +
                        sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
                        NULL);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
                                     struct ib_ucq_object *obj,
                                     struct ib_uverbs_ex_create_cq_resp *resp,
                                     struct ib_udata *ucore, void *context)
{
        if (ib_copy_to_udata(ucore, resp, resp->response_length))
                return -EFAULT;

        return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_uverbs_ex_create_cq  cmd;
        struct ib_ucq_object           *obj;
        int err;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        if (ucore->outlen < (offsetof(typeof(resp), response_length) +
                             sizeof(resp.response_length)))
                return -ENOSPC;

        obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
                        min(ucore->inlen, sizeof(cmd)),
                        ib_uverbs_ex_create_cq_cb, NULL);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_cq                    *cq;
        int                             ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp.cqe))
                ret = -EFAULT;

out:
        put_cq_read(cq);

        return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id               = wc->wr_id;
        tmp.status              = wc->status;
        tmp.opcode              = wc->opcode;
        tmp.vendor_err          = wc->vendor_err;
        tmp.byte_len            = wc->byte_len;
        tmp.ex.imm_data         = (__u32 __force) wc->ex.imm_data;
        tmp.qp_num              = wc->qp->qp_num;
        tmp.src_qp              = wc->src_qp;
        tmp.wc_flags            = wc->wc_flags;
        tmp.pkey_index          = wc->pkey_index;
        tmp.slid                = wc->slid;
        tmp.sl                  = wc->sl;
        tmp.dlid_path_bits      = wc->dlid_path_bits;
        tmp.port_num            = wc->port_num;
        tmp.reserved            = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

1597 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1598                           struct ib_device *ib_dev,
1599                           const char __user *buf, int in_len,
1600                           int out_len)
1601 {
1602         struct ib_uverbs_poll_cq       cmd;
1603         struct ib_uverbs_poll_cq_resp  resp;
1604         u8 __user                     *header_ptr;
1605         u8 __user                     *data_ptr;
1606         struct ib_cq                  *cq;
1607         struct ib_wc                   wc;
1608         int                            ret;
1609
1610         if (copy_from_user(&cmd, buf, sizeof cmd))
1611                 return -EFAULT;
1612
1613         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1614         if (!cq)
1615                 return -EINVAL;
1616
1617         /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1618         header_ptr = (void __user *)(unsigned long) cmd.response;
1619         data_ptr = header_ptr + sizeof resp;
1620
1621         memset(&resp, 0, sizeof resp);
1622         while (resp.count < cmd.ne) {
1623                 ret = ib_poll_cq(cq, 1, &wc);
1624                 if (ret < 0)
1625                         goto out_put;
1626                 if (!ret)
1627                         break;
1628
1629                 ret = copy_wc_to_user(data_ptr, &wc);
1630                 if (ret)
1631                         goto out_put;
1632
1633                 data_ptr += sizeof(struct ib_uverbs_wc);
1634                 ++resp.count;
1635         }
1636
1637         if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1638                 ret = -EFAULT;
1639                 goto out_put;
1640         }
1641
1642         ret = in_len;
1643
1644 out_put:
1645         put_cq_read(cq);
1646         return ret;
1647 }
1648
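/*
 * Handle IB_USER_VERBS_CMD_REQ_NOTIFY_CQ: rearm the completion notification
 * for the CQ, for either solicited-only or all completions.
 */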
1649 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1650                                 struct ib_device *ib_dev,
1651                                 const char __user *buf, int in_len,
1652                                 int out_len)
1653 {
1654         struct ib_uverbs_req_notify_cq cmd;
1655         struct ib_cq                  *cq;
1656
1657         if (copy_from_user(&cmd, buf, sizeof cmd))
1658                 return -EFAULT;
1659
1660         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1661         if (!cq)
1662                 return -EINVAL;
1663
1664         ib_req_notify_cq(cq, cmd.solicited_only ?
1665                          IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1666
1667         put_cq_read(cq);
1668
1669         return in_len;
1670 }
1671
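/*
 * Handle IB_USER_VERBS_CMD_DESTROY_CQ: destroy the CQ, drop it from the idr
 * and the per-file list, and report how many completion and async events
 * were delivered for it.
 */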
1672 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1673                              struct ib_device *ib_dev,
1674                              const char __user *buf, int in_len,
1675                              int out_len)
1676 {
1677         struct ib_uverbs_destroy_cq      cmd;
1678         struct ib_uverbs_destroy_cq_resp resp;
1679         struct ib_uobject               *uobj;
1680         struct ib_cq                    *cq;
1681         struct ib_ucq_object            *obj;
1682         struct ib_uverbs_event_file     *ev_file;
1683         int                              ret = -EINVAL;
1684
1685         if (copy_from_user(&cmd, buf, sizeof cmd))
1686                 return -EFAULT;
1687
1688         uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1689         if (!uobj)
1690                 return -EINVAL;
1691         cq      = uobj->object;
1692         ev_file = cq->cq_context;
1693         obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1694
1695         ret = ib_destroy_cq(cq);
1696         if (!ret)
1697                 uobj->live = 0;
1698
1699         put_uobj_write(uobj);
1700
1701         if (ret)
1702                 return ret;
1703
1704         idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1705
1706         mutex_lock(&file->mutex);
1707         list_del(&uobj->list);
1708         mutex_unlock(&file->mutex);
1709
1710         ib_uverbs_release_ucq(file, ev_file, obj);
1711
1712         memset(&resp, 0, sizeof resp);
1713         resp.comp_events_reported  = obj->comp_events_reported;
1714         resp.async_events_reported = obj->async_events_reported;
1715
1716         put_uobj(uobj);
1717
1718         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1719                          &resp, sizeof resp))
1720                 return -EFAULT;
1721
1722         return in_len;
1723 }
1724
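/*
 * Common QP creation path shared by the legacy and extended create_qp
 * commands.  cmd_sz says how much of struct ib_uverbs_ex_create_qp the
 * caller actually provided; the cb callback writes the response in the
 * format that caller expects.
 */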
1725 static int create_qp(struct ib_uverbs_file *file,
1726                      struct ib_udata *ucore,
1727                      struct ib_udata *uhw,
1728                      struct ib_uverbs_ex_create_qp *cmd,
1729                      size_t cmd_sz,
1730                      int (*cb)(struct ib_uverbs_file *file,
1731                                struct ib_uverbs_ex_create_qp_resp *resp,
1732                                struct ib_udata *udata),
1733                      void *context)
1734 {
1735         struct ib_uqp_object            *obj;
1736         struct ib_device                *device;
1737         struct ib_pd                    *pd = NULL;
1738         struct ib_xrcd                  *xrcd = NULL;
1739         struct ib_uobject               *uninitialized_var(xrcd_uobj);
1740         struct ib_cq                    *scq = NULL, *rcq = NULL;
1741         struct ib_srq                   *srq = NULL;
1742         struct ib_qp                    *qp;
1743         char                            *buf;
1744         struct ib_qp_init_attr          attr;
1745         struct ib_uverbs_ex_create_qp_resp resp;
1746         int                             ret;
1747
1748         if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1749                 return -EPERM;
1750
1751         obj = kzalloc(sizeof *obj, GFP_KERNEL);
1752         if (!obj)
1753                 return -ENOMEM;
1754
1755         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
1756                   &qp_lock_class);
1757         down_write(&obj->uevent.uobject.mutex);
1758
1759         if (cmd->qp_type == IB_QPT_XRC_TGT) {
1760                 xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
1761                                      &xrcd_uobj);
1762                 if (!xrcd) {
1763                         ret = -EINVAL;
1764                         goto err_put;
1765                 }
1766                 device = xrcd->device;
1767         } else {
1768                 if (cmd->qp_type == IB_QPT_XRC_INI) {
1769                         cmd->max_recv_wr = 0;
1770                         cmd->max_recv_sge = 0;
1771                 } else {
1772                         if (cmd->is_srq) {
1773                                 srq = idr_read_srq(cmd->srq_handle,
1774                                                    file->ucontext);
1775                                 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1776                                         ret = -EINVAL;
1777                                         goto err_put;
1778                                 }
1779                         }
1780
1781                         if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1782                                 rcq = idr_read_cq(cmd->recv_cq_handle,
1783                                                   file->ucontext, 0);
1784                                 if (!rcq) {
1785                                         ret = -EINVAL;
1786                                         goto err_put;
1787                                 }
1788                         }
1789                 }
1790
1791                 scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
1792                 rcq = rcq ?: scq;
1793                 pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
1794                 if (!pd || !scq) {
1795                         ret = -EINVAL;
1796                         goto err_put;
1797                 }
1798
1799                 device = pd->device;
1800         }
1801
1802         attr.event_handler = ib_uverbs_qp_event_handler;
1803         attr.qp_context    = file;
1804         attr.send_cq       = scq;
1805         attr.recv_cq       = rcq;
1806         attr.srq           = srq;
1807         attr.xrcd          = xrcd;
1808         attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1809                                               IB_SIGNAL_REQ_WR;
1810         attr.qp_type       = cmd->qp_type;
1811         attr.create_flags  = 0;
1812
1813         attr.cap.max_send_wr     = cmd->max_send_wr;
1814         attr.cap.max_recv_wr     = cmd->max_recv_wr;
1815         attr.cap.max_send_sge    = cmd->max_send_sge;
1816         attr.cap.max_recv_sge    = cmd->max_recv_sge;
1817         attr.cap.max_inline_data = cmd->max_inline_data;
1818
1819         obj->uevent.events_reported     = 0;
1820         INIT_LIST_HEAD(&obj->uevent.event_list);
1821         INIT_LIST_HEAD(&obj->mcast_list);
1822
1823         if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
1824                       sizeof(cmd->create_flags))
1825                 attr.create_flags = cmd->create_flags;
1826
1827         if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1828                                 IB_QP_CREATE_CROSS_CHANNEL |
1829                                 IB_QP_CREATE_MANAGED_SEND |
1830                                 IB_QP_CREATE_MANAGED_RECV)) {
1831                 ret = -EINVAL;
1832                 goto err_put;
1833         }
1834
1835         buf = (void *)cmd + sizeof(*cmd);
1836         if (cmd_sz > sizeof(*cmd))
1837                 if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
1838                                              cmd_sz - sizeof(*cmd) - 1))) {
1839                         ret = -EINVAL;
1840                         goto err_put;
1841                 }
1842
1843         if (cmd->qp_type == IB_QPT_XRC_TGT)
1844                 qp = ib_create_qp(pd, &attr);
1845         else
1846                 qp = device->create_qp(pd, &attr, uhw);
1847
1848         if (IS_ERR(qp)) {
1849                 ret = PTR_ERR(qp);
1850                 goto err_put;
1851         }
1852
1853         if (cmd->qp_type != IB_QPT_XRC_TGT) {
1854                 qp->real_qp       = qp;
1855                 qp->device        = device;
1856                 qp->pd            = pd;
1857                 qp->send_cq       = attr.send_cq;
1858                 qp->recv_cq       = attr.recv_cq;
1859                 qp->srq           = attr.srq;
1860                 qp->event_handler = attr.event_handler;
1861                 qp->qp_context    = attr.qp_context;
1862                 qp->qp_type       = attr.qp_type;
1863                 atomic_set(&qp->usecnt, 0);
1864                 atomic_inc(&pd->usecnt);
1865                 atomic_inc(&attr.send_cq->usecnt);
1866                 if (attr.recv_cq)
1867                         atomic_inc(&attr.recv_cq->usecnt);
1868                 if (attr.srq)
1869                         atomic_inc(&attr.srq->usecnt);
1870         }
1871         qp->uobject = &obj->uevent.uobject;
1872
1873         obj->uevent.uobject.object = qp;
1874         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1875         if (ret)
1876                 goto err_destroy;
1877
1878         memset(&resp, 0, sizeof resp);
1879         resp.base.qpn             = qp->qp_num;
1880         resp.base.qp_handle       = obj->uevent.uobject.id;
1881         resp.base.max_recv_sge    = attr.cap.max_recv_sge;
1882         resp.base.max_send_sge    = attr.cap.max_send_sge;
1883         resp.base.max_recv_wr     = attr.cap.max_recv_wr;
1884         resp.base.max_send_wr     = attr.cap.max_send_wr;
1885         resp.base.max_inline_data = attr.cap.max_inline_data;
1886
1887         resp.response_length = offsetof(typeof(resp), response_length) +
1888                                sizeof(resp.response_length);
1889
1890         ret = cb(file, &resp, ucore);
1891         if (ret)
1892                 goto err_cb;
1893
1894         if (xrcd) {
1895                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1896                                           uobject);
1897                 atomic_inc(&obj->uxrcd->refcnt);
1898                 put_xrcd_read(xrcd_uobj);
1899         }
1900
1901         if (pd)
1902                 put_pd_read(pd);
1903         if (scq)
1904                 put_cq_read(scq);
1905         if (rcq && rcq != scq)
1906                 put_cq_read(rcq);
1907         if (srq)
1908                 put_srq_read(srq);
1909
1910         mutex_lock(&file->mutex);
1911         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1912         mutex_unlock(&file->mutex);
1913
1914         obj->uevent.uobject.live = 1;
1915
1916         up_write(&obj->uevent.uobject.mutex);
1917
1918         return 0;
1919 err_cb:
1920         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1921
1922 err_destroy:
1923         ib_destroy_qp(qp);
1924
1925 err_put:
1926         if (xrcd)
1927                 put_xrcd_read(xrcd_uobj);
1928         if (pd)
1929                 put_pd_read(pd);
1930         if (scq)
1931                 put_cq_read(scq);
1932         if (rcq && rcq != scq)
1933                 put_cq_read(rcq);
1934         if (srq)
1935                 put_srq_read(srq);
1936
1937         put_uobj_write(&obj->uevent.uobject);
1938         return ret;
1939 }
1940
1941 static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
1942                                   struct ib_uverbs_ex_create_qp_resp *resp,
1943                                   struct ib_udata *ucore)
1944 {
1945         if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1946                 return -EFAULT;
1947
1948         return 0;
1949 }
1950
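/*
 * Handle the legacy IB_USER_VERBS_CMD_CREATE_QP command by translating it
 * into the extended command layout and reusing create_qp().
 */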
1951 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1952                             struct ib_device *ib_dev,
1953                             const char __user *buf, int in_len,
1954                             int out_len)
1955 {
1956         struct ib_uverbs_create_qp      cmd;
1957         struct ib_uverbs_ex_create_qp   cmd_ex;
1958         struct ib_udata                 ucore;
1959         struct ib_udata                 uhw;
1960         ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
1961         int                             err;
1962
1963         if (out_len < resp_size)
1964                 return -ENOSPC;
1965
1966         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1967                 return -EFAULT;
1968
1969         INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
1970                    resp_size);
1971         INIT_UDATA(&uhw, buf + sizeof(cmd),
1972                    (unsigned long)cmd.response + resp_size,
1973                    in_len - sizeof(cmd), out_len - resp_size);
1974
1975         memset(&cmd_ex, 0, sizeof(cmd_ex));
1976         cmd_ex.user_handle = cmd.user_handle;
1977         cmd_ex.pd_handle = cmd.pd_handle;
1978         cmd_ex.send_cq_handle = cmd.send_cq_handle;
1979         cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1980         cmd_ex.srq_handle = cmd.srq_handle;
1981         cmd_ex.max_send_wr = cmd.max_send_wr;
1982         cmd_ex.max_recv_wr = cmd.max_recv_wr;
1983         cmd_ex.max_send_sge = cmd.max_send_sge;
1984         cmd_ex.max_recv_sge = cmd.max_recv_sge;
1985         cmd_ex.max_inline_data = cmd.max_inline_data;
1986         cmd_ex.sq_sig_all = cmd.sq_sig_all;
1987         cmd_ex.qp_type = cmd.qp_type;
1988         cmd_ex.is_srq = cmd.is_srq;
1989
1990         err = create_qp(file, &ucore, &uhw, &cmd_ex,
1991                         offsetof(typeof(cmd_ex), is_srq) +
1992                         sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
1993                         NULL);
1994
1995         if (err)
1996                 return err;
1997
1998         return in_len;
1999 }
2000
2001 static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
2002                                      struct ib_uverbs_ex_create_qp_resp *resp,
2003                                      struct ib_udata *ucore)
2004 {
2005         if (ib_copy_to_udata(ucore, resp, resp->response_length))
2006                 return -EFAULT;
2007
2008         return 0;
2009 }
2010
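/*
 * Extended create_qp entry point: validate the comp_mask and reserved
 * fields before handing off to create_qp() with the extended response
 * callback.
 */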
2011 int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
2012                            struct ib_device *ib_dev,
2013                            struct ib_udata *ucore,
2014                            struct ib_udata *uhw)
2015 {
2016         struct ib_uverbs_ex_create_qp_resp resp;
2017         struct ib_uverbs_ex_create_qp cmd = {0};
2018         int err;
2019
2020         if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
2021                             sizeof(cmd.comp_mask)))
2022                 return -EINVAL;
2023
2024         err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2025         if (err)
2026                 return err;
2027
2028         if (cmd.comp_mask)
2029                 return -EINVAL;
2030
2031         if (cmd.reserved)
2032                 return -EINVAL;
2033
2034         if (ucore->outlen < (offsetof(typeof(resp), response_length) +
2035                              sizeof(resp.response_length)))
2036                 return -ENOSPC;
2037
2038         err = create_qp(file, ucore, uhw, &cmd,
2039                         min(ucore->inlen, sizeof(cmd)),
2040                         ib_uverbs_ex_create_qp_cb, NULL);
2041
2042         if (err)
2043                 return err;
2044
2045         return 0;
2046 }
2047
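/*
 * Handle IB_USER_VERBS_CMD_OPEN_QP: open an existing shareable QP
 * (typically an XRC TGT QP) through its XRCD and create a new uobject
 * referring to it.
 */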
2048 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
2049                           struct ib_device *ib_dev,
2050                           const char __user *buf, int in_len, int out_len)
2051 {
2052         struct ib_uverbs_open_qp        cmd;
2053         struct ib_uverbs_create_qp_resp resp;
2054         struct ib_udata                 udata;
2055         struct ib_uqp_object           *obj;
2056         struct ib_xrcd                 *xrcd;
2057         struct ib_uobject              *uninitialized_var(xrcd_uobj);
2058         struct ib_qp                   *qp;
2059         struct ib_qp_open_attr          attr;
2060         int ret;
2061
2062         if (out_len < sizeof resp)
2063                 return -ENOSPC;
2064
2065         if (copy_from_user(&cmd, buf, sizeof cmd))
2066                 return -EFAULT;
2067
2068         INIT_UDATA(&udata, buf + sizeof cmd,
2069                    (unsigned long) cmd.response + sizeof resp,
2070                    in_len - sizeof cmd, out_len - sizeof resp);
2071
2072         obj = kmalloc(sizeof *obj, GFP_KERNEL);
2073         if (!obj)
2074                 return -ENOMEM;
2075
2076         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
2077         down_write(&obj->uevent.uobject.mutex);
2078
2079         xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
2080         if (!xrcd) {
2081                 ret = -EINVAL;
2082                 goto err_put;
2083         }
2084
2085         attr.event_handler = ib_uverbs_qp_event_handler;
2086         attr.qp_context    = file;
2087         attr.qp_num        = cmd.qpn;
2088         attr.qp_type       = cmd.qp_type;
2089
2090         obj->uevent.events_reported = 0;
2091         INIT_LIST_HEAD(&obj->uevent.event_list);
2092         INIT_LIST_HEAD(&obj->mcast_list);
2093
2094         qp = ib_open_qp(xrcd, &attr);
2095         if (IS_ERR(qp)) {
2096                 ret = PTR_ERR(qp);
2097                 goto err_put;
2098         }
2099
2100         qp->uobject = &obj->uevent.uobject;
2101
2102         obj->uevent.uobject.object = qp;
2103         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2104         if (ret)
2105                 goto err_destroy;
2106
2107         memset(&resp, 0, sizeof resp);
2108         resp.qpn       = qp->qp_num;
2109         resp.qp_handle = obj->uevent.uobject.id;
2110
2111         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2112                          &resp, sizeof resp)) {
2113                 ret = -EFAULT;
2114                 goto err_remove;
2115         }
2116
2117         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2118         atomic_inc(&obj->uxrcd->refcnt);
2119         put_xrcd_read(xrcd_uobj);
2120
2121         mutex_lock(&file->mutex);
2122         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
2123         mutex_unlock(&file->mutex);
2124
2125         obj->uevent.uobject.live = 1;
2126
2127         up_write(&obj->uevent.uobject.mutex);
2128
2129         return in_len;
2130
2131 err_remove:
2132         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2133
2134 err_destroy:
2135         ib_destroy_qp(qp);
2136
2137 err_put:
2138         put_xrcd_read(xrcd_uobj);
2139         put_uobj_write(&obj->uevent.uobject);
2140         return ret;
2141 }
2142
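/*
 * Handle IB_USER_VERBS_CMD_QUERY_QP: query the QP's attributes and init
 * attributes and flatten them into the response, including the primary and
 * alternate path address vectors.
 */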
2143 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
2144                            struct ib_device *ib_dev,
2145                            const char __user *buf, int in_len,
2146                            int out_len)
2147 {
2148         struct ib_uverbs_query_qp      cmd;
2149         struct ib_uverbs_query_qp_resp resp;
2150         struct ib_qp                   *qp;
2151         struct ib_qp_attr              *attr;
2152         struct ib_qp_init_attr         *init_attr;
2153         int                            ret;
2154
2155         if (copy_from_user(&cmd, buf, sizeof cmd))
2156                 return -EFAULT;
2157
2158         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
2159         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
2160         if (!attr || !init_attr) {
2161                 ret = -ENOMEM;
2162                 goto out;
2163         }
2164
2165         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2166         if (!qp) {
2167                 ret = -EINVAL;
2168                 goto out;
2169         }
2170
2171         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
2172
2173         put_qp_read(qp);
2174
2175         if (ret)
2176                 goto out;
2177
2178         memset(&resp, 0, sizeof resp);
2179
2180         resp.qp_state               = attr->qp_state;
2181         resp.cur_qp_state           = attr->cur_qp_state;
2182         resp.path_mtu               = attr->path_mtu;
2183         resp.path_mig_state         = attr->path_mig_state;
2184         resp.qkey                   = attr->qkey;
2185         resp.rq_psn                 = attr->rq_psn;
2186         resp.sq_psn                 = attr->sq_psn;
2187         resp.dest_qp_num            = attr->dest_qp_num;
2188         resp.qp_access_flags        = attr->qp_access_flags;
2189         resp.pkey_index             = attr->pkey_index;
2190         resp.alt_pkey_index         = attr->alt_pkey_index;
2191         resp.sq_draining            = attr->sq_draining;
2192         resp.max_rd_atomic          = attr->max_rd_atomic;
2193         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
2194         resp.min_rnr_timer          = attr->min_rnr_timer;
2195         resp.port_num               = attr->port_num;
2196         resp.timeout                = attr->timeout;
2197         resp.retry_cnt              = attr->retry_cnt;
2198         resp.rnr_retry              = attr->rnr_retry;
2199         resp.alt_port_num           = attr->alt_port_num;
2200         resp.alt_timeout            = attr->alt_timeout;
2201
2202         memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
2203         resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
2204         resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
2205         resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
2206         resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
2207         resp.dest.dlid              = attr->ah_attr.dlid;
2208         resp.dest.sl                = attr->ah_attr.sl;
2209         resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
2210         resp.dest.static_rate       = attr->ah_attr.static_rate;
2211         resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
2212         resp.dest.port_num          = attr->ah_attr.port_num;
2213
2214         memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
2215         resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
2216         resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
2217         resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
2218         resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
2219         resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
2220         resp.alt_dest.sl            = attr->alt_ah_attr.sl;
2221         resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
2222         resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
2223         resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
2224         resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
2225
2226         resp.max_send_wr            = init_attr->cap.max_send_wr;
2227         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
2228         resp.max_send_sge           = init_attr->cap.max_send_sge;
2229         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
2230         resp.max_inline_data        = init_attr->cap.max_inline_data;
2231         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
2232
2233         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2234                          &resp, sizeof resp))
2235                 ret = -EFAULT;
2236
2237 out:
2238         kfree(attr);
2239         kfree(init_attr);
2240
2241         return ret ? ret : in_len;
2242 }
2243
2244 /* Remove ignored fields set in the attribute mask */
2245 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2246 {
2247         switch (qp_type) {
2248         case IB_QPT_XRC_INI:
2249                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2250         case IB_QPT_XRC_TGT:
2251                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2252                                 IB_QP_RNR_RETRY);
2253         default:
2254                 return mask;
2255         }
2256 }
2257
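/*
 * Handle IB_USER_VERBS_CMD_MODIFY_QP: build an ib_qp_attr from the command,
 * mask off attributes that do not apply to the QP type, and apply the
 * change directly through the driver for real QPs (after resolving the
 * Ethernet destination MAC) or via ib_modify_qp() for shared QP handles.
 */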
2258 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2259                             struct ib_device *ib_dev,
2260                             const char __user *buf, int in_len,
2261                             int out_len)
2262 {
2263         struct ib_uverbs_modify_qp cmd;
2264         struct ib_udata            udata;
2265         struct ib_qp              *qp;
2266         struct ib_qp_attr         *attr;
2267         int                        ret;
2268
2269         if (copy_from_user(&cmd, buf, sizeof cmd))
2270                 return -EFAULT;
2271
2272         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2273                    out_len);
2274
2275         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2276         if (!attr)
2277                 return -ENOMEM;
2278
2279         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2280         if (!qp) {
2281                 ret = -EINVAL;
2282                 goto out;
2283         }
2284
2285         attr->qp_state            = cmd.qp_state;
2286         attr->cur_qp_state        = cmd.cur_qp_state;
2287         attr->path_mtu            = cmd.path_mtu;
2288         attr->path_mig_state      = cmd.path_mig_state;
2289         attr->qkey                = cmd.qkey;
2290         attr->rq_psn              = cmd.rq_psn;
2291         attr->sq_psn              = cmd.sq_psn;
2292         attr->dest_qp_num         = cmd.dest_qp_num;
2293         attr->qp_access_flags     = cmd.qp_access_flags;
2294         attr->pkey_index          = cmd.pkey_index;
2295         attr->alt_pkey_index      = cmd.alt_pkey_index;
2296         attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2297         attr->max_rd_atomic       = cmd.max_rd_atomic;
2298         attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
2299         attr->min_rnr_timer       = cmd.min_rnr_timer;
2300         attr->port_num            = cmd.port_num;
2301         attr->timeout             = cmd.timeout;
2302         attr->retry_cnt           = cmd.retry_cnt;
2303         attr->rnr_retry           = cmd.rnr_retry;
2304         attr->alt_port_num        = cmd.alt_port_num;
2305         attr->alt_timeout         = cmd.alt_timeout;
2306
2307         memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2308         attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
2309         attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
2310         attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
2311         attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
2312         attr->ah_attr.dlid                  = cmd.dest.dlid;
2313         attr->ah_attr.sl                    = cmd.dest.sl;
2314         attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
2315         attr->ah_attr.static_rate           = cmd.dest.static_rate;
2316         attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
2317         attr->ah_attr.port_num              = cmd.dest.port_num;
2318
2319         memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2320         attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
2321         attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
2322         attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
2323         attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2324         attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
2325         attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
2326         attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
2327         attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
2328         attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2329         attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
2330
2331         if (qp->real_qp == qp) {
2332                 ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
2333                 if (ret)
2334                         goto release_qp;
2335                 ret = qp->device->modify_qp(qp, attr,
2336                         modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2337         } else {
2338                 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2339         }
2340
2341         if (ret)
2342                 goto release_qp;
2343
2344         ret = in_len;
2345
2346 release_qp:
2347         put_qp_read(qp);
2348
2349 out:
2350         kfree(attr);
2351
2352         return ret;
2353 }
2354
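/*
 * Handle IB_USER_VERBS_CMD_DESTROY_QP: refuse to destroy a QP that still
 * has multicast attachments, otherwise tear it down, drop its idr entry and
 * references, and report the number of async events delivered.
 */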
2355 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2356                              struct ib_device *ib_dev,
2357                              const char __user *buf, int in_len,
2358                              int out_len)
2359 {
2360         struct ib_uverbs_destroy_qp      cmd;
2361         struct ib_uverbs_destroy_qp_resp resp;
2362         struct ib_uobject               *uobj;
2363         struct ib_qp                    *qp;
2364         struct ib_uqp_object            *obj;
2365         int                              ret = -EINVAL;
2366
2367         if (copy_from_user(&cmd, buf, sizeof cmd))
2368                 return -EFAULT;
2369
2370         memset(&resp, 0, sizeof resp);
2371
2372         uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2373         if (!uobj)
2374                 return -EINVAL;
2375         qp  = uobj->object;
2376         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2377
2378         if (!list_empty(&obj->mcast_list)) {
2379                 put_uobj_write(uobj);
2380                 return -EBUSY;
2381         }
2382
2383         ret = ib_destroy_qp(qp);
2384         if (!ret)
2385                 uobj->live = 0;
2386
2387         put_uobj_write(uobj);
2388
2389         if (ret)
2390                 return ret;
2391
2392         if (obj->uxrcd)
2393                 atomic_dec(&obj->uxrcd->refcnt);
2394
2395         idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2396
2397         mutex_lock(&file->mutex);
2398         list_del(&uobj->list);
2399         mutex_unlock(&file->mutex);
2400
2401         ib_uverbs_release_uevent(file, &obj->uevent);
2402
2403         resp.events_reported = obj->uevent.events_reported;
2404
2405         put_uobj(uobj);
2406
2407         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2408                          &resp, sizeof resp))
2409                 return -EFAULT;
2410
2411         return in_len;
2412 }
2413
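/*
 * Allocate a work request large enough for wr_size bytes of WR plus num_sge
 * scatter/gather entries; the SG list starts at the first ib_sge-aligned
 * offset after the WR itself.
 */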
2414 static void *alloc_wr(size_t wr_size, __u32 num_sge)
2415 {
2416         return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2417                          num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2418 }
2419
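/*
 * Handle IB_USER_VERBS_CMD_POST_SEND: unmarshal the user's send work
 * requests (UD, RDMA, atomic or plain sends) into a kernel WR chain, post
 * them to the QP, and on failure report the position of the bad WR in
 * resp.bad_wr.
 */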
2420 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2421                             struct ib_device *ib_dev,
2422                             const char __user *buf, int in_len,
2423                             int out_len)
2424 {
2425         struct ib_uverbs_post_send      cmd;
2426         struct ib_uverbs_post_send_resp resp;
2427         struct ib_uverbs_send_wr       *user_wr;
2428         struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
2429         struct ib_qp                   *qp;
2430         int                             i, sg_ind;
2431         int                             is_ud;
2432         ssize_t                         ret = -EINVAL;
2433         size_t                          next_size;
2434
2435         if (copy_from_user(&cmd, buf, sizeof cmd))
2436                 return -EFAULT;
2437
2438         if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2439             cmd.sge_count * sizeof (struct ib_uverbs_sge))
2440                 return -EINVAL;
2441
2442         if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2443                 return -EINVAL;
2444
2445         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2446         if (!user_wr)
2447                 return -ENOMEM;
2448
2449         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2450         if (!qp)
2451                 goto out;
2452
2453         is_ud = qp->qp_type == IB_QPT_UD;
2454         sg_ind = 0;
2455         last = NULL;
2456         for (i = 0; i < cmd.wr_count; ++i) {
2457                 if (copy_from_user(user_wr,
2458                                    buf + sizeof cmd + i * cmd.wqe_size,
2459                                    cmd.wqe_size)) {
2460                         ret = -EFAULT;
2461                         goto out_put;
2462                 }
2463
2464                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2465                         ret = -EINVAL;
2466                         goto out_put;
2467                 }
2468
2469                 if (is_ud) {
2470                         struct ib_ud_wr *ud;
2471
2472                         if (user_wr->opcode != IB_WR_SEND &&
2473                             user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2474                                 ret = -EINVAL;
2475                                 goto out_put;
2476                         }
2477
2478                         next_size = sizeof(*ud);
2479                         ud = alloc_wr(next_size, user_wr->num_sge);
2480                         if (!ud) {
2481                                 ret = -ENOMEM;
2482                                 goto out_put;
2483                         }
2484
2485                         ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
2486                         if (!ud->ah) {
2487                                 kfree(ud);
2488                                 ret = -EINVAL;
2489                                 goto out_put;
2490                         }
2491                         ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2492                         ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2493
2494                         next = &ud->wr;
2495                 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2496                            user_wr->opcode == IB_WR_RDMA_WRITE ||
2497                            user_wr->opcode == IB_WR_RDMA_READ) {
2498                         struct ib_rdma_wr *rdma;
2499
2500                         next_size = sizeof(*rdma);
2501                         rdma = alloc_wr(next_size, user_wr->num_sge);
2502                         if (!rdma) {
2503                                 ret = -ENOMEM;
2504                                 goto out_put;
2505                         }
2506
2507                         rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2508                         rdma->rkey = user_wr->wr.rdma.rkey;
2509
2510                         next = &rdma->wr;
2511                 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2512                            user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2513                         struct ib_atomic_wr *atomic;
2514
2515                         next_size = sizeof(*atomic);
2516                         atomic = alloc_wr(next_size, user_wr->num_sge);
2517                         if (!atomic) {
2518                                 ret = -ENOMEM;
2519                                 goto out_put;
2520                         }
2521
2522                         atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2523                         atomic->compare_add = user_wr->wr.atomic.compare_add;
2524                         atomic->swap = user_wr->wr.atomic.swap;
2525                         atomic->rkey = user_wr->wr.atomic.rkey;
2526
2527                         next = &atomic->wr;
2528                 } else if (user_wr->opcode == IB_WR_SEND ||
2529                            user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2530                            user_wr->opcode == IB_WR_SEND_WITH_INV) {
2531                         next_size = sizeof(*next);
2532                         next = alloc_wr(next_size, user_wr->num_sge);
2533                         if (!next) {
2534                                 ret = -ENOMEM;
2535                                 goto out_put;
2536                         }
2537                 } else {
2538                         ret = -EINVAL;
2539                         goto out_put;
2540                 }
2541
2542                 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2543                     user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2544                         next->ex.imm_data =
2545                                         (__be32 __force) user_wr->ex.imm_data;
2546                 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2547                         next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2548                 }
2549
2550                 if (!last)
2551                         wr = next;
2552                 else
2553                         last->next = next;
2554                 last = next;
2555
2556                 next->next       = NULL;
2557                 next->wr_id      = user_wr->wr_id;
2558                 next->num_sge    = user_wr->num_sge;
2559                 next->opcode     = user_wr->opcode;
2560                 next->send_flags = user_wr->send_flags;
2561
2562                 if (next->num_sge) {
2563                         next->sg_list = (void *) next +
2564                                 ALIGN(next_size, sizeof(struct ib_sge));
2565                         if (copy_from_user(next->sg_list,
2566                                            buf + sizeof cmd +
2567                                            cmd.wr_count * cmd.wqe_size +
2568                                            sg_ind * sizeof (struct ib_sge),
2569                                            next->num_sge * sizeof (struct ib_sge))) {
2570                                 ret = -EFAULT;
2571                                 goto out_put;
2572                         }
2573                         sg_ind += next->num_sge;
2574                 } else
2575                         next->sg_list = NULL;
2576         }
2577
2578         resp.bad_wr = 0;
2579         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2580         if (ret)
2581                 for (next = wr; next; next = next->next) {
2582                         ++resp.bad_wr;
2583                         if (next == bad_wr)
2584                                 break;
2585                 }
2586
2587         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2588                          &resp, sizeof resp))
2589                 ret = -EFAULT;
2590
2591 out_put:
2592         put_qp_read(qp);
2593
2594         while (wr) {
2595                 if (is_ud && ud_wr(wr)->ah)
2596                         put_ah_read(ud_wr(wr)->ah);
2597                 next = wr->next;
2598                 kfree(wr);
2599                 wr = next;
2600         }
2601
2602 out:
2603         kfree(user_wr);
2604
2605         return ret ? ret : in_len;
2606 }
2607
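/*
 * Unmarshal a user-space array of ib_uverbs_recv_wr (followed by its SG
 * entries) into a kernel chain of struct ib_recv_wr; shared by post_recv
 * and post_srq_recv.
 */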
2608 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2609                                                     int in_len,
2610                                                     u32 wr_count,
2611                                                     u32 sge_count,
2612                                                     u32 wqe_size)
2613 {
2614         struct ib_uverbs_recv_wr *user_wr;
2615         struct ib_recv_wr        *wr = NULL, *last, *next;
2616         int                       sg_ind;
2617         int                       i;
2618         int                       ret;
2619
2620         if (in_len < wqe_size * wr_count +
2621             sge_count * sizeof (struct ib_uverbs_sge))
2622                 return ERR_PTR(-EINVAL);
2623
2624         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2625                 return ERR_PTR(-EINVAL);
2626
2627         user_wr = kmalloc(wqe_size, GFP_KERNEL);
2628         if (!user_wr)
2629                 return ERR_PTR(-ENOMEM);
2630
2631         sg_ind = 0;
2632         last = NULL;
2633         for (i = 0; i < wr_count; ++i) {
2634                 if (copy_from_user(user_wr, buf + i * wqe_size,
2635                                    wqe_size)) {
2636                         ret = -EFAULT;
2637                         goto err;
2638                 }
2639
2640                 if (user_wr->num_sge + sg_ind > sge_count) {
2641                         ret = -EINVAL;
2642                         goto err;
2643                 }
2644
2645                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2646                                user_wr->num_sge * sizeof (struct ib_sge),
2647                                GFP_KERNEL);
2648                 if (!next) {
2649                         ret = -ENOMEM;
2650                         goto err;
2651                 }
2652
2653                 if (!last)
2654                         wr = next;
2655                 else
2656                         last->next = next;
2657                 last = next;
2658
2659                 next->next       = NULL;
2660                 next->wr_id      = user_wr->wr_id;
2661                 next->num_sge    = user_wr->num_sge;
2662
2663                 if (next->num_sge) {
2664                         next->sg_list = (void *) next +
2665                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2666                         if (copy_from_user(next->sg_list,
2667                                            buf + wr_count * wqe_size +
2668                                            sg_ind * sizeof (struct ib_sge),
2669                                            next->num_sge * sizeof (struct ib_sge))) {
2670                                 ret = -EFAULT;
2671                                 goto err;
2672                         }
2673                         sg_ind += next->num_sge;
2674                 } else
2675                         next->sg_list = NULL;
2676         }
2677
2678         kfree(user_wr);
2679         return wr;
2680
2681 err:
2682         kfree(user_wr);
2683
2684         while (wr) {
2685                 next = wr->next;
2686                 kfree(wr);
2687                 wr = next;
2688         }
2689
2690         return ERR_PTR(ret);
2691 }
2692
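/*
 * Handle IB_USER_VERBS_CMD_POST_RECV: unmarshal the receive WRs, post them
 * to the QP's receive queue, and report the position of the first bad WR on
 * failure.
 */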
2693 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2694                             struct ib_device *ib_dev,
2695                             const char __user *buf, int in_len,
2696                             int out_len)
2697 {
2698         struct ib_uverbs_post_recv      cmd;
2699         struct ib_uverbs_post_recv_resp resp;
2700         struct ib_recv_wr              *wr, *next, *bad_wr;
2701         struct ib_qp                   *qp;
2702         ssize_t                         ret = -EINVAL;
2703
2704         if (copy_from_user(&cmd, buf, sizeof cmd))
2705                 return -EFAULT;
2706
2707         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2708                                        in_len - sizeof cmd, cmd.wr_count,
2709                                        cmd.sge_count, cmd.wqe_size);
2710         if (IS_ERR(wr))
2711                 return PTR_ERR(wr);
2712
2713         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2714         if (!qp)
2715                 goto out;
2716
2717         resp.bad_wr = 0;
2718         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2719
2720         put_qp_read(qp);
2721
2722         if (ret)
2723                 for (next = wr; next; next = next->next) {
2724                         ++resp.bad_wr;
2725                         if (next == bad_wr)
2726                                 break;
2727                 }
2728
2729         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2730                          &resp, sizeof resp))
2731                 ret = -EFAULT;
2732
2733 out:
2734         while (wr) {
2735                 next = wr->next;
2736                 kfree(wr);
2737                 wr = next;
2738         }
2739
2740         return ret ? ret : in_len;
2741 }
2742
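/*
 * Handle IB_USER_VERBS_CMD_POST_SRQ_RECV: same as post_recv, but the WRs
 * are posted to a shared receive queue instead of a QP.
 */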
2743 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2744                                 struct ib_device *ib_dev,
2745                                 const char __user *buf, int in_len,
2746                                 int out_len)
2747 {
2748         struct ib_uverbs_post_srq_recv      cmd;
2749         struct ib_uverbs_post_srq_recv_resp resp;
2750         struct ib_recv_wr                  *wr, *next, *bad_wr;
2751         struct ib_srq                      *srq;
2752         ssize_t                             ret = -EINVAL;
2753
2754         if (copy_from_user(&cmd, buf, sizeof cmd))
2755                 return -EFAULT;
2756
2757         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2758                                        in_len - sizeof cmd, cmd.wr_count,
2759                                        cmd.sge_count, cmd.wqe_size);
2760         if (IS_ERR(wr))
2761                 return PTR_ERR(wr);
2762
2763         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2764         if (!srq)
2765                 goto out;
2766
2767         resp.bad_wr = 0;
2768         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2769
2770         put_srq_read(srq);
2771
2772         if (ret)
2773                 for (next = wr; next; next = next->next) {
2774                         ++resp.bad_wr;
2775                         if (next == bad_wr)
2776                                 break;
2777                 }
2778
2779         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2780                          &resp, sizeof resp))
2781                 ret = -EFAULT;
2782
2783 out:
2784         while (wr) {
2785                 next = wr->next;
2786                 kfree(wr);
2787                 wr = next;
2788         }
2789
2790         return ret ? ret : in_len;
2791 }
2792
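/*
 * Handle IB_USER_VERBS_CMD_CREATE_AH: build an ib_ah_attr from the command,
 * create the address handle on the given PD, and return its handle.
 */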
2793 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2794                             struct ib_device *ib_dev,
2795                             const char __user *buf, int in_len,
2796                             int out_len)
2797 {
2798         struct ib_uverbs_create_ah       cmd;
2799         struct ib_uverbs_create_ah_resp  resp;
2800         struct ib_uobject               *uobj;
2801         struct ib_pd                    *pd;
2802         struct ib_ah                    *ah;
2803         struct ib_ah_attr               attr;
2804         int ret;
2805
2806         if (out_len < sizeof resp)
2807                 return -ENOSPC;
2808
2809         if (copy_from_user(&cmd, buf, sizeof cmd))
2810                 return -EFAULT;
2811
2812         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2813         if (!uobj)
2814                 return -ENOMEM;
2815
2816         init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2817         down_write(&uobj->mutex);
2818
2819         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2820         if (!pd) {
2821                 ret = -EINVAL;
2822                 goto err;
2823         }
2824
2825         attr.dlid              = cmd.attr.dlid;
2826         attr.sl                = cmd.attr.sl;
2827         attr.src_path_bits     = cmd.attr.src_path_bits;
2828         attr.static_rate       = cmd.attr.static_rate;
2829         attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2830         attr.port_num          = cmd.attr.port_num;
2831         attr.grh.flow_label    = cmd.attr.grh.flow_label;
2832         attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2833         attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2834         attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2835         memset(&attr.dmac, 0, sizeof(attr.dmac));
2836         memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2837
2838         ah = ib_create_ah(pd, &attr);
2839         if (IS_ERR(ah)) {
2840                 ret = PTR_ERR(ah);
2841                 goto err_put;
2842         }
2843
2844         ah->uobject  = uobj;
2845         uobj->object = ah;
2846
2847         ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2848         if (ret)
2849                 goto err_destroy;
2850
2851         resp.ah_handle = uobj->id;
2852
2853         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2854                          &resp, sizeof resp)) {
2855                 ret = -EFAULT;
2856                 goto err_copy;
2857         }
2858
2859         put_pd_read(pd);
2860
2861         mutex_lock(&file->mutex);
2862         list_add_tail(&uobj->list, &file->ucontext->ah_list);
2863         mutex_unlock(&file->mutex);
2864
2865         uobj->live = 1;
2866
2867         up_write(&uobj->mutex);
2868
2869         return in_len;
2870
2871 err_copy:
2872         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2873
2874 err_destroy:
2875         ib_destroy_ah(ah);
2876
2877 err_put:
2878         put_pd_read(pd);
2879
2880 err:
2881         put_uobj_write(uobj);
2882         return ret;
2883 }
2884
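/*
 * Handle IB_USER_VERBS_CMD_DESTROY_AH: destroy the address handle and
 * remove its uobject from the idr and the per-file list.
 */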
2885 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2886                              struct ib_device *ib_dev,
2887                              const char __user *buf, int in_len, int out_len)
2888 {
2889         struct ib_uverbs_destroy_ah cmd;
2890         struct ib_ah               *ah;
2891         struct ib_uobject          *uobj;
2892         int                         ret;
2893
2894         if (copy_from_user(&cmd, buf, sizeof cmd))
2895                 return -EFAULT;
2896
2897         uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2898         if (!uobj)
2899                 return -EINVAL;
2900         ah = uobj->object;
2901
2902         ret = ib_destroy_ah(ah);
2903         if (!ret)
2904                 uobj->live = 0;
2905
2906         put_uobj_write(uobj);
2907
2908         if (ret)
2909                 return ret;
2910
2911         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2912
2913         mutex_lock(&file->mutex);
2914         list_del(&uobj->list);
2915         mutex_unlock(&file->mutex);
2916
2917         put_uobj(uobj);
2918
2919         return in_len;
2920 }
2921
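/*
 * Handle IB_USER_VERBS_CMD_ATTACH_MCAST: attach the QP to a multicast
 * group, recording the attachment on the QP's uobject so it can be cleaned
 * up when the QP is destroyed.  Attaching an already-attached group is a
 * successful no-op.
 */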
2922 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2923                                struct ib_device *ib_dev,
2924                                const char __user *buf, int in_len,
2925                                int out_len)
2926 {
2927         struct ib_uverbs_attach_mcast cmd;
2928         struct ib_qp                 *qp;
2929         struct ib_uqp_object         *obj;
2930         struct ib_uverbs_mcast_entry *mcast;
2931         int                           ret;
2932
2933         if (copy_from_user(&cmd, buf, sizeof cmd))
2934                 return -EFAULT;
2935
2936         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2937         if (!qp)
2938                 return -EINVAL;
2939
2940         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2941
2942         list_for_each_entry(mcast, &obj->mcast_list, list)
2943                 if (cmd.mlid == mcast->lid &&
2944                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2945                         ret = 0;
2946                         goto out_put;
2947                 }
2948
2949         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2950         if (!mcast) {
2951                 ret = -ENOMEM;
2952                 goto out_put;
2953         }
2954
2955         mcast->lid = cmd.mlid;
2956         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2957
2958         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2959         if (!ret)
2960                 list_add_tail(&mcast->list, &obj->mcast_list);
2961         else
2962                 kfree(mcast);
2963
2964 out_put:
2965         put_qp_write(qp);
2966
2967         return ret ? ret : in_len;
2968 }
2969
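/*
 * Handle IB_USER_VERBS_CMD_DETACH_MCAST: detach the QP from the multicast
 * group and drop the matching entry from the uobject's mcast_list.
 */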
2970 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2971                                struct ib_device *ib_dev,
2972                                const char __user *buf, int in_len,
2973                                int out_len)
2974 {
2975         struct ib_uverbs_detach_mcast cmd;
2976         struct ib_uqp_object         *obj;
2977         struct ib_qp                 *qp;
2978         struct ib_uverbs_mcast_entry *mcast;
2979         int                           ret = -EINVAL;
2980
2981         if (copy_from_user(&cmd, buf, sizeof cmd))
2982                 return -EFAULT;
2983
2984         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2985         if (!qp)
2986                 return -EINVAL;
2987
2988         ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2989         if (ret)
2990                 goto out_put;
2991
2992         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2993
2994         list_for_each_entry(mcast, &obj->mcast_list, list)
2995                 if (cmd.mlid == mcast->lid &&
2996                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2997                         list_del(&mcast->list);
2998                         kfree(mcast);
2999                         break;
3000                 }
3001
3002 out_put:
3003         put_qp_write(qp);
3004
3005         return ret ? ret : in_len;
3006 }
3007
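/*
 * Translate one user-space flow specification into the corresponding
 * union ib_flow_spec, checking that the size reported by user space matches
 * the kernel's layout for that spec type.
 */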
3008 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
3009                                 union ib_flow_spec *ib_spec)
3010 {
3011         if (kern_spec->reserved)
3012                 return -EINVAL;
3013
3014         ib_spec->type = kern_spec->type;
3015
3016         switch (ib_spec->type) {
3017         case IB_FLOW_SPEC_ETH:
3018                 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
3019                 if (ib_spec->eth.size != kern_spec->eth.size)
3020                         return -EINVAL;
3021                 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
3022                        sizeof(struct ib_flow_eth_filter));
3023                 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
3024                        sizeof(struct ib_flow_eth_filter));
3025                 break;
3026         case IB_FLOW_SPEC_IPV4:
3027                 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
3028                 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
3029                         return -EINVAL;
3030                 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
3031                        sizeof(struct ib_flow_ipv4_filter));
3032                 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
3033                        sizeof(struct ib_flow_ipv4_filter));
3034                 break;
3035         case IB_FLOW_SPEC_TCP:
3036         case IB_FLOW_SPEC_UDP:
3037                 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
3038                 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
3039                         return -EINVAL;
3040                 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
3041                        sizeof(struct ib_flow_tcp_udp_filter));
3042                 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
3043                        sizeof(struct ib_flow_tcp_udp_filter));
3044                 break;
3045         default:
3046                 return -EINVAL;
3047         }
3048         return 0;
3049 }
3050
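/*
 * Extended command: create a flow steering rule.  The flow attribute and
 * its trailing array of specs are copied from the ucore buffer, each spec
 * is converted with kern_spec_to_ib_spec(), the rule is attached to the QP
 * via ib_create_flow(), and the resulting handle is returned to userspace.
 */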
3051 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3052                              struct ib_device *ib_dev,
3053                              struct ib_udata *ucore,
3054                              struct ib_udata *uhw)
3055 {
3056         struct ib_uverbs_create_flow      cmd;
3057         struct ib_uverbs_create_flow_resp resp;
3058         struct ib_uobject                 *uobj;
3059         struct ib_flow                    *flow_id;
3060         struct ib_uverbs_flow_attr        *kern_flow_attr;
3061         struct ib_flow_attr               *flow_attr;
3062         struct ib_qp                      *qp;
3063         int err = 0;
3064         void *kern_spec;
3065         void *ib_spec;
3066         int i;
3067
3068         if (ucore->inlen < sizeof(cmd))
3069                 return -EINVAL;
3070
3071         if (ucore->outlen < sizeof(resp))
3072                 return -ENOSPC;
3073
3074         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3075         if (err)
3076                 return err;
3077
3078         ucore->inbuf += sizeof(cmd);
3079         ucore->inlen -= sizeof(cmd);
3080
3081         if (cmd.comp_mask)
3082                 return -EINVAL;
3083
3084         if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
3085              !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
3086                 return -EPERM;
3087
3088         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3089                 return -EINVAL;
3090
3091         if (cmd.flow_attr.size > ucore->inlen ||
3092             cmd.flow_attr.size >
3093             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3094                 return -EINVAL;
3095
3096         if (cmd.flow_attr.reserved[0] ||
3097             cmd.flow_attr.reserved[1])
3098                 return -EINVAL;
3099
3100         if (cmd.flow_attr.num_of_specs) {
3101                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3102                                          GFP_KERNEL);
3103                 if (!kern_flow_attr)
3104                         return -ENOMEM;
3105
3106                 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
3107                 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3108                                          cmd.flow_attr.size);
3109                 if (err)
3110                         goto err_free_attr;
3111         } else {
3112                 kern_flow_attr = &cmd.flow_attr;
3113         }
3114
3115         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
3116         if (!uobj) {
3117                 err = -ENOMEM;
3118                 goto err_free_attr;
3119         }
3120         init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
3121         down_write(&uobj->mutex);
3122
3123         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
3124         if (!qp) {
3125                 err = -EINVAL;
3126                 goto err_uobj;
3127         }
3128
3129         flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
3130         if (!flow_attr) {
3131                 err = -ENOMEM;
3132                 goto err_put;
3133         }
3134
3135         flow_attr->type = kern_flow_attr->type;
3136         flow_attr->priority = kern_flow_attr->priority;
3137         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3138         flow_attr->port = kern_flow_attr->port;
3139         flow_attr->flags = kern_flow_attr->flags;
3140         flow_attr->size = sizeof(*flow_attr);
3141
3142         kern_spec = kern_flow_attr + 1;
3143         ib_spec = flow_attr + 1;
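        /*
         * Walk the variable-length array of specs that follows the user
         * flow attribute, converting each one into the kernel spec area
         * after flow_attr and accumulating flow_attr->size as we go.
         */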
3144         for (i = 0; i < flow_attr->num_of_specs &&
3145              cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3146              cmd.flow_attr.size >=
3147              ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3148                 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3149                 if (err)
3150                         goto err_free;
3151                 flow_attr->size +=
3152                         ((union ib_flow_spec *) ib_spec)->size;
3153                 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3154                 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3155                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3156         }
3157         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3158                 pr_warn("create flow failed, flow %d: %d bytes left from uverbs cmd\n",
3159                         i, cmd.flow_attr.size);
3160                 err = -EINVAL;
3161                 goto err_free;
3162         }
3163         flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3164         if (IS_ERR(flow_id)) {
3165                 err = PTR_ERR(flow_id);
3166                 goto err_free;
3167         }
3168         flow_id->qp = qp;
3169         flow_id->uobject = uobj;
3170         uobj->object = flow_id;
3171
3172         err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
3173         if (err)
3174                 goto destroy_flow;
3175
3176         memset(&resp, 0, sizeof(resp));
3177         resp.flow_handle = uobj->id;
3178
3179         err = ib_copy_to_udata(ucore,
3180                                &resp, sizeof(resp));
3181         if (err)
3182                 goto err_copy;
3183
3184         put_qp_read(qp);
3185         mutex_lock(&file->mutex);
3186         list_add_tail(&uobj->list, &file->ucontext->rule_list);
3187         mutex_unlock(&file->mutex);
3188
3189         uobj->live = 1;
3190
3191         up_write(&uobj->mutex);
3192         kfree(flow_attr);
3193         if (cmd.flow_attr.num_of_specs)
3194                 kfree(kern_flow_attr);
3195         return 0;
3196 err_copy:
3197         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3198 destroy_flow:
3199         ib_destroy_flow(flow_id);
3200 err_free:
3201         kfree(flow_attr);
3202 err_put:
3203         put_qp_read(qp);
3204 err_uobj:
3205         put_uobj_write(uobj);
3206 err_free_attr:
3207         if (cmd.flow_attr.num_of_specs)
3208                 kfree(kern_flow_attr);
3209         return err;
3210 }
3211
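/*
 * Extended command: destroy a flow steering rule by handle and release
 * its uobject.
 */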
3212 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3213                               struct ib_device *ib_dev,
3214                               struct ib_udata *ucore,
3215                               struct ib_udata *uhw)
3216 {
3217         struct ib_uverbs_destroy_flow   cmd;
3218         struct ib_flow                  *flow_id;
3219         struct ib_uobject               *uobj;
3220         int                             ret;
3221
3222         if (ucore->inlen < sizeof(cmd))
3223                 return -EINVAL;
3224
3225         ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3226         if (ret)
3227                 return ret;
3228
3229         if (cmd.comp_mask)
3230                 return -EINVAL;
3231
3232         uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
3233                               file->ucontext);
3234         if (!uobj)
3235                 return -EINVAL;
3236         flow_id = uobj->object;
3237
3238         ret = ib_destroy_flow(flow_id);
3239         if (!ret)
3240                 uobj->live = 0;
3241
3242         put_uobj_write(uobj);
3243
3244         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3245
3246         mutex_lock(&file->mutex);
3247         list_del(&uobj->list);
3248         mutex_unlock(&file->mutex);
3249
3250         put_uobj(uobj);
3251
3252         return ret;
3253 }
3254
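/*
 * Common implementation for the create_srq and create_xsrq commands:
 * look up the PD (plus the XRCD and CQ for XRC SRQs), call the driver's
 * create_srq() method and publish the new SRQ uobject to userspace.
 */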
3255 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3256                                 struct ib_device *ib_dev,
3257                                 struct ib_uverbs_create_xsrq *cmd,
3258                                 struct ib_udata *udata)
3259 {
3260         struct ib_uverbs_create_srq_resp resp;
3261         struct ib_usrq_object           *obj;
3262         struct ib_pd                    *pd;
3263         struct ib_srq                   *srq;
3264         struct ib_uobject               *uninitialized_var(xrcd_uobj);
3265         struct ib_srq_init_attr          attr;
3266         int ret;
3267
3268         obj = kmalloc(sizeof *obj, GFP_KERNEL);
3269         if (!obj)
3270                 return -ENOMEM;
3271
3272         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
3273         down_write(&obj->uevent.uobject.mutex);
3274
3275         if (cmd->srq_type == IB_SRQT_XRC) {
3276                 attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
3277                 if (!attr.ext.xrc.xrcd) {
3278                         ret = -EINVAL;
3279                         goto err;
3280                 }
3281
3282                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3283                 atomic_inc(&obj->uxrcd->refcnt);
3284
3285                 attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3286                 if (!attr.ext.xrc.cq) {
3287                         ret = -EINVAL;
3288                         goto err_put_xrcd;
3289                 }
3290         }
3291
3292         pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
3293         if (!pd) {
3294                 ret = -EINVAL;
3295                 goto err_put_cq;
3296         }
3297
3298         attr.event_handler  = ib_uverbs_srq_event_handler;
3299         attr.srq_context    = file;
3300         attr.srq_type       = cmd->srq_type;
3301         attr.attr.max_wr    = cmd->max_wr;
3302         attr.attr.max_sge   = cmd->max_sge;
3303         attr.attr.srq_limit = cmd->srq_limit;
3304
3305         obj->uevent.events_reported = 0;
3306         INIT_LIST_HEAD(&obj->uevent.event_list);
3307
3308         srq = pd->device->create_srq(pd, &attr, udata);
3309         if (IS_ERR(srq)) {
3310                 ret = PTR_ERR(srq);
3311                 goto err_put;
3312         }
3313
3314         srq->device        = pd->device;
3315         srq->pd            = pd;
3316         srq->srq_type      = cmd->srq_type;
3317         srq->uobject       = &obj->uevent.uobject;
3318         srq->event_handler = attr.event_handler;
3319         srq->srq_context   = attr.srq_context;
3320
3321         if (cmd->srq_type == IB_SRQT_XRC) {
3322                 srq->ext.xrc.cq   = attr.ext.xrc.cq;
3323                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3324                 atomic_inc(&attr.ext.xrc.cq->usecnt);
3325                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3326         }
3327
3328         atomic_inc(&pd->usecnt);
3329         atomic_set(&srq->usecnt, 0);
3330
3331         obj->uevent.uobject.object = srq;
3332         ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3333         if (ret)
3334                 goto err_destroy;
3335
3336         memset(&resp, 0, sizeof resp);
3337         resp.srq_handle = obj->uevent.uobject.id;
3338         resp.max_wr     = attr.attr.max_wr;
3339         resp.max_sge    = attr.attr.max_sge;
3340         if (cmd->srq_type == IB_SRQT_XRC)
3341                 resp.srqn = srq->ext.xrc.srq_num;
3342
3343         if (copy_to_user((void __user *) (unsigned long) cmd->response,
3344                          &resp, sizeof resp)) {
3345                 ret = -EFAULT;
3346                 goto err_copy;
3347         }
3348
3349         if (cmd->srq_type == IB_SRQT_XRC) {
3350                 put_uobj_read(xrcd_uobj);
3351                 put_cq_read(attr.ext.xrc.cq);
3352         }
3353         put_pd_read(pd);
3354
3355         mutex_lock(&file->mutex);
3356         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3357         mutex_unlock(&file->mutex);
3358
3359         obj->uevent.uobject.live = 1;
3360
3361         up_write(&obj->uevent.uobject.mutex);
3362
3363         return 0;
3364
3365 err_copy:
3366         idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3367
3368 err_destroy:
3369         ib_destroy_srq(srq);
3370
3371 err_put:
3372         put_pd_read(pd);
3373
3374 err_put_cq:
3375         if (cmd->srq_type == IB_SRQT_XRC)
3376                 put_cq_read(attr.ext.xrc.cq);
3377
3378 err_put_xrcd:
3379         if (cmd->srq_type == IB_SRQT_XRC) {
3380                 atomic_dec(&obj->uxrcd->refcnt);
3381                 put_uobj_read(xrcd_uobj);
3382         }
3383
3384 err:
3385         put_uobj_write(&obj->uevent.uobject);
3386         return ret;
3387 }
3388
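/*
 * Legacy create_srq command: repackage it as an ib_uverbs_create_xsrq of
 * type IB_SRQT_BASIC and hand it to __uverbs_create_xsrq().
 */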
3389 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3390                              struct ib_device *ib_dev,
3391                              const char __user *buf, int in_len,
3392                              int out_len)
3393 {
3394         struct ib_uverbs_create_srq      cmd;
3395         struct ib_uverbs_create_xsrq     xcmd;
3396         struct ib_uverbs_create_srq_resp resp;
3397         struct ib_udata                  udata;
3398         int ret;
3399
3400         if (out_len < sizeof resp)
3401                 return -ENOSPC;
3402
3403         if (copy_from_user(&cmd, buf, sizeof cmd))
3404                 return -EFAULT;
3405
3406         xcmd.response    = cmd.response;
3407         xcmd.user_handle = cmd.user_handle;
3408         xcmd.srq_type    = IB_SRQT_BASIC;
3409         xcmd.pd_handle   = cmd.pd_handle;
3410         xcmd.max_wr      = cmd.max_wr;
3411         xcmd.max_sge     = cmd.max_sge;
3412         xcmd.srq_limit   = cmd.srq_limit;
3413
3414         INIT_UDATA(&udata, buf + sizeof cmd,
3415                    (unsigned long) cmd.response + sizeof resp,
3416                    in_len - sizeof cmd, out_len - sizeof resp);
3417
3418         ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3419         if (ret)
3420                 return ret;
3421
3422         return in_len;
3423 }
3424
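/*
 * Create an SRQ of a type chosen by userspace (basic or XRC); the work
 * is done by __uverbs_create_xsrq().
 */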
3425 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3426                               struct ib_device *ib_dev,
3427                               const char __user *buf, int in_len, int out_len)
3428 {
3429         struct ib_uverbs_create_xsrq     cmd;
3430         struct ib_uverbs_create_srq_resp resp;
3431         struct ib_udata                  udata;
3432         int ret;
3433
3434         if (out_len < sizeof resp)
3435                 return -ENOSPC;
3436
3437         if (copy_from_user(&cmd, buf, sizeof cmd))
3438                 return -EFAULT;
3439
3440         INIT_UDATA(&udata, buf + sizeof cmd,
3441                    (unsigned long) cmd.response + sizeof resp,
3442                    in_len - sizeof cmd, out_len - sizeof resp);
3443
3444         ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3445         if (ret)
3446                 return ret;
3447
3448         return in_len;
3449 }
3450
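/*
 * Modify an SRQ: forward the requested max_wr/srq_limit values and the
 * attr_mask to the driver's modify_srq() method.
 */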
3451 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3452                              struct ib_device *ib_dev,
3453                              const char __user *buf, int in_len,
3454                              int out_len)
3455 {
3456         struct ib_uverbs_modify_srq cmd;
3457         struct ib_udata             udata;
3458         struct ib_srq              *srq;
3459         struct ib_srq_attr          attr;
3460         int                         ret;
3461
3462         if (copy_from_user(&cmd, buf, sizeof cmd))
3463                 return -EFAULT;
3464
3465         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3466                    out_len);
3467
3468         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3469         if (!srq)
3470                 return -EINVAL;
3471
3472         attr.max_wr    = cmd.max_wr;
3473         attr.srq_limit = cmd.srq_limit;
3474
3475         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3476
3477         put_srq_read(srq);
3478
3479         return ret ? ret : in_len;
3480 }
3481
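/* Query an SRQ's current attributes and copy them back to userspace. */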
3482 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3483                             struct ib_device *ib_dev,
3484                             const char __user *buf,
3485                             int in_len, int out_len)
3486 {
3487         struct ib_uverbs_query_srq      cmd;
3488         struct ib_uverbs_query_srq_resp resp;
3489         struct ib_srq_attr              attr;
3490         struct ib_srq                   *srq;
3491         int                             ret;
3492
3493         if (out_len < sizeof resp)
3494                 return -ENOSPC;
3495
3496         if (copy_from_user(&cmd, buf, sizeof cmd))
3497                 return -EFAULT;
3498
3499         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3500         if (!srq)
3501                 return -EINVAL;
3502
3503         ret = ib_query_srq(srq, &attr);
3504
3505         put_srq_read(srq);
3506
3507         if (ret)
3508                 return ret;
3509
3510         memset(&resp, 0, sizeof resp);
3511
3512         resp.max_wr    = attr.max_wr;
3513         resp.max_sge   = attr.max_sge;
3514         resp.srq_limit = attr.srq_limit;
3515
3516         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3517                          &resp, sizeof resp))
3518                 return -EFAULT;
3519
3520         return in_len;
3521 }
3522
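/*
 * Destroy an SRQ: tear down the kernel object first, then, if that
 * succeeded, unlink the uobject and report the number of asynchronous
 * events that were delivered for it.
 */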
3523 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3524                               struct ib_device *ib_dev,
3525                               const char __user *buf, int in_len,
3526                               int out_len)
3527 {
3528         struct ib_uverbs_destroy_srq      cmd;
3529         struct ib_uverbs_destroy_srq_resp resp;
3530         struct ib_uobject                *uobj;
3531         struct ib_srq                    *srq;
3532         struct ib_uevent_object          *obj;
3533         int                               ret = -EINVAL;
3534         struct ib_usrq_object            *us;
3535         enum ib_srq_type                  srq_type;
3536
3537         if (copy_from_user(&cmd, buf, sizeof cmd))
3538                 return -EFAULT;
3539
3540         uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3541         if (!uobj)
3542                 return -EINVAL;
3543         srq = uobj->object;
3544         obj = container_of(uobj, struct ib_uevent_object, uobject);
3545         srq_type = srq->srq_type;
3546
3547         ret = ib_destroy_srq(srq);
3548         if (!ret)
3549                 uobj->live = 0;
3550
3551         put_uobj_write(uobj);
3552
3553         if (ret)
3554                 return ret;
3555
3556         if (srq_type == IB_SRQT_XRC) {
3557                 us = container_of(obj, struct ib_usrq_object, uevent);
3558                 atomic_dec(&us->uxrcd->refcnt);
3559         }
3560
3561         idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3562
3563         mutex_lock(&file->mutex);
3564         list_del(&uobj->list);
3565         mutex_unlock(&file->mutex);
3566
3567         ib_uverbs_release_uevent(file, obj);
3568
3569         memset(&resp, 0, sizeof resp);
3570         resp.events_reported = obj->events_reported;
3571
3572         put_uobj(uobj);
3573
3574         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3575                          &resp, sizeof resp))
3576                 ret = -EFAULT;
3577
3578         return ret ? ret : in_len;
3579 }
3580
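/*
 * Extended query_device command.  The response is built incrementally:
 * resp.response_length grows as each optional field (ODP caps, timestamp
 * mask, HCA core clock) is added, and a field is only included when the
 * user's output buffer is large enough to hold it.
 */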
3581 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3582                               struct ib_device *ib_dev,
3583                               struct ib_udata *ucore,
3584                               struct ib_udata *uhw)
3585 {
3586         struct ib_uverbs_ex_query_device_resp resp;
3587         struct ib_uverbs_ex_query_device  cmd;
3588         struct ib_device_attr attr;
3589         int err;
3590
3591         if (ucore->inlen < sizeof(cmd))
3592                 return -EINVAL;
3593
3594         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3595         if (err)
3596                 return err;
3597
3598         if (cmd.comp_mask)
3599                 return -EINVAL;
3600
3601         if (cmd.reserved)
3602                 return -EINVAL;
3603
3604         resp.response_length = offsetof(typeof(resp), odp_caps);
3605
3606         if (ucore->outlen < resp.response_length)
3607                 return -ENOSPC;
3608
3609         memset(&attr, 0, sizeof(attr));
3610
3611         err = ib_dev->query_device(ib_dev, &attr, uhw);
3612         if (err)
3613                 return err;
3614
3615         copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
3616         resp.comp_mask = 0;
3617
3618         if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3619                 goto end;
3620
3621 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3622         resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3623         resp.odp_caps.per_transport_caps.rc_odp_caps =
3624                 attr.odp_caps.per_transport_caps.rc_odp_caps;
3625         resp.odp_caps.per_transport_caps.uc_odp_caps =
3626                 attr.odp_caps.per_transport_caps.uc_odp_caps;
3627         resp.odp_caps.per_transport_caps.ud_odp_caps =
3628                 attr.odp_caps.per_transport_caps.ud_odp_caps;
3629         resp.odp_caps.reserved = 0;
3630 #else
3631         memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3632 #endif
3633         resp.response_length += sizeof(resp.odp_caps);
3634
3635         if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3636                 goto end;
3637
3638         resp.timestamp_mask = attr.timestamp_mask;
3639         resp.response_length += sizeof(resp.timestamp_mask);
3640
3641         if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3642                 goto end;
3643
3644         resp.hca_core_clock = attr.hca_core_clock;
3645         resp.response_length += sizeof(resp.hca_core_clock);
3646
3647 end:
3648         err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3649         if (err)
3650                 return err;
3651
3652         return 0;
3653 }