/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			*name;
};
51 static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
52 static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
53 static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
54 static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
55 static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
56 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
57 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
58 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
59 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
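/*
 * A rough, non-normative sketch of the read-side pattern implemented by
 * __idr_get_uobj() and idr_read_uobj() below: take the kref under
 * ib_uverbs_idr_lock, then take the rwsem, then check the live flag:
 *
 *	spin_lock(&ib_uverbs_idr_lock);
 *	uobj = idr_find(idr, id);
 *	if (uobj && uobj->context == context)
 *		kref_get(&uobj->ref);
 *	spin_unlock(&ib_uverbs_idr_lock);
 *
 *	down_read(&uobj->mutex);
 *	if (!uobj->live) {
 *		up_read(&uobj->mutex);
 *		put_uobj(uobj);
 *	}
 */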
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context	  = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live	  = 0;
}
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
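/*
 * Install a uobject in an idr.  The allocation itself happens under the
 * ib_uverbs_idr_lock spinlock, so idr_preload()/GFP_NOWAIT is used; on
 * success the chosen index is recorded in uobj->id.
 */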
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;
	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}
161 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
162 struct ib_ucontext *context, int nested)
164 struct ib_uobject *uobj;
166 uobj = __idr_get_uobj(idr, id, context);
	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
182 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
183 struct ib_ucontext *context)
185 struct ib_uobject *uobj;
187 uobj = __idr_get_uobj(idr, id, context);
	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
209 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
211 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
214 static void put_pd_read(struct ib_pd *pd)
216 put_uobj_read(pd->uobject);
219 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
221 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
224 static void put_cq_read(struct ib_cq *cq)
226 put_uobj_read(cq->uobject);
229 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
231 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
234 static void put_ah_read(struct ib_ah *ah)
236 put_uobj_read(ah->uobject);
239 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
241 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
244 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
246 struct ib_uobject *uobj;
248 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
249 return uobj ? uobj->object : NULL;
252 static void put_qp_read(struct ib_qp *qp)
254 put_uobj_read(qp->uobject);
257 static void put_qp_write(struct ib_qp *qp)
259 put_uobj_write(qp->uobject);
262 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
264 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
267 static void put_srq_read(struct ib_srq *srq)
269 put_uobj_read(srq->uobject);
272 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
273 struct ib_uobject **uobj)
275 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
276 return *uobj ? (*uobj)->object : NULL;
279 static void put_xrcd_read(struct ib_uobject *uobj)
284 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
285 const char __user *buf,
286 int in_len, int out_len)
288 struct ib_uverbs_get_context cmd;
289 struct ib_uverbs_get_context_resp resp;
290 struct ib_udata udata;
291 struct ib_device *ibdev = file->device->ib_dev;
292 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
293 struct ib_device_attr dev_attr;
295 struct ib_ucontext *ucontext;
299 if (out_len < sizeof resp)
302 if (copy_from_user(&cmd, buf, sizeof cmd))
305 mutex_lock(&file->mutex);
307 if (file->ucontext) {
312 INIT_UDATA(&udata, buf + sizeof cmd,
313 (unsigned long) cmd.response + sizeof resp,
314 in_len - sizeof cmd, out_len - sizeof resp);
316 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
317 if (IS_ERR(ucontext)) {
318 ret = PTR_ERR(ucontext);
322 ucontext->device = ibdev;
323 INIT_LIST_HEAD(&ucontext->pd_list);
324 INIT_LIST_HEAD(&ucontext->mr_list);
325 INIT_LIST_HEAD(&ucontext->mw_list);
326 INIT_LIST_HEAD(&ucontext->cq_list);
327 INIT_LIST_HEAD(&ucontext->qp_list);
328 INIT_LIST_HEAD(&ucontext->srq_list);
329 INIT_LIST_HEAD(&ucontext->ah_list);
330 INIT_LIST_HEAD(&ucontext->xrcd_list);
331 INIT_LIST_HEAD(&ucontext->rule_list);
333 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
335 ucontext->closing = 0;
337 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
338 ucontext->umem_tree = RB_ROOT;
339 init_rwsem(&ucontext->umem_rwsem);
340 ucontext->odp_mrs_count = 0;
341 INIT_LIST_HEAD(&ucontext->no_private_counters);
343 ret = ib_query_device(ibdev, &dev_attr);
346 if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
347 ucontext->invalidate_range = NULL;
351 resp.num_comp_vectors = file->device->num_comp_vectors;
353 ret = get_unused_fd_flags(O_CLOEXEC);
358 filp = ib_uverbs_alloc_event_file(file, 1);
364 if (copy_to_user((void __user *) (unsigned long) cmd.response,
365 &resp, sizeof resp)) {
370 file->async_file = filp->private_data;
372 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
373 ib_uverbs_event_handler);
374 ret = ib_register_event_handler(&file->event_handler);
378 kref_get(&file->async_file->ref);
379 kref_get(&file->ref);
380 file->ucontext = ucontext;
382 fd_install(resp.async_fd, filp);
384 mutex_unlock(&file->mutex);
392 put_unused_fd(resp.async_fd);
395 put_pid(ucontext->tgid);
396 ibdev->dealloc_ucontext(ucontext);
399 mutex_unlock(&file->mutex);
403 static void copy_query_dev_fields(struct ib_uverbs_file *file,
404 struct ib_uverbs_query_device_resp *resp,
405 struct ib_device_attr *attr)
407 resp->fw_ver = attr->fw_ver;
408 resp->node_guid = file->device->ib_dev->node_guid;
409 resp->sys_image_guid = attr->sys_image_guid;
410 resp->max_mr_size = attr->max_mr_size;
411 resp->page_size_cap = attr->page_size_cap;
412 resp->vendor_id = attr->vendor_id;
413 resp->vendor_part_id = attr->vendor_part_id;
414 resp->hw_ver = attr->hw_ver;
415 resp->max_qp = attr->max_qp;
416 resp->max_qp_wr = attr->max_qp_wr;
417 resp->device_cap_flags = attr->device_cap_flags;
418 resp->max_sge = attr->max_sge;
419 resp->max_sge_rd = attr->max_sge_rd;
420 resp->max_cq = attr->max_cq;
421 resp->max_cqe = attr->max_cqe;
422 resp->max_mr = attr->max_mr;
423 resp->max_pd = attr->max_pd;
424 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
425 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
426 resp->max_res_rd_atom = attr->max_res_rd_atom;
427 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
428 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
429 resp->atomic_cap = attr->atomic_cap;
430 resp->max_ee = attr->max_ee;
431 resp->max_rdd = attr->max_rdd;
432 resp->max_mw = attr->max_mw;
433 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
434 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
435 resp->max_mcast_grp = attr->max_mcast_grp;
436 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
437 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
438 resp->max_ah = attr->max_ah;
439 resp->max_fmr = attr->max_fmr;
440 resp->max_map_per_fmr = attr->max_map_per_fmr;
441 resp->max_srq = attr->max_srq;
442 resp->max_srq_wr = attr->max_srq_wr;
443 resp->max_srq_sge = attr->max_srq_sge;
444 resp->max_pkeys = attr->max_pkeys;
445 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
446 resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
449 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
450 const char __user *buf,
451 int in_len, int out_len)
453 struct ib_uverbs_query_device cmd;
454 struct ib_uverbs_query_device_resp resp;
455 struct ib_device_attr attr;
458 if (out_len < sizeof resp)
461 if (copy_from_user(&cmd, buf, sizeof cmd))
464 ret = ib_query_device(file->device->ib_dev, &attr);
468 memset(&resp, 0, sizeof resp);
469 copy_query_dev_fields(file, &resp, &attr);
471 if (copy_to_user((void __user *) (unsigned long) cmd.response,
478 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
479 const char __user *buf,
480 int in_len, int out_len)
482 struct ib_uverbs_query_port cmd;
483 struct ib_uverbs_query_port_resp resp;
484 struct ib_port_attr attr;
487 if (out_len < sizeof resp)
490 if (copy_from_user(&cmd, buf, sizeof cmd))
493 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
497 memset(&resp, 0, sizeof resp);
499 resp.state = attr.state;
500 resp.max_mtu = attr.max_mtu;
501 resp.active_mtu = attr.active_mtu;
502 resp.gid_tbl_len = attr.gid_tbl_len;
503 resp.port_cap_flags = attr.port_cap_flags;
504 resp.max_msg_sz = attr.max_msg_sz;
505 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
506 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
507 resp.pkey_tbl_len = attr.pkey_tbl_len;
509 resp.sm_lid = attr.sm_lid;
511 resp.max_vl_num = attr.max_vl_num;
512 resp.sm_sl = attr.sm_sl;
513 resp.subnet_timeout = attr.subnet_timeout;
514 resp.init_type_reply = attr.init_type_reply;
515 resp.active_width = attr.active_width;
516 resp.active_speed = attr.active_speed;
517 resp.phys_state = attr.phys_state;
518 resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
521 if (copy_to_user((void __user *) (unsigned long) cmd.response,
528 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
529 const char __user *buf,
530 int in_len, int out_len)
532 struct ib_uverbs_alloc_pd cmd;
533 struct ib_uverbs_alloc_pd_resp resp;
534 struct ib_udata udata;
535 struct ib_uobject *uobj;
539 if (out_len < sizeof resp)
542 if (copy_from_user(&cmd, buf, sizeof cmd))
545 INIT_UDATA(&udata, buf + sizeof cmd,
546 (unsigned long) cmd.response + sizeof resp,
547 in_len - sizeof cmd, out_len - sizeof resp);
549 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
553 init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
554 down_write(&uobj->mutex);
556 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
557 file->ucontext, &udata);
563 pd->device = file->device->ib_dev;
566 atomic_set(&pd->usecnt, 0);
569 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
573 memset(&resp, 0, sizeof resp);
574 resp.pd_handle = uobj->id;
576 if (copy_to_user((void __user *) (unsigned long) cmd.response,
577 &resp, sizeof resp)) {
582 mutex_lock(&file->mutex);
583 list_add_tail(&uobj->list, &file->ucontext->pd_list);
584 mutex_unlock(&file->mutex);
588 up_write(&uobj->mutex);
593 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
599 put_uobj_write(uobj);
603 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
604 const char __user *buf,
605 int in_len, int out_len)
607 struct ib_uverbs_dealloc_pd cmd;
608 struct ib_uobject *uobj;
611 if (copy_from_user(&cmd, buf, sizeof cmd))
614 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
618 ret = ib_dealloc_pd(uobj->object);
622 put_uobj_write(uobj);
627 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
629 mutex_lock(&file->mutex);
630 list_del(&uobj->list);
631 mutex_unlock(&file->mutex);
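/*
 * XRC domains opened on the same underlying inode must resolve to the same
 * ib_xrcd.  Each ib_uverbs_device therefore keeps an rb-tree of
 * inode -> ib_xrcd entries (the xrcd_table_* helpers below), protected by
 * xrcd_tree_mutex: open_xrcd looks the inode up before allocating a new
 * XRCD, and close_xrcd/ib_uverbs_dealloc_xrcd remove the entry once the
 * last user is gone.
 */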
638 struct xrcd_table_entry {
640 struct ib_xrcd *xrcd;
644 static int xrcd_table_insert(struct ib_uverbs_device *dev,
646 struct ib_xrcd *xrcd)
648 struct xrcd_table_entry *entry, *scan;
649 struct rb_node **p = &dev->xrcd_tree.rb_node;
650 struct rb_node *parent = NULL;
652 entry = kmalloc(sizeof *entry, GFP_KERNEL);
657 entry->inode = inode;
661 scan = rb_entry(parent, struct xrcd_table_entry, node);
663 if (inode < scan->inode) {
665 } else if (inode > scan->inode) {
673 rb_link_node(&entry->node, parent, p);
674 rb_insert_color(&entry->node, &dev->xrcd_tree);
679 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
682 struct xrcd_table_entry *entry;
683 struct rb_node *p = dev->xrcd_tree.rb_node;
686 entry = rb_entry(p, struct xrcd_table_entry, node);
688 if (inode < entry->inode)
690 else if (inode > entry->inode)
699 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
701 struct xrcd_table_entry *entry;
703 entry = xrcd_table_search(dev, inode);
710 static void xrcd_table_delete(struct ib_uverbs_device *dev,
713 struct xrcd_table_entry *entry;
715 entry = xrcd_table_search(dev, inode);
718 rb_erase(&entry->node, &dev->xrcd_tree);
723 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
724 const char __user *buf, int in_len,
727 struct ib_uverbs_open_xrcd cmd;
728 struct ib_uverbs_open_xrcd_resp resp;
729 struct ib_udata udata;
730 struct ib_uxrcd_object *obj;
731 struct ib_xrcd *xrcd = NULL;
732 struct fd f = {NULL, 0};
733 struct inode *inode = NULL;
737 if (out_len < sizeof resp)
740 if (copy_from_user(&cmd, buf, sizeof cmd))
743 INIT_UDATA(&udata, buf + sizeof cmd,
744 (unsigned long) cmd.response + sizeof resp,
745 in_len - sizeof cmd, out_len - sizeof resp);
747 mutex_lock(&file->device->xrcd_tree_mutex);
750 /* search for file descriptor */
754 goto err_tree_mutex_unlock;
757 inode = file_inode(f.file);
758 xrcd = find_xrcd(file->device, inode);
759 if (!xrcd && !(cmd.oflags & O_CREAT)) {
760 /* no file descriptor. Need CREATE flag */
762 goto err_tree_mutex_unlock;
765 if (xrcd && cmd.oflags & O_EXCL) {
767 goto err_tree_mutex_unlock;
771 obj = kmalloc(sizeof *obj, GFP_KERNEL);
774 goto err_tree_mutex_unlock;
777 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
779 down_write(&obj->uobject.mutex);
782 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
783 file->ucontext, &udata);
790 xrcd->device = file->device->ib_dev;
791 atomic_set(&xrcd->usecnt, 0);
792 mutex_init(&xrcd->tgt_qp_mutex);
793 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
797 atomic_set(&obj->refcnt, 0);
798 obj->uobject.object = xrcd;
799 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
803 memset(&resp, 0, sizeof resp);
804 resp.xrcd_handle = obj->uobject.id;
808 /* create new inode/xrcd table entry */
809 ret = xrcd_table_insert(file->device, inode, xrcd);
811 goto err_insert_xrcd;
813 atomic_inc(&xrcd->usecnt);
816 if (copy_to_user((void __user *) (unsigned long) cmd.response,
817 &resp, sizeof resp)) {
825 mutex_lock(&file->mutex);
826 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
827 mutex_unlock(&file->mutex);
829 obj->uobject.live = 1;
830 up_write(&obj->uobject.mutex);
832 mutex_unlock(&file->device->xrcd_tree_mutex);
838 xrcd_table_delete(file->device, inode);
839 atomic_dec(&xrcd->usecnt);
843 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
846 ib_dealloc_xrcd(xrcd);
849 put_uobj_write(&obj->uobject);
851 err_tree_mutex_unlock:
855 mutex_unlock(&file->device->xrcd_tree_mutex);
860 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
861 const char __user *buf, int in_len,
864 struct ib_uverbs_close_xrcd cmd;
865 struct ib_uobject *uobj;
866 struct ib_xrcd *xrcd = NULL;
867 struct inode *inode = NULL;
868 struct ib_uxrcd_object *obj;
872 if (copy_from_user(&cmd, buf, sizeof cmd))
875 mutex_lock(&file->device->xrcd_tree_mutex);
876 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
884 obj = container_of(uobj, struct ib_uxrcd_object, uobject);
885 if (atomic_read(&obj->refcnt)) {
886 put_uobj_write(uobj);
891 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
892 ret = ib_dealloc_xrcd(uobj->object);
899 atomic_inc(&xrcd->usecnt);
901 put_uobj_write(uobj);
907 xrcd_table_delete(file->device, inode);
909 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
910 mutex_lock(&file->mutex);
911 list_del(&uobj->list);
912 mutex_unlock(&file->mutex);
918 mutex_unlock(&file->device->xrcd_tree_mutex);
922 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
923 struct ib_xrcd *xrcd)
928 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
931 ib_dealloc_xrcd(xrcd);
934 xrcd_table_delete(dev, inode);
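/*
 * Memory registration: the user buffer and the requested HCA virtual
 * address must share the same offset within a page, the access flags are
 * validated with ib_check_mr_access(), and an on-demand-paging registration
 * additionally requires the device to advertise IB_DEVICE_ON_DEMAND_PAGING
 * before the driver's reg_user_mr() is called.
 */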
937 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
938 const char __user *buf, int in_len,
941 struct ib_uverbs_reg_mr cmd;
942 struct ib_uverbs_reg_mr_resp resp;
943 struct ib_udata udata;
944 struct ib_uobject *uobj;
949 if (out_len < sizeof resp)
952 if (copy_from_user(&cmd, buf, sizeof cmd))
955 INIT_UDATA(&udata, buf + sizeof cmd,
956 (unsigned long) cmd.response + sizeof resp,
957 in_len - sizeof cmd, out_len - sizeof resp);
959 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
962 ret = ib_check_mr_access(cmd.access_flags);
966 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
970 init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
971 down_write(&uobj->mutex);
973 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
979 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
980 struct ib_device_attr attr;
982 ret = ib_query_device(pd->device, &attr);
983 if (ret || !(attr.device_cap_flags &
984 IB_DEVICE_ON_DEMAND_PAGING)) {
985 pr_debug("ODP support not available\n");
991 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
992 cmd.access_flags, &udata);
998 mr->device = pd->device;
1001 atomic_inc(&pd->usecnt);
1002 atomic_set(&mr->usecnt, 0);
1005 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
1009 memset(&resp, 0, sizeof resp);
1010 resp.lkey = mr->lkey;
1011 resp.rkey = mr->rkey;
1012 resp.mr_handle = uobj->id;
1014 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1015 &resp, sizeof resp)) {
1022 mutex_lock(&file->mutex);
1023 list_add_tail(&uobj->list, &file->ucontext->mr_list);
1024 mutex_unlock(&file->mutex);
1028 up_write(&uobj->mutex);
1033 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1042 put_uobj_write(uobj);
1046 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
1047 const char __user *buf, int in_len,
1050 struct ib_uverbs_rereg_mr cmd;
1051 struct ib_uverbs_rereg_mr_resp resp;
1052 struct ib_udata udata;
1053 struct ib_pd *pd = NULL;
1055 struct ib_pd *old_pd;
1057 struct ib_uobject *uobj;
1059 if (out_len < sizeof(resp))
1062 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1065 INIT_UDATA(&udata, buf + sizeof(cmd),
1066 (unsigned long) cmd.response + sizeof(resp),
1067 in_len - sizeof(cmd), out_len - sizeof(resp));
1069 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
1072 if ((cmd.flags & IB_MR_REREG_TRANS) &&
1073 (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
1074 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
1077 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
1085 if (cmd.flags & IB_MR_REREG_ACCESS) {
1086 ret = ib_check_mr_access(cmd.access_flags);
1091 if (cmd.flags & IB_MR_REREG_PD) {
1092 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1099 if (atomic_read(&mr->usecnt)) {
1105 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
1106 cmd.length, cmd.hca_va,
1107 cmd.access_flags, pd, &udata);
1109 if (cmd.flags & IB_MR_REREG_PD) {
1110 atomic_inc(&pd->usecnt);
1112 atomic_dec(&old_pd->usecnt);
1118 memset(&resp, 0, sizeof(resp));
1119 resp.lkey = mr->lkey;
1120 resp.rkey = mr->rkey;
1122 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1123 &resp, sizeof(resp)))
1129 if (cmd.flags & IB_MR_REREG_PD)
1134 put_uobj_write(mr->uobject);
1139 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1140 const char __user *buf, int in_len,
1143 struct ib_uverbs_dereg_mr cmd;
1145 struct ib_uobject *uobj;
1148 if (copy_from_user(&cmd, buf, sizeof cmd))
1151 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1157 ret = ib_dereg_mr(mr);
1161 put_uobj_write(uobj);
1166 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1168 mutex_lock(&file->mutex);
1169 list_del(&uobj->list);
1170 mutex_unlock(&file->mutex);
1177 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1178 const char __user *buf, int in_len,
1181 struct ib_uverbs_alloc_mw cmd;
1182 struct ib_uverbs_alloc_mw_resp resp;
1183 struct ib_uobject *uobj;
1188 if (out_len < sizeof(resp))
1191 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1194 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1198 init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1199 down_write(&uobj->mutex);
1201 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1207 mw = pd->device->alloc_mw(pd, cmd.mw_type);
1213 mw->device = pd->device;
1216 atomic_inc(&pd->usecnt);
1219 ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1223 memset(&resp, 0, sizeof(resp));
1224 resp.rkey = mw->rkey;
1225 resp.mw_handle = uobj->id;
1227 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1228 &resp, sizeof(resp))) {
1235 mutex_lock(&file->mutex);
1236 list_add_tail(&uobj->list, &file->ucontext->mw_list);
1237 mutex_unlock(&file->mutex);
1241 up_write(&uobj->mutex);
1246 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1255 put_uobj_write(uobj);
1259 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1260 const char __user *buf, int in_len,
1263 struct ib_uverbs_dealloc_mw cmd;
1265 struct ib_uobject *uobj;
1268 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1271 uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1277 ret = ib_dealloc_mw(mw);
1281 put_uobj_write(uobj);
1286 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1288 mutex_lock(&file->mutex);
1289 list_del(&uobj->list);
1290 mutex_unlock(&file->mutex);
1297 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1298 const char __user *buf, int in_len,
1301 struct ib_uverbs_create_comp_channel cmd;
1302 struct ib_uverbs_create_comp_channel_resp resp;
1306 if (out_len < sizeof resp)
1309 if (copy_from_user(&cmd, buf, sizeof cmd))
1312 ret = get_unused_fd_flags(O_CLOEXEC);
1317 filp = ib_uverbs_alloc_event_file(file, 0);
1319 put_unused_fd(resp.fd);
1320 return PTR_ERR(filp);
1323 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1324 &resp, sizeof resp)) {
1325 put_unused_fd(resp.fd);
1330 fd_install(resp.fd, filp);
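/*
 * create_cq() is shared by the legacy write()-based ib_uverbs_create_cq
 * path and the extended ib_uverbs_ex_create_cq path; the cb callback is
 * what copies the response back in the format each caller expects.
 */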
1334 static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
1335 struct ib_udata *ucore,
1336 struct ib_udata *uhw,
1337 struct ib_uverbs_ex_create_cq *cmd,
1339 int (*cb)(struct ib_uverbs_file *file,
1340 struct ib_ucq_object *obj,
1341 struct ib_uverbs_ex_create_cq_resp *resp,
1342 struct ib_udata *udata,
1346 struct ib_ucq_object *obj;
1347 struct ib_uverbs_event_file *ev_file = NULL;
1350 struct ib_uverbs_ex_create_cq_resp resp;
1351 struct ib_cq_init_attr attr = {};
1353 if (cmd->comp_vector >= file->device->num_comp_vectors)
1354 return ERR_PTR(-EINVAL);
1356 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1358 return ERR_PTR(-ENOMEM);
1360 init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
1361 down_write(&obj->uobject.mutex);
1363 if (cmd->comp_channel >= 0) {
1364 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
1371 obj->uverbs_file = file;
1372 obj->comp_events_reported = 0;
1373 obj->async_events_reported = 0;
1374 INIT_LIST_HEAD(&obj->comp_list);
1375 INIT_LIST_HEAD(&obj->async_list);
1377 attr.cqe = cmd->cqe;
1378 attr.comp_vector = cmd->comp_vector;
1380 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
1381 attr.flags = cmd->flags;
1383 cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
1384 file->ucontext, uhw);
1390 cq->device = file->device->ib_dev;
1391 cq->uobject = &obj->uobject;
1392 cq->comp_handler = ib_uverbs_comp_handler;
1393 cq->event_handler = ib_uverbs_cq_event_handler;
1394 cq->cq_context = ev_file;
1395 atomic_set(&cq->usecnt, 0);
1397 obj->uobject.object = cq;
1398 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1402 memset(&resp, 0, sizeof resp);
1403 resp.base.cq_handle = obj->uobject.id;
1404 resp.base.cqe = cq->cqe;
1406 resp.response_length = offsetof(typeof(resp), response_length) +
1407 sizeof(resp.response_length);
1409 ret = cb(file, obj, &resp, ucore, context);
1413 mutex_lock(&file->mutex);
1414 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1415 mutex_unlock(&file->mutex);
1417 obj->uobject.live = 1;
1419 up_write(&obj->uobject.mutex);
1424 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1431 ib_uverbs_release_ucq(file, ev_file, obj);
1434 put_uobj_write(&obj->uobject);
1436 return ERR_PTR(ret);
1439 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1440 struct ib_ucq_object *obj,
1441 struct ib_uverbs_ex_create_cq_resp *resp,
1442 struct ib_udata *ucore, void *context)
1444 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1450 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1451 const char __user *buf, int in_len,
1454 struct ib_uverbs_create_cq cmd;
1455 struct ib_uverbs_ex_create_cq cmd_ex;
1456 struct ib_uverbs_create_cq_resp resp;
1457 struct ib_udata ucore;
1458 struct ib_udata uhw;
1459 struct ib_ucq_object *obj;
1461 if (out_len < sizeof(resp))
1464 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1467 INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));
1469 INIT_UDATA(&uhw, buf + sizeof(cmd),
1470 (unsigned long)cmd.response + sizeof(resp),
1471 in_len - sizeof(cmd), out_len - sizeof(resp));
1473 memset(&cmd_ex, 0, sizeof(cmd_ex));
1474 cmd_ex.user_handle = cmd.user_handle;
1475 cmd_ex.cqe = cmd.cqe;
1476 cmd_ex.comp_vector = cmd.comp_vector;
1477 cmd_ex.comp_channel = cmd.comp_channel;
1479 obj = create_cq(file, &ucore, &uhw, &cmd_ex,
1480 offsetof(typeof(cmd_ex), comp_channel) +
1481 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
1485 return PTR_ERR(obj);
1490 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1491 struct ib_ucq_object *obj,
1492 struct ib_uverbs_ex_create_cq_resp *resp,
1493 struct ib_udata *ucore, void *context)
1495 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1501 int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
1502 struct ib_udata *ucore,
1503 struct ib_udata *uhw)
1505 struct ib_uverbs_ex_create_cq_resp resp;
1506 struct ib_uverbs_ex_create_cq cmd;
1507 struct ib_ucq_object *obj;
1510 if (ucore->inlen < sizeof(cmd))
1513 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1523 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1524 sizeof(resp.response_length)))
1527 obj = create_cq(file, ucore, uhw, &cmd,
1528 min(ucore->inlen, sizeof(cmd)),
1529 ib_uverbs_ex_create_cq_cb, NULL);
1532 return PTR_ERR(obj);
1537 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1538 const char __user *buf, int in_len,
1541 struct ib_uverbs_resize_cq cmd;
1542 struct ib_uverbs_resize_cq_resp resp;
1543 struct ib_udata udata;
1547 if (copy_from_user(&cmd, buf, sizeof cmd))
1550 INIT_UDATA(&udata, buf + sizeof cmd,
1551 (unsigned long) cmd.response + sizeof resp,
1552 in_len - sizeof cmd, out_len - sizeof resp);
1554 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1558 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1564 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1565 &resp, sizeof resp.cqe))
1571 return ret ? ret : in_len;
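/*
 * Flatten a kernel struct ib_wc into the ABI struct ib_uverbs_wc field by
 * field (e.g. the wc->qp pointer is reported as its QP number) so that the
 * in-kernel structure layout is never exposed to user space.
 */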
1574 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1576 struct ib_uverbs_wc tmp;
1578 tmp.wr_id = wc->wr_id;
1579 tmp.status = wc->status;
1580 tmp.opcode = wc->opcode;
1581 tmp.vendor_err = wc->vendor_err;
1582 tmp.byte_len = wc->byte_len;
1583 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1584 tmp.qp_num = wc->qp->qp_num;
1585 tmp.src_qp = wc->src_qp;
1586 tmp.wc_flags = wc->wc_flags;
1587 tmp.pkey_index = wc->pkey_index;
1588 tmp.slid = wc->slid;
1590 tmp.dlid_path_bits = wc->dlid_path_bits;
1591 tmp.port_num = wc->port_num;
1594 if (copy_to_user(dest, &tmp, sizeof tmp))
1600 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1601 const char __user *buf, int in_len,
1604 struct ib_uverbs_poll_cq cmd;
1605 struct ib_uverbs_poll_cq_resp resp;
1606 u8 __user *header_ptr;
1607 u8 __user *data_ptr;
1612 if (copy_from_user(&cmd, buf, sizeof cmd))
1615 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1619 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1620 header_ptr = (void __user *)(unsigned long) cmd.response;
1621 data_ptr = header_ptr + sizeof resp;
1623 memset(&resp, 0, sizeof resp);
1624 while (resp.count < cmd.ne) {
1625 ret = ib_poll_cq(cq, 1, &wc);
1631 ret = copy_wc_to_user(data_ptr, &wc);
1635 data_ptr += sizeof(struct ib_uverbs_wc);
1639 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1651 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1652 const char __user *buf, int in_len,
1655 struct ib_uverbs_req_notify_cq cmd;
1658 if (copy_from_user(&cmd, buf, sizeof cmd))
1661 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1665 ib_req_notify_cq(cq, cmd.solicited_only ?
1666 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1673 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1674 const char __user *buf, int in_len,
1677 struct ib_uverbs_destroy_cq cmd;
1678 struct ib_uverbs_destroy_cq_resp resp;
1679 struct ib_uobject *uobj;
1681 struct ib_ucq_object *obj;
1682 struct ib_uverbs_event_file *ev_file;
1685 if (copy_from_user(&cmd, buf, sizeof cmd))
1688 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1692 ev_file = cq->cq_context;
1693 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
1695 ret = ib_destroy_cq(cq);
1699 put_uobj_write(uobj);
1704 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1706 mutex_lock(&file->mutex);
1707 list_del(&uobj->list);
1708 mutex_unlock(&file->mutex);
1710 ib_uverbs_release_ucq(file, ev_file, obj);
1712 memset(&resp, 0, sizeof resp);
1713 resp.comp_events_reported = obj->comp_events_reported;
1714 resp.async_events_reported = obj->async_events_reported;
1718 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1719 &resp, sizeof resp))
1725 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1726 const char __user *buf, int in_len,
1729 struct ib_uverbs_create_qp cmd;
1730 struct ib_uverbs_create_qp_resp resp;
1731 struct ib_udata udata;
1732 struct ib_uqp_object *obj;
1733 struct ib_device *device;
1734 struct ib_pd *pd = NULL;
1735 struct ib_xrcd *xrcd = NULL;
1736 struct ib_uobject *uninitialized_var(xrcd_uobj);
1737 struct ib_cq *scq = NULL, *rcq = NULL;
1738 struct ib_srq *srq = NULL;
1740 struct ib_qp_init_attr attr;
1743 if (out_len < sizeof resp)
1746 if (copy_from_user(&cmd, buf, sizeof cmd))
1749 if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1752 INIT_UDATA(&udata, buf + sizeof cmd,
1753 (unsigned long) cmd.response + sizeof resp,
1754 in_len - sizeof cmd, out_len - sizeof resp);
1756 obj = kzalloc(sizeof *obj, GFP_KERNEL);
1760 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1761 down_write(&obj->uevent.uobject.mutex);
1763 if (cmd.qp_type == IB_QPT_XRC_TGT) {
1764 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1769 device = xrcd->device;
1771 if (cmd.qp_type == IB_QPT_XRC_INI) {
1772 cmd.max_recv_wr = cmd.max_recv_sge = 0;
1775 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1776 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1782 if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1783 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1791 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1793 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1799 device = pd->device;
1802 attr.event_handler = ib_uverbs_qp_event_handler;
1803 attr.qp_context = file;
1808 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1809 attr.qp_type = cmd.qp_type;
1810 attr.create_flags = 0;
1812 attr.cap.max_send_wr = cmd.max_send_wr;
1813 attr.cap.max_recv_wr = cmd.max_recv_wr;
1814 attr.cap.max_send_sge = cmd.max_send_sge;
1815 attr.cap.max_recv_sge = cmd.max_recv_sge;
1816 attr.cap.max_inline_data = cmd.max_inline_data;
1818 obj->uevent.events_reported = 0;
1819 INIT_LIST_HEAD(&obj->uevent.event_list);
1820 INIT_LIST_HEAD(&obj->mcast_list);
1822 if (cmd.qp_type == IB_QPT_XRC_TGT)
1823 qp = ib_create_qp(pd, &attr);
1825 qp = device->create_qp(pd, &attr, &udata);
1832 if (cmd.qp_type != IB_QPT_XRC_TGT) {
1834 qp->device = device;
1836 qp->send_cq = attr.send_cq;
1837 qp->recv_cq = attr.recv_cq;
1839 qp->event_handler = attr.event_handler;
1840 qp->qp_context = attr.qp_context;
1841 qp->qp_type = attr.qp_type;
1842 atomic_set(&qp->usecnt, 0);
1843 atomic_inc(&pd->usecnt);
1844 atomic_inc(&attr.send_cq->usecnt);
1846 atomic_inc(&attr.recv_cq->usecnt);
1848 atomic_inc(&attr.srq->usecnt);
1850 qp->uobject = &obj->uevent.uobject;
1852 obj->uevent.uobject.object = qp;
1853 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1857 memset(&resp, 0, sizeof resp);
1858 resp.qpn = qp->qp_num;
1859 resp.qp_handle = obj->uevent.uobject.id;
1860 resp.max_recv_sge = attr.cap.max_recv_sge;
1861 resp.max_send_sge = attr.cap.max_send_sge;
1862 resp.max_recv_wr = attr.cap.max_recv_wr;
1863 resp.max_send_wr = attr.cap.max_send_wr;
1864 resp.max_inline_data = attr.cap.max_inline_data;
1866 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1867 &resp, sizeof resp)) {
1873 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1875 atomic_inc(&obj->uxrcd->refcnt);
1876 put_xrcd_read(xrcd_uobj);
1883 if (rcq && rcq != scq)
1888 mutex_lock(&file->mutex);
1889 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1890 mutex_unlock(&file->mutex);
1892 obj->uevent.uobject.live = 1;
1894 up_write(&obj->uevent.uobject.mutex);
1899 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1906 put_xrcd_read(xrcd_uobj);
1911 if (rcq && rcq != scq)
1916 put_uobj_write(&obj->uevent.uobject);
1920 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1921 const char __user *buf, int in_len, int out_len)
1923 struct ib_uverbs_open_qp cmd;
1924 struct ib_uverbs_create_qp_resp resp;
1925 struct ib_udata udata;
1926 struct ib_uqp_object *obj;
1927 struct ib_xrcd *xrcd;
1928 struct ib_uobject *uninitialized_var(xrcd_uobj);
1930 struct ib_qp_open_attr attr;
1933 if (out_len < sizeof resp)
1936 if (copy_from_user(&cmd, buf, sizeof cmd))
1939 INIT_UDATA(&udata, buf + sizeof cmd,
1940 (unsigned long) cmd.response + sizeof resp,
1941 in_len - sizeof cmd, out_len - sizeof resp);
1943 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1947 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1948 down_write(&obj->uevent.uobject.mutex);
1950 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1956 attr.event_handler = ib_uverbs_qp_event_handler;
1957 attr.qp_context = file;
1958 attr.qp_num = cmd.qpn;
1959 attr.qp_type = cmd.qp_type;
1961 obj->uevent.events_reported = 0;
1962 INIT_LIST_HEAD(&obj->uevent.event_list);
1963 INIT_LIST_HEAD(&obj->mcast_list);
1965 qp = ib_open_qp(xrcd, &attr);
1971 qp->uobject = &obj->uevent.uobject;
1973 obj->uevent.uobject.object = qp;
1974 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1978 memset(&resp, 0, sizeof resp);
1979 resp.qpn = qp->qp_num;
1980 resp.qp_handle = obj->uevent.uobject.id;
1982 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1983 &resp, sizeof resp)) {
1988 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1989 atomic_inc(&obj->uxrcd->refcnt);
1990 put_xrcd_read(xrcd_uobj);
1992 mutex_lock(&file->mutex);
1993 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1994 mutex_unlock(&file->mutex);
1996 obj->uevent.uobject.live = 1;
1998 up_write(&obj->uevent.uobject.mutex);
2003 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2009 put_xrcd_read(xrcd_uobj);
2010 put_uobj_write(&obj->uevent.uobject);
2014 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
2015 const char __user *buf, int in_len,
2018 struct ib_uverbs_query_qp cmd;
2019 struct ib_uverbs_query_qp_resp resp;
2021 struct ib_qp_attr *attr;
2022 struct ib_qp_init_attr *init_attr;
2025 if (copy_from_user(&cmd, buf, sizeof cmd))
2028 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2029 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
2030 if (!attr || !init_attr) {
2035 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2041 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
2048 memset(&resp, 0, sizeof resp);
2050 resp.qp_state = attr->qp_state;
2051 resp.cur_qp_state = attr->cur_qp_state;
2052 resp.path_mtu = attr->path_mtu;
2053 resp.path_mig_state = attr->path_mig_state;
2054 resp.qkey = attr->qkey;
2055 resp.rq_psn = attr->rq_psn;
2056 resp.sq_psn = attr->sq_psn;
2057 resp.dest_qp_num = attr->dest_qp_num;
2058 resp.qp_access_flags = attr->qp_access_flags;
2059 resp.pkey_index = attr->pkey_index;
2060 resp.alt_pkey_index = attr->alt_pkey_index;
2061 resp.sq_draining = attr->sq_draining;
2062 resp.max_rd_atomic = attr->max_rd_atomic;
2063 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
2064 resp.min_rnr_timer = attr->min_rnr_timer;
2065 resp.port_num = attr->port_num;
2066 resp.timeout = attr->timeout;
2067 resp.retry_cnt = attr->retry_cnt;
2068 resp.rnr_retry = attr->rnr_retry;
2069 resp.alt_port_num = attr->alt_port_num;
2070 resp.alt_timeout = attr->alt_timeout;
2072 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
2073 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
2074 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
2075 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
2076 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
2077 resp.dest.dlid = attr->ah_attr.dlid;
2078 resp.dest.sl = attr->ah_attr.sl;
2079 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
2080 resp.dest.static_rate = attr->ah_attr.static_rate;
2081 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
2082 resp.dest.port_num = attr->ah_attr.port_num;
2084 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
2085 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
2086 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
2087 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
2088 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
2089 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
2090 resp.alt_dest.sl = attr->alt_ah_attr.sl;
2091 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
2092 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
2093 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
2094 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
2096 resp.max_send_wr = init_attr->cap.max_send_wr;
2097 resp.max_recv_wr = init_attr->cap.max_recv_wr;
2098 resp.max_send_sge = init_attr->cap.max_send_sge;
2099 resp.max_recv_sge = init_attr->cap.max_recv_sge;
2100 resp.max_inline_data = init_attr->cap.max_inline_data;
2101 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
2103 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2104 &resp, sizeof resp))
2111 return ret ? ret : in_len;
2114 /* Remove ignored fields set in the attribute mask */
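/*
 * An XRC initiator QP has no responder side, so responder-only attributes
 * (e.g. max_dest_rd_atomic, min_rnr_timer) are dropped from the mask; an
 * XRC target QP has no initiator side, so the initiator-only attributes
 * are dropped instead.
 */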
2115 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2118 case IB_QPT_XRC_INI:
2119 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2120 case IB_QPT_XRC_TGT:
2121 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2128 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2129 const char __user *buf, int in_len,
2132 struct ib_uverbs_modify_qp cmd;
2133 struct ib_udata udata;
2135 struct ib_qp_attr *attr;
2138 if (copy_from_user(&cmd, buf, sizeof cmd))
2141 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2144 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2148 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2154 attr->qp_state = cmd.qp_state;
2155 attr->cur_qp_state = cmd.cur_qp_state;
2156 attr->path_mtu = cmd.path_mtu;
2157 attr->path_mig_state = cmd.path_mig_state;
2158 attr->qkey = cmd.qkey;
2159 attr->rq_psn = cmd.rq_psn;
2160 attr->sq_psn = cmd.sq_psn;
2161 attr->dest_qp_num = cmd.dest_qp_num;
2162 attr->qp_access_flags = cmd.qp_access_flags;
2163 attr->pkey_index = cmd.pkey_index;
2164 attr->alt_pkey_index = cmd.alt_pkey_index;
2165 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2166 attr->max_rd_atomic = cmd.max_rd_atomic;
2167 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
2168 attr->min_rnr_timer = cmd.min_rnr_timer;
2169 attr->port_num = cmd.port_num;
2170 attr->timeout = cmd.timeout;
2171 attr->retry_cnt = cmd.retry_cnt;
2172 attr->rnr_retry = cmd.rnr_retry;
2173 attr->alt_port_num = cmd.alt_port_num;
2174 attr->alt_timeout = cmd.alt_timeout;
2176 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2177 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
2178 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
2179 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
2180 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
2181 attr->ah_attr.dlid = cmd.dest.dlid;
2182 attr->ah_attr.sl = cmd.dest.sl;
2183 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
2184 attr->ah_attr.static_rate = cmd.dest.static_rate;
2185 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
2186 attr->ah_attr.port_num = cmd.dest.port_num;
2188 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2189 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
2190 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
2191 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
2192 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2193 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
2194 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
2195 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
2196 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
2197 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2198 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
2200 if (qp->real_qp == qp) {
2201 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2204 ret = qp->device->modify_qp(qp, attr,
2205 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2207 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2224 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2225 const char __user *buf, int in_len,
2228 struct ib_uverbs_destroy_qp cmd;
2229 struct ib_uverbs_destroy_qp_resp resp;
2230 struct ib_uobject *uobj;
2232 struct ib_uqp_object *obj;
2235 if (copy_from_user(&cmd, buf, sizeof cmd))
2238 memset(&resp, 0, sizeof resp);
2240 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2244 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2246 if (!list_empty(&obj->mcast_list)) {
2247 put_uobj_write(uobj);
2251 ret = ib_destroy_qp(qp);
2255 put_uobj_write(uobj);
2261 atomic_dec(&obj->uxrcd->refcnt);
2263 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2265 mutex_lock(&file->mutex);
2266 list_del(&uobj->list);
2267 mutex_unlock(&file->mutex);
2269 ib_uverbs_release_uevent(file, &obj->uevent);
2271 resp.events_reported = obj->uevent.events_reported;
2275 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2276 &resp, sizeof resp))
2282 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2283 const char __user *buf, int in_len,
2286 struct ib_uverbs_post_send cmd;
2287 struct ib_uverbs_post_send_resp resp;
2288 struct ib_uverbs_send_wr *user_wr;
2289 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2293 ssize_t ret = -EINVAL;
2295 if (copy_from_user(&cmd, buf, sizeof cmd))
2298 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2299 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2302 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2305 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2309 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2313 is_ud = qp->qp_type == IB_QPT_UD;
2316 for (i = 0; i < cmd.wr_count; ++i) {
2317 if (copy_from_user(user_wr,
2318 buf + sizeof cmd + i * cmd.wqe_size,
2324 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2329 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2330 user_wr->num_sge * sizeof (struct ib_sge),
2344 next->wr_id = user_wr->wr_id;
2345 next->num_sge = user_wr->num_sge;
2346 next->opcode = user_wr->opcode;
2347 next->send_flags = user_wr->send_flags;
2350 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2352 if (!next->wr.ud.ah) {
2356 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
2357 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2358 if (next->opcode == IB_WR_SEND_WITH_IMM)
2360 (__be32 __force) user_wr->ex.imm_data;
2362 switch (next->opcode) {
2363 case IB_WR_RDMA_WRITE_WITH_IMM:
2365 (__be32 __force) user_wr->ex.imm_data;
2366 case IB_WR_RDMA_WRITE:
2367 case IB_WR_RDMA_READ:
2368 next->wr.rdma.remote_addr =
2369 user_wr->wr.rdma.remote_addr;
2370 next->wr.rdma.rkey =
2371 user_wr->wr.rdma.rkey;
2373 case IB_WR_SEND_WITH_IMM:
2375 (__be32 __force) user_wr->ex.imm_data;
2377 case IB_WR_SEND_WITH_INV:
2378 next->ex.invalidate_rkey =
2379 user_wr->ex.invalidate_rkey;
2381 case IB_WR_ATOMIC_CMP_AND_SWP:
2382 case IB_WR_ATOMIC_FETCH_AND_ADD:
2383 next->wr.atomic.remote_addr =
2384 user_wr->wr.atomic.remote_addr;
2385 next->wr.atomic.compare_add =
2386 user_wr->wr.atomic.compare_add;
2387 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2388 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2395 if (next->num_sge) {
2396 next->sg_list = (void *) next +
2397 ALIGN(sizeof *next, sizeof (struct ib_sge));
2398 if (copy_from_user(next->sg_list,
2400 cmd.wr_count * cmd.wqe_size +
2401 sg_ind * sizeof (struct ib_sge),
2402 next->num_sge * sizeof (struct ib_sge))) {
2406 sg_ind += next->num_sge;
2408 next->sg_list = NULL;
2412 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2414 for (next = wr; next; next = next->next) {
2420 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2421 &resp, sizeof resp))
2428 if (is_ud && wr->wr.ud.ah)
2429 put_ah_read(wr->wr.ud.ah);
2438 return ret ? ret : in_len;
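/*
 * ib_uverbs_unmarshall_recv() turns the flat user-space array of
 * struct ib_uverbs_recv_wr plus scatter/gather entries into a kernel
 * linked list of struct ib_recv_wr.  It is shared by ib_uverbs_post_recv()
 * and ib_uverbs_post_srq_recv().
 */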
2441 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2447 struct ib_uverbs_recv_wr *user_wr;
2448 struct ib_recv_wr *wr = NULL, *last, *next;
2453 if (in_len < wqe_size * wr_count +
2454 sge_count * sizeof (struct ib_uverbs_sge))
2455 return ERR_PTR(-EINVAL);
2457 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2458 return ERR_PTR(-EINVAL);
2460 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2462 return ERR_PTR(-ENOMEM);
2466 for (i = 0; i < wr_count; ++i) {
2467 if (copy_from_user(user_wr, buf + i * wqe_size,
2473 if (user_wr->num_sge + sg_ind > sge_count) {
2478 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2479 user_wr->num_sge * sizeof (struct ib_sge),
2493 next->wr_id = user_wr->wr_id;
2494 next->num_sge = user_wr->num_sge;
2496 if (next->num_sge) {
2497 next->sg_list = (void *) next +
2498 ALIGN(sizeof *next, sizeof (struct ib_sge));
2499 if (copy_from_user(next->sg_list,
2500 buf + wr_count * wqe_size +
2501 sg_ind * sizeof (struct ib_sge),
2502 next->num_sge * sizeof (struct ib_sge))) {
2506 sg_ind += next->num_sge;
2508 next->sg_list = NULL;
2523 return ERR_PTR(ret);
2526 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2527 const char __user *buf, int in_len,
2530 struct ib_uverbs_post_recv cmd;
2531 struct ib_uverbs_post_recv_resp resp;
2532 struct ib_recv_wr *wr, *next, *bad_wr;
2534 ssize_t ret = -EINVAL;
2536 if (copy_from_user(&cmd, buf, sizeof cmd))
2539 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2540 in_len - sizeof cmd, cmd.wr_count,
2541 cmd.sge_count, cmd.wqe_size);
2545 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2550 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2555 for (next = wr; next; next = next->next) {
2561 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2562 &resp, sizeof resp))
2572 return ret ? ret : in_len;
2575 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2576 const char __user *buf, int in_len,
2579 struct ib_uverbs_post_srq_recv cmd;
2580 struct ib_uverbs_post_srq_recv_resp resp;
2581 struct ib_recv_wr *wr, *next, *bad_wr;
2583 ssize_t ret = -EINVAL;
2585 if (copy_from_user(&cmd, buf, sizeof cmd))
2588 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2589 in_len - sizeof cmd, cmd.wr_count,
2590 cmd.sge_count, cmd.wqe_size);
2594 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2599 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2604 for (next = wr; next; next = next->next) {
2610 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2611 &resp, sizeof resp))
2621 return ret ? ret : in_len;
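/*
 * Address handle creation: the user's attributes are translated into a
 * kernel struct ib_ah_attr (is_global selects IB_AH_GRH) and the resulting
 * ib_ah is tracked through an idr-backed uobject like every other verbs
 * object.
 */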
2624 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2625 const char __user *buf, int in_len,
2628 struct ib_uverbs_create_ah cmd;
2629 struct ib_uverbs_create_ah_resp resp;
2630 struct ib_uobject *uobj;
2633 struct ib_ah_attr attr;
2636 if (out_len < sizeof resp)
2639 if (copy_from_user(&cmd, buf, sizeof cmd))
2642 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2646 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2647 down_write(&uobj->mutex);
2649 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2655 attr.dlid = cmd.attr.dlid;
2656 attr.sl = cmd.attr.sl;
2657 attr.src_path_bits = cmd.attr.src_path_bits;
2658 attr.static_rate = cmd.attr.static_rate;
2659 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
2660 attr.port_num = cmd.attr.port_num;
2661 attr.grh.flow_label = cmd.attr.grh.flow_label;
2662 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
2663 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
2664 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2666 memset(&attr.dmac, 0, sizeof(attr.dmac));
2667 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2669 ah = ib_create_ah(pd, &attr);
2678 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2682 resp.ah_handle = uobj->id;
2684 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2685 &resp, sizeof resp)) {
2692 mutex_lock(&file->mutex);
2693 list_add_tail(&uobj->list, &file->ucontext->ah_list);
2694 mutex_unlock(&file->mutex);
2698 up_write(&uobj->mutex);
2703 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2712 put_uobj_write(uobj);
2716 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2717 const char __user *buf, int in_len, int out_len)
2719 struct ib_uverbs_destroy_ah cmd;
2721 struct ib_uobject *uobj;
2724 if (copy_from_user(&cmd, buf, sizeof cmd))
2727 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2732 ret = ib_destroy_ah(ah);
2736 put_uobj_write(uobj);
2741 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2743 mutex_lock(&file->mutex);
2744 list_del(&uobj->list);
2745 mutex_unlock(&file->mutex);
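/*
 * Multicast attachments are remembered on the QP's mcast_list so that the
 * same (gid, lid) pair is not attached twice and so that a QP with
 * attachments still in place cannot be destroyed through
 * ib_uverbs_destroy_qp().
 */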
2752 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2753 const char __user *buf, int in_len,
2756 struct ib_uverbs_attach_mcast cmd;
2758 struct ib_uqp_object *obj;
2759 struct ib_uverbs_mcast_entry *mcast;
2762 if (copy_from_user(&cmd, buf, sizeof cmd))
2765 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2769 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2771 list_for_each_entry(mcast, &obj->mcast_list, list)
2772 if (cmd.mlid == mcast->lid &&
2773 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2778 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2784 mcast->lid = cmd.mlid;
2785 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2787 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2789 list_add_tail(&mcast->list, &obj->mcast_list);
2796 return ret ? ret : in_len;
2799 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2800 const char __user *buf, int in_len,
2803 struct ib_uverbs_detach_mcast cmd;
2804 struct ib_uqp_object *obj;
2806 struct ib_uverbs_mcast_entry *mcast;
2809 if (copy_from_user(&cmd, buf, sizeof cmd))
2812 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2816 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2820 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2822 list_for_each_entry(mcast, &obj->mcast_list, list)
2823 if (cmd.mlid == mcast->lid &&
2824 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2825 list_del(&mcast->list);
2833 return ret ? ret : in_len;
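/*
 * kern_spec_to_ib_spec() converts a single user-space flow specification
 * into the kernel's union ib_flow_spec, rejecting specs whose size does not
 * match the kernel size for that spec type.  ib_uverbs_ex_create_flow()
 * uses it to walk the variable-length spec array that follows the flow
 * attributes.
 */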
2836 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2837 union ib_flow_spec *ib_spec)
2839 if (kern_spec->reserved)
2842 ib_spec->type = kern_spec->type;
2844 switch (ib_spec->type) {
2845 case IB_FLOW_SPEC_ETH:
2846 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2847 if (ib_spec->eth.size != kern_spec->eth.size)
2849 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2850 sizeof(struct ib_flow_eth_filter));
2851 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2852 sizeof(struct ib_flow_eth_filter));
2854 case IB_FLOW_SPEC_IPV4:
2855 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2856 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2858 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2859 sizeof(struct ib_flow_ipv4_filter));
2860 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2861 sizeof(struct ib_flow_ipv4_filter));
2863 case IB_FLOW_SPEC_TCP:
2864 case IB_FLOW_SPEC_UDP:
2865 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2866 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2868 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2869 sizeof(struct ib_flow_tcp_udp_filter));
2870 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2871 sizeof(struct ib_flow_tcp_udp_filter));
2879 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2880 struct ib_udata *ucore,
2881 struct ib_udata *uhw)
2883 struct ib_uverbs_create_flow cmd;
2884 struct ib_uverbs_create_flow_resp resp;
2885 struct ib_uobject *uobj;
2886 struct ib_flow *flow_id;
2887 struct ib_uverbs_flow_attr *kern_flow_attr;
2888 struct ib_flow_attr *flow_attr;
2895 if (ucore->inlen < sizeof(cmd))
2898 if (ucore->outlen < sizeof(resp))
2901 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2905 ucore->inbuf += sizeof(cmd);
2906 ucore->inlen -= sizeof(cmd);
2911 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2912 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2915 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2918 if (cmd.flow_attr.size > ucore->inlen ||
2919 cmd.flow_attr.size >
2920 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2923 if (cmd.flow_attr.reserved[0] ||
2924 cmd.flow_attr.reserved[1])
2927 if (cmd.flow_attr.num_of_specs) {
2928 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2930 if (!kern_flow_attr)
2933 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2934 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2935 cmd.flow_attr.size);
2939 kern_flow_attr = &cmd.flow_attr;
2942 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2947 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2948 down_write(&uobj->mutex);
2950 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2956 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2962 flow_attr->type = kern_flow_attr->type;
2963 flow_attr->priority = kern_flow_attr->priority;
2964 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2965 flow_attr->port = kern_flow_attr->port;
2966 flow_attr->flags = kern_flow_attr->flags;
2967 flow_attr->size = sizeof(*flow_attr);
2969 kern_spec = kern_flow_attr + 1;
2970 ib_spec = flow_attr + 1;
2971 for (i = 0; i < flow_attr->num_of_specs &&
2972 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2973 cmd.flow_attr.size >=
2974 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2975 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2978 flow_attr->size +=
2979 ((union ib_flow_spec *) ib_spec)->size;
2980 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2981 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2982 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2984 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2985 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2986 i, cmd.flow_attr.size);
2990 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2991 if (IS_ERR(flow_id)) {
2992 err = PTR_ERR(flow_id);
2996 flow_id->uobject = uobj;
2997 uobj->object = flow_id;
2999 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
3003 memset(&resp, 0, sizeof(resp));
3004 resp.flow_handle = uobj->id;
3006 err = ib_copy_to_udata(ucore,
3007 &resp, sizeof(resp));
3012 mutex_lock(&file->mutex);
3013 list_add_tail(&uobj->list, &file->ucontext->rule_list);
3014 mutex_unlock(&file->mutex);
3018 up_write(&uobj->mutex);
3020 if (cmd.flow_attr.num_of_specs)
3021 kfree(kern_flow_attr);
3024 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3026 ib_destroy_flow(flow_id);
3032 put_uobj_write(uobj);
3034 if (cmd.flow_attr.num_of_specs)
3035 kfree(kern_flow_attr);
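/*
 * Destroy a flow steering rule: look the rule uobject up with a write
 * lock, destroy the flow, then remove the uobject from
 * ib_uverbs_rule_idr and the per-context rule_list.
 */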
3039 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3040 struct ib_udata *ucore,
3041 struct ib_udata *uhw)
3043 struct ib_uverbs_destroy_flow cmd;
3044 struct ib_flow *flow_id;
3045 struct ib_uobject *uobj;
3048 if (ucore->inlen < sizeof(cmd))
3051 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3058 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
3062 flow_id = uobj->object;
3064 ret = ib_destroy_flow(flow_id);
3068 put_uobj_write(uobj);
3070 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3072 mutex_lock(&file->mutex);
3073 list_del(&uobj->list);
3074 mutex_unlock(&file->mutex);
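/*
 * Common worker for the basic and extended create-SRQ commands.  For
 * IB_SRQT_XRC the XRCD and XRC completion queue are looked up and
 * reference-counted; the SRQ is then created through the device's
 * create_srq() method and published in ib_uverbs_srq_idr and the
 * per-context srq_list, with uobject.live set only once the response
 * has been copied to user space.
 */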
3081 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3082 struct ib_uverbs_create_xsrq *cmd,
3083 struct ib_udata *udata)
3085 struct ib_uverbs_create_srq_resp resp;
3086 struct ib_usrq_object *obj;
3089 struct ib_uobject *uninitialized_var(xrcd_uobj);
3090 struct ib_srq_init_attr attr;
3093 obj = kmalloc(sizeof *obj, GFP_KERNEL);
3097 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
3098 down_write(&obj->uevent.uobject.mutex);
3100 if (cmd->srq_type == IB_SRQT_XRC) {
3101 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
3102 if (!attr.ext.xrc.xrcd) {
3107 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3108 atomic_inc(&obj->uxrcd->refcnt);
3110 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3111 if (!attr.ext.xrc.cq) {
3117 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
3123 attr.event_handler = ib_uverbs_srq_event_handler;
3124 attr.srq_context = file;
3125 attr.srq_type = cmd->srq_type;
3126 attr.attr.max_wr = cmd->max_wr;
3127 attr.attr.max_sge = cmd->max_sge;
3128 attr.attr.srq_limit = cmd->srq_limit;
3130 obj->uevent.events_reported = 0;
3131 INIT_LIST_HEAD(&obj->uevent.event_list);
3133 srq = pd->device->create_srq(pd, &attr, udata);
3139 srq->device = pd->device;
3141 srq->srq_type = cmd->srq_type;
3142 srq->uobject = &obj->uevent.uobject;
3143 srq->event_handler = attr.event_handler;
3144 srq->srq_context = attr.srq_context;
3146 if (cmd->srq_type == IB_SRQT_XRC) {
3147 srq->ext.xrc.cq = attr.ext.xrc.cq;
3148 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3149 atomic_inc(&attr.ext.xrc.cq->usecnt);
3150 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3153 atomic_inc(&pd->usecnt);
3154 atomic_set(&srq->usecnt, 0);
3156 obj->uevent.uobject.object = srq;
3157 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3161 memset(&resp, 0, sizeof resp);
3162 resp.srq_handle = obj->uevent.uobject.id;
3163 resp.max_wr = attr.attr.max_wr;
3164 resp.max_sge = attr.attr.max_sge;
3165 if (cmd->srq_type == IB_SRQT_XRC)
3166 resp.srqn = srq->ext.xrc.srq_num;
3168 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3169 &resp, sizeof resp)) {
3174 if (cmd->srq_type == IB_SRQT_XRC) {
3175 put_uobj_read(xrcd_uobj);
3176 put_cq_read(attr.ext.xrc.cq);
3180 mutex_lock(&file->mutex);
3181 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3182 mutex_unlock(&file->mutex);
3184 obj->uevent.uobject.live = 1;
3186 up_write(&obj->uevent.uobject.mutex);
3191 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3194 ib_destroy_srq(srq);
3200 if (cmd->srq_type == IB_SRQT_XRC)
3201 put_cq_read(attr.ext.xrc.cq);
3204 if (cmd->srq_type == IB_SRQT_XRC) {
3205 atomic_dec(&obj->uxrcd->refcnt);
3206 put_uobj_read(xrcd_uobj);
3210 put_uobj_write(&obj->uevent.uobject);
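/*
 * Legacy create-SRQ entry point: repack the old struct
 * ib_uverbs_create_srq into a struct ib_uverbs_create_xsrq with
 * srq_type = IB_SRQT_BASIC and hand it to __uverbs_create_xsrq().
 */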
3214 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3215 const char __user *buf, int in_len,
3218 struct ib_uverbs_create_srq cmd;
3219 struct ib_uverbs_create_xsrq xcmd;
3220 struct ib_uverbs_create_srq_resp resp;
3221 struct ib_udata udata;
3224 if (out_len < sizeof resp)
3227 if (copy_from_user(&cmd, buf, sizeof cmd))
3228 return -EFAULT;
3230 xcmd.response = cmd.response;
3231 xcmd.user_handle = cmd.user_handle;
3232 xcmd.srq_type = IB_SRQT_BASIC;
3233 xcmd.pd_handle = cmd.pd_handle;
3234 xcmd.max_wr = cmd.max_wr;
3235 xcmd.max_sge = cmd.max_sge;
3236 xcmd.srq_limit = cmd.srq_limit;
3238 INIT_UDATA(&udata, buf + sizeof cmd,
3239 (unsigned long) cmd.response + sizeof resp,
3240 in_len - sizeof cmd, out_len - sizeof resp);
3242 ret = __uverbs_create_xsrq(file, &xcmd, &udata);
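/*
 * Extended (XRC-capable) create-SRQ entry point; the command already
 * has the xsrq layout, so it is passed straight to
 * __uverbs_create_xsrq().
 */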
3249 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3250 const char __user *buf, int in_len, int out_len)
3252 struct ib_uverbs_create_xsrq cmd;
3253 struct ib_uverbs_create_srq_resp resp;
3254 struct ib_udata udata;
3257 if (out_len < sizeof resp)
3260 if (copy_from_user(&cmd, buf, sizeof cmd))
3261 return -EFAULT;
3263 INIT_UDATA(&udata, buf + sizeof cmd,
3264 (unsigned long) cmd.response + sizeof resp,
3265 in_len - sizeof cmd, out_len - sizeof resp);
3267 ret = __uverbs_create_xsrq(file, &cmd, &udata);
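/*
 * Modify an SRQ: only max_wr and srq_limit can change, selected by
 * cmd.attr_mask, and the request goes directly to the device's
 * modify_srq() method.
 */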
3274 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3275 const char __user *buf, int in_len,
3278 struct ib_uverbs_modify_srq cmd;
3279 struct ib_udata udata;
3281 struct ib_srq_attr attr;
3284 if (copy_from_user(&cmd, buf, sizeof cmd))
3285 return -EFAULT;
3287 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3290 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3294 attr.max_wr = cmd.max_wr;
3295 attr.srq_limit = cmd.srq_limit;
3297 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3301 return ret ? ret : in_len;
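/*
 * Query an SRQ and return its current max_wr, max_sge and srq_limit
 * to user space.
 */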
3304 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3305 const char __user *buf,
3306 int in_len, int out_len)
3308 struct ib_uverbs_query_srq cmd;
3309 struct ib_uverbs_query_srq_resp resp;
3310 struct ib_srq_attr attr;
3314 if (out_len < sizeof resp)
3317 if (copy_from_user(&cmd, buf, sizeof cmd))
3318 return -EFAULT;
3320 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3324 ret = ib_query_srq(srq, &attr);
3331 memset(&resp, 0, sizeof resp);
3333 resp.max_wr = attr.max_wr;
3334 resp.max_sge = attr.max_sge;
3335 resp.srq_limit = attr.srq_limit;
3337 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3338 &resp, sizeof resp))
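/*
 * Destroy an SRQ.  For an XRC SRQ the parent XRCD's user count is
 * dropped as well.  Any asynchronous events still queued on the
 * uobject are released, and their number is reported back to user
 * space in resp.events_reported.
 */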
3344 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3345 const char __user *buf, int in_len,
3348 struct ib_uverbs_destroy_srq cmd;
3349 struct ib_uverbs_destroy_srq_resp resp;
3350 struct ib_uobject *uobj;
3352 struct ib_uevent_object *obj;
3354 struct ib_usrq_object *us;
3355 enum ib_srq_type srq_type;
3357 if (copy_from_user(&cmd, buf, sizeof cmd))
3358 return -EFAULT;
3360 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3363 srq = uobj->object;
3364 obj = container_of(uobj, struct ib_uevent_object, uobject);
3365 srq_type = srq->srq_type;
3367 ret = ib_destroy_srq(srq);
3371 put_uobj_write(uobj);
3376 if (srq_type == IB_SRQT_XRC) {
3377 us = container_of(obj, struct ib_usrq_object, uevent);
3378 atomic_dec(&us->uxrcd->refcnt);
3381 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3383 mutex_lock(&file->mutex);
3384 list_del(&uobj->list);
3385 mutex_unlock(&file->mutex);
3387 ib_uverbs_release_uevent(file, obj);
3389 memset(&resp, 0, sizeof resp);
3390 resp.events_reported = obj->events_reported;
3394 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3395 &resp, sizeof resp))
3398 return ret ? ret : in_len;
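/*
 * Extended query-device command with an extensible response.  Each
 * optional block (odp_caps, timestamp_mask, hca_core_clock) is filled
 * in only when the user buffer is large enough to hold it, and
 * resp.response_length grows accordingly, so older user libraries
 * simply get a shorter response.  A sketch of the pattern used for
 * each optional field:
 *
 *	if (ucore->outlen >= resp.response_length + sizeof(resp.timestamp_mask)) {
 *		resp.timestamp_mask = attr.timestamp_mask;
 *		resp.response_length += sizeof(resp.timestamp_mask);
 *	}
 */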
3401 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3402 struct ib_udata *ucore,
3403 struct ib_udata *uhw)
3405 struct ib_uverbs_ex_query_device_resp resp;
3406 struct ib_uverbs_ex_query_device cmd;
3407 struct ib_device_attr attr;
3408 struct ib_device *device;
3411 device = file->device->ib_dev;
3412 if (ucore->inlen < sizeof(cmd))
3415 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3425 resp.response_length = offsetof(typeof(resp), odp_caps);
3427 if (ucore->outlen < resp.response_length)
3430 memset(&attr, 0, sizeof(attr));
3432 err = device->query_device(device, &attr, uhw);
3436 copy_query_dev_fields(file, &resp.base, &attr);
3439 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3442 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3443 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3444 resp.odp_caps.per_transport_caps.rc_odp_caps =
3445 attr.odp_caps.per_transport_caps.rc_odp_caps;
3446 resp.odp_caps.per_transport_caps.uc_odp_caps =
3447 attr.odp_caps.per_transport_caps.uc_odp_caps;
3448 resp.odp_caps.per_transport_caps.ud_odp_caps =
3449 attr.odp_caps.per_transport_caps.ud_odp_caps;
3450 resp.odp_caps.reserved = 0;
3451 #else
3452 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3453 #endif
3454 resp.response_length += sizeof(resp.odp_caps);
3456 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3459 resp.timestamp_mask = attr.timestamp_mask;
3460 resp.response_length += sizeof(resp.timestamp_mask);
3462 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3465 resp.hca_core_clock = attr.hca_core_clock;
3466 resp.response_length += sizeof(resp.hca_core_clock);
3469 err = ib_copy_to_udata(ucore, &resp, resp.response_length);