/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <rdma/ib.h>

#include "uverbs.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
        IB_UVERBS_MAX_DEVICES = 32
};

#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static struct class *uverbs_class;

DEFINE_SPINLOCK(ib_uverbs_idr_lock);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
DEFINE_IDR(ib_uverbs_rule_idr);
DEFINE_IDR(ib_uverbs_wq_idr);
DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);

static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);

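/*
 * Dispatch table for the legacy write() command path: each entry is
 * indexed by the command number from struct ib_uverbs_cmd_hdr and is
 * handed the raw user buffer along with its declared in/out lengths.
 */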
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
                                     struct ib_device *ib_dev,
                                     const char __user *buf, int in_len,
                                     int out_len) = {
        [IB_USER_VERBS_CMD_GET_CONTEXT]         = ib_uverbs_get_context,
        [IB_USER_VERBS_CMD_QUERY_DEVICE]        = ib_uverbs_query_device,
        [IB_USER_VERBS_CMD_QUERY_PORT]          = ib_uverbs_query_port,
        [IB_USER_VERBS_CMD_ALLOC_PD]            = ib_uverbs_alloc_pd,
        [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
        [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
        [IB_USER_VERBS_CMD_REREG_MR]            = ib_uverbs_rereg_mr,
        [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
        [IB_USER_VERBS_CMD_ALLOC_MW]            = ib_uverbs_alloc_mw,
        [IB_USER_VERBS_CMD_DEALLOC_MW]          = ib_uverbs_dealloc_mw,
        [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
        [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
        [IB_USER_VERBS_CMD_RESIZE_CQ]           = ib_uverbs_resize_cq,
        [IB_USER_VERBS_CMD_POLL_CQ]             = ib_uverbs_poll_cq,
        [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]       = ib_uverbs_req_notify_cq,
        [IB_USER_VERBS_CMD_DESTROY_CQ]          = ib_uverbs_destroy_cq,
        [IB_USER_VERBS_CMD_CREATE_QP]           = ib_uverbs_create_qp,
        [IB_USER_VERBS_CMD_QUERY_QP]            = ib_uverbs_query_qp,
        [IB_USER_VERBS_CMD_MODIFY_QP]           = ib_uverbs_modify_qp,
        [IB_USER_VERBS_CMD_DESTROY_QP]          = ib_uverbs_destroy_qp,
        [IB_USER_VERBS_CMD_POST_SEND]           = ib_uverbs_post_send,
        [IB_USER_VERBS_CMD_POST_RECV]           = ib_uverbs_post_recv,
        [IB_USER_VERBS_CMD_POST_SRQ_RECV]       = ib_uverbs_post_srq_recv,
        [IB_USER_VERBS_CMD_CREATE_AH]           = ib_uverbs_create_ah,
        [IB_USER_VERBS_CMD_DESTROY_AH]          = ib_uverbs_destroy_ah,
        [IB_USER_VERBS_CMD_ATTACH_MCAST]        = ib_uverbs_attach_mcast,
        [IB_USER_VERBS_CMD_DETACH_MCAST]        = ib_uverbs_detach_mcast,
        [IB_USER_VERBS_CMD_CREATE_SRQ]          = ib_uverbs_create_srq,
        [IB_USER_VERBS_CMD_MODIFY_SRQ]          = ib_uverbs_modify_srq,
        [IB_USER_VERBS_CMD_QUERY_SRQ]           = ib_uverbs_query_srq,
        [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
        [IB_USER_VERBS_CMD_OPEN_XRCD]           = ib_uverbs_open_xrcd,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp,
};

static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_device *ib_dev,
                                    struct ib_udata *ucore,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
        [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device,
        [IB_USER_VERBS_EX_CMD_CREATE_CQ]        = ib_uverbs_ex_create_cq,
        [IB_USER_VERBS_EX_CMD_CREATE_QP]        = ib_uverbs_ex_create_qp,
        [IB_USER_VERBS_EX_CMD_CREATE_WQ]        = ib_uverbs_ex_create_wq,
        [IB_USER_VERBS_EX_CMD_MODIFY_WQ]        = ib_uverbs_ex_modify_wq,
        [IB_USER_VERBS_EX_CMD_DESTROY_WQ]       = ib_uverbs_ex_destroy_wq,
        [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
        [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
};

static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);
        return ret;
}

static void ib_uverbs_release_dev(struct kobject *kobj)
{
        struct ib_uverbs_device *dev =
                container_of(kobj, struct ib_uverbs_device, kobj);

        cleanup_srcu_struct(&dev->disassociate_srcu);
        kfree(dev);
}

static struct kobj_type ib_uverbs_dev_ktype = {
        .release = ib_uverbs_release_dev,
};

static void ib_uverbs_release_event_file(struct kref *ref)
{
        struct ib_uverbs_event_file *file =
                container_of(ref, struct ib_uverbs_event_file, ref);

        kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
                          struct ib_uverbs_event_file *ev_file,
                          struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->lock);

                kref_put(&ev_file->ref, ib_uverbs_release_event_file);
        }

        spin_lock_irq(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
                              struct ib_uevent_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        spin_lock_irq(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->lock);
}

static void ib_uverbs_detach_umcast(struct ib_qp *qp,
                                    struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}

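/*
 * Tear down every uobject still attached to a ucontext.  Objects are
 * released in dependency order (e.g. MWs before the QPs and PDs they
 * may reference, QPs before CQs and SRQs), with the PDs freed last.
 */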
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
                                      struct ib_ucontext *context)
{
        struct ib_uobject *uobj, *tmp;

        context->closing = 1;

        list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
                struct ib_ah *ah = uobj->object;

                idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
                ib_destroy_ah(ah);
                kfree(uobj);
        }

        /* Remove MWs before QPs, in order to support type 2A MWs. */
        list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
                struct ib_mw *mw = uobj->object;

                idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
                uverbs_dealloc_mw(mw);
                kfree(uobj);
        }

        list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
                struct ib_flow *flow_id = uobj->object;

                idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
                ib_destroy_flow(flow_id);
                kfree(uobj);
        }

        list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
                struct ib_qp *qp = uobj->object;
                struct ib_uqp_object *uqp =
                        container_of(uobj, struct ib_uqp_object, uevent.uobject);

                idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
                if (qp != qp->real_qp) {
                        ib_close_qp(qp);
                } else {
                        ib_uverbs_detach_umcast(qp, uqp);
                        ib_destroy_qp(qp);
                }
                ib_uverbs_release_uevent(file, &uqp->uevent);
                kfree(uqp);
        }

        list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
                struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
                struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;

                idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
                ib_destroy_rwq_ind_table(rwq_ind_tbl);
                kfree(ind_tbl);
                kfree(uobj);
        }

        list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
                struct ib_wq *wq = uobj->object;
                struct ib_uwq_object *uwq =
                        container_of(uobj, struct ib_uwq_object, uevent.uobject);

                idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
                ib_destroy_wq(wq);
                ib_uverbs_release_uevent(file, &uwq->uevent);
                kfree(uwq);
        }

        list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
                struct ib_srq *srq = uobj->object;
                struct ib_uevent_object *uevent =
                        container_of(uobj, struct ib_uevent_object, uobject);

                idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
                ib_destroy_srq(srq);
                ib_uverbs_release_uevent(file, uevent);
                kfree(uevent);
        }

        list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
                struct ib_cq *cq = uobj->object;
                struct ib_uverbs_event_file *ev_file = cq->cq_context;
                struct ib_ucq_object *ucq =
                        container_of(uobj, struct ib_ucq_object, uobject);

                idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
                ib_destroy_cq(cq);
                ib_uverbs_release_ucq(file, ev_file, ucq);
                kfree(ucq);
        }

        list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
                struct ib_mr *mr = uobj->object;

                idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
                ib_dereg_mr(mr);
                kfree(uobj);
        }

        mutex_lock(&file->device->xrcd_tree_mutex);
        list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
                struct ib_xrcd *xrcd = uobj->object;
                struct ib_uxrcd_object *uxrcd =
                        container_of(uobj, struct ib_uxrcd_object, uobject);

                idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
                ib_uverbs_dealloc_xrcd(file->device, xrcd);
                kfree(uxrcd);
        }
        mutex_unlock(&file->device->xrcd_tree_mutex);

        list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
                struct ib_pd *pd = uobj->object;

                idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
                ib_dealloc_pd(pd);
                kfree(uobj);
        }

        put_pid(context->tgid);

        return context->device->dealloc_ucontext(context);
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}

static void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->disassociate_ucontext)
                module_put(ib_dev->owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        kfree(file);
}

static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
{
        struct ib_uverbs_event_file *file = filp->private_data;
        struct ib_uverbs_event *event;
        int eventsz;
        int ret = 0;

        spin_lock_irq(&file->lock);

        while (list_empty(&file->event_list)) {
                spin_unlock_irq(&file->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             (!list_empty(&file->event_list) ||
                        /* The barriers built into wait_event_interruptible()
                         * and wake_up() guarantee this will see the null set
                         * without using RCU
                         */
                                             !file->uverbs_file->device->ib_dev)))
                        return -ERESTARTSYS;

                /* If the device was disassociated and no event exists, return an error */
                if (list_empty(&file->event_list) &&
                    !file->uverbs_file->device->ib_dev)
                        return -EIO;

                spin_lock_irq(&file->lock);
        }

        event = list_entry(file->event_list.next, struct ib_uverbs_event, list);

        if (file->is_async)
                eventsz = sizeof (struct ib_uverbs_async_event_desc);
        else
                eventsz = sizeof (struct ib_uverbs_comp_event_desc);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(file->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&file->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}

static unsigned int ib_uverbs_event_poll(struct file *filp,
                                         struct poll_table_struct *wait)
{
        unsigned int pollflags = 0;
        struct ib_uverbs_event_file *file = filp->private_data;

        poll_wait(filp, &file->poll_wait, wait);

        spin_lock_irq(&file->lock);
        if (!list_empty(&file->event_list))
                pollflags = POLLIN | POLLRDNORM;
        spin_unlock_irq(&file->lock);

        return pollflags;
}

static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_event_file *file = filp->private_data;

        return fasync_helper(fd, filp, on, &file->async_queue);
}

static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_event_file *file = filp->private_data;
        struct ib_uverbs_event *entry, *tmp;
        int closed_already = 0;

        mutex_lock(&file->uverbs_file->device->lists_mutex);
        spin_lock_irq(&file->lock);
        closed_already = file->is_closed;
        file->is_closed = 1;
        list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        spin_unlock_irq(&file->lock);
        if (!closed_already) {
                list_del(&file->list);
                if (file->is_async)
                        ib_unregister_event_handler(&file->uverbs_file->
                                event_handler);
        }
        mutex_unlock(&file->uverbs_file->device->lists_mutex);

        kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&file->ref, ib_uverbs_release_event_file);

        return 0;
}

static const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_event_read,
        .poll    = ib_uverbs_event_poll,
        .release = ib_uverbs_event_close,
        .fasync  = ib_uverbs_event_fasync,
        .llseek  = no_llseek,
};

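/*
 * Completion callback attached to user CQs: queue a completion event on
 * the CQ's completion channel and wake up any blocked readers/pollers.
 */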
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_file    *file = cq_context;
        struct ib_ucq_object           *uobj;
        struct ib_uverbs_event         *entry;
        unsigned long                   flags;

        if (!file)
                return;

        spin_lock_irqsave(&file->lock, flags);
        if (file->is_closed) {
                spin_unlock_irqrestore(&file->lock, flags);
                return;
        }

        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->lock, flags);
                return;
        }

        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

        entry->desc.comp.cq_handle = cq->uobject->user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &file->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&file->lock, flags);

        wake_up_interruptible(&file->poll_wait);
        kill_fasync(&file->async_queue, SIGIO, POLL_IN);
}

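/*
 * Common helper that queues an async event (element + event type) on
 * the file's async event channel and notifies waiters.
 */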
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
                                    __u64 element, __u64 event,
                                    struct list_head *obj_list,
                                    u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        spin_lock_irqsave(&file->async_file->lock, flags);
        if (file->async_file->is_closed) {
                spin_unlock_irqrestore(&file->async_file->lock, flags);
                return;
        }

        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->async_file->lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved   = 0;
        entry->counter               = counter;

        list_add_tail(&entry->list, &file->async_file->event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&file->async_file->lock, flags);

        wake_up_interruptible(&file->async_file->poll_wait);
        kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
                                                  struct ib_ucq_object, uobject);

        ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
                                event->event, &uobj->async_list,
                                &uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        /* for XRC target qp's, check that qp is live */
        if (!event->element.qp->uobject || !event->element.qp->uobject->live)
                return;

        uobj = container_of(event->element.qp->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
                                                  struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        uobj = container_of(event->element.srq->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        struct ib_uverbs_file *file =
                container_of(handler, struct ib_uverbs_file, event_handler);

        ib_uverbs_async_handler(file, event->element.port_num, event->event,
                                NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
        kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
        file->async_file = NULL;
}

struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
                                        struct ib_device        *ib_dev,
                                        int is_async)
{
        struct ib_uverbs_event_file *ev_file;
        struct file *filp;
        int ret;

        ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
        if (!ev_file)
                return ERR_PTR(-ENOMEM);

        kref_init(&ev_file->ref);
        spin_lock_init(&ev_file->lock);
        INIT_LIST_HEAD(&ev_file->event_list);
        init_waitqueue_head(&ev_file->poll_wait);
        ev_file->uverbs_file = uverbs_file;
        kref_get(&ev_file->uverbs_file->ref);
        ev_file->async_queue = NULL;
        ev_file->is_closed   = 0;

        filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
                                  ev_file, O_RDONLY);
        if (IS_ERR(filp))
                goto err_put_refs;

        mutex_lock(&uverbs_file->device->lists_mutex);
        list_add_tail(&ev_file->list,
                      &uverbs_file->device->uverbs_events_file_list);
        mutex_unlock(&uverbs_file->device->lists_mutex);

        if (is_async) {
                WARN_ON(uverbs_file->async_file);
                uverbs_file->async_file = ev_file;
                kref_get(&uverbs_file->async_file->ref);
                INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
                                      ib_dev,
                                      ib_uverbs_event_handler);
                ret = ib_register_event_handler(&uverbs_file->event_handler);
                if (ret)
                        goto err_put_file;

                /* At this point the async file is fully set up */
                ev_file->is_async = 1;
        }

        return filp;

err_put_file:
        fput(filp);
        kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
        uverbs_file->async_file = NULL;
        return ERR_PTR(ret);

err_put_refs:
        kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&ev_file->ref, ib_uverbs_release_event_file);
        return filp;
}

/*
 * Look up a completion event file by FD.  If lookup is successful,
 * takes a ref to the event file struct that it returns; if
 * unsuccessful, returns NULL.
 */
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
{
        struct ib_uverbs_event_file *ev_file = NULL;
        struct fd f = fdget(fd);

        if (!f.file)
                return NULL;

        if (f.file->f_op != &uverbs_event_fops)
                goto out;

        ev_file = f.file->private_data;
        if (ev_file->is_async) {
                ev_file = NULL;
                goto out;
        }

        kref_get(&ev_file->ref);

out:
        fdput(f);
        return ev_file;
}

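/*
 * Check that the device advertises support for @command via its
 * uverbs_cmd_mask (uverbs_ex_cmd_mask for extended commands).
 */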
static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
{
        u64 mask;

        if (command <= IB_USER_VERBS_CMD_OPEN_QP)
                mask = ib_dev->uverbs_cmd_mask;
        else
                mask = ib_dev->uverbs_ex_cmd_mask;

        if (mask & ((u64)1 << command))
                return 0;

        return -1;
}

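/*
 * write() entry point for all verbs commands.  Userspace writes a
 * struct ib_uverbs_cmd_hdr followed by the command payload; extended
 * commands (IB_USER_VERBS_CMD_FLAG_EXTENDED) carry an additional
 * struct ib_uverbs_ex_cmd_hdr.  Note the units below: in_words/out_words
 * count 4-byte words for legacy commands and 8-byte words for extended
 * commands.
 */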
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_device *ib_dev;
        struct ib_uverbs_cmd_hdr hdr;
        __u32 command;
        __u32 flags;
        int srcu_key;
        ssize_t ret;

        if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
                return -EACCES;

        if (count < sizeof hdr)
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof hdr))
                return -EFAULT;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto out;
        }

        if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
                                   IB_USER_VERBS_CMD_COMMAND_MASK)) {
                ret = -EINVAL;
                goto out;
        }

        command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
        if (verify_command_mask(ib_dev, command)) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        if (!file->ucontext &&
            command != IB_USER_VERBS_CMD_GET_CONTEXT) {
                ret = -EINVAL;
                goto out;
        }

        flags = (hdr.command &
                 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;

        if (!flags) {
                if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
                    !uverbs_cmd_table[command]) {
                        ret = -EINVAL;
                        goto out;
                }

                if (hdr.in_words * 4 != count) {
                        ret = -EINVAL;
                        goto out;
                }

                ret = uverbs_cmd_table[command](file, ib_dev,
                                                 buf + sizeof(hdr),
                                                 hdr.in_words * 4,
                                                 hdr.out_words * 4);

        } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
                struct ib_uverbs_ex_cmd_hdr ex_hdr;
                struct ib_udata ucore;
                struct ib_udata uhw;
                size_t written_count = count;

                if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
                    !uverbs_ex_cmd_table[command]) {
                        ret = -ENOSYS;
                        goto out;
                }

                if (!file->ucontext) {
                        ret = -EINVAL;
                        goto out;
                }

                if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
                        ret = -EINVAL;
                        goto out;
                }

                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
                        ret = -EFAULT;
                        goto out;
                }

                count -= sizeof(hdr) + sizeof(ex_hdr);
                buf += sizeof(hdr) + sizeof(ex_hdr);

                if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
                        ret = -EINVAL;
                        goto out;
                }

                if (ex_hdr.cmd_hdr_reserved) {
                        ret = -EINVAL;
                        goto out;
                }

                if (ex_hdr.response) {
                        if (!hdr.out_words && !ex_hdr.provider_out_words) {
                                ret = -EINVAL;
                                goto out;
                        }

                        if (!access_ok(VERIFY_WRITE,
                                       (void __user *) (unsigned long) ex_hdr.response,
                                       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else {
                        if (hdr.out_words || ex_hdr.provider_out_words) {
                                ret = -EINVAL;
                                goto out;
                        }
                }

                INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
                                       hdr.in_words * 8, hdr.out_words * 8);

                INIT_UDATA_BUF_OR_NULL(&uhw,
                                       buf + ucore.inlen,
                                       (unsigned long) ex_hdr.response + ucore.outlen,
                                       ex_hdr.provider_in_words * 8,
                                       ex_hdr.provider_out_words * 8);

                ret = uverbs_ex_cmd_table[command](file,
                                                   ib_dev,
                                                   &ucore,
                                                   &uhw);
                if (!ret)
                        ret = written_count;
        } else {
                ret = -ENOSYS;
        }

out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_device *ib_dev;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto out;
        }

        if (!file->ucontext)
                ret = -ENODEV;
        else
                ret = ib_dev->mmap(file->ucontext, vma);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!atomic_inc_not_zero(&dev->refcount))
                return -ENXIO;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
        module_dependent = !(ib_dev->disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device     = dev;
        file->ucontext   = NULL;
        file->async_file = NULL;
        kref_init(&file->ref);
        mutex_init(&file->mutex);
        mutex_init(&file->cleanup_mutex);

        filp->private_data = file;
        kobject_get(&dev->kobj);
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return nonseekable_open(inode, filp);

err_module:
        module_put(ib_dev->owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (atomic_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_uverbs_device *dev = file->device;

        mutex_lock(&file->cleanup_mutex);
        if (file->ucontext) {
                ib_uverbs_cleanup_ucontext(file, file->ucontext);
                file->ucontext = NULL;
        }
        mutex_unlock(&file->cleanup_mutex);

        mutex_lock(&file->device->lists_mutex);
        if (!file->is_closed) {
                list_del(&file->list);
                file->is_closed = 1;
        }
        mutex_unlock(&file->device->lists_mutex);

        if (file->async_file)
                kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

        kref_put(&file->ref, ib_uverbs_release_file);
        kobject_put(&dev->kobj);

        return 0;
}

static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
};

static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one
};

static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        int ret = -ENODEV;
        int srcu_key;
        struct ib_uverbs_device *dev = dev_get_drvdata(device);
        struct ib_device *ib_dev;

        if (!dev)
                return -ENODEV;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%s\n", ib_dev->name);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_dev_abi_version(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev = dev_get_drvdata(device);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        if (!dev)
                return -ENODEV;
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));

static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);

/*
 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
 * requesting a new major number and doubling the number of max devices we
 * support. It's stupid, but simple.
 */
static int find_overflow_devnum(void)
{
        int ret;

        if (!overflow_maj) {
                ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
                                          "infiniband_verbs");
                if (ret) {
                        pr_err("user_verbs: couldn't register dynamic device number\n");
                        return ret;
                }
        }

        ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
        if (ret >= IB_UVERBS_MAX_DEVICES)
                return -1;

        return ret;
}

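/*
 * ib_client add callback: allocate an ib_uverbs_device for the new IB
 * device, grab a minor number (falling back to the overflow range when
 * the static range is exhausted) and expose it to userspace as the
 * /dev/infiniband/uverbsN char device with its sysfs attributes.
 */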
static void ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->alloc_ucontext)
                return;

        uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
        if (!uverbs_dev)
                return;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return;
        }

        atomic_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);

        spin_lock(&map_lock);
        devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
        if (devnum >= IB_UVERBS_MAX_DEVICES) {
                spin_unlock(&map_lock);
                devnum = find_overflow_devnum();
                if (devnum < 0)
                        goto err;

                spin_lock(&map_lock);
                uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
                base = devnum + overflow_maj;
                set_bit(devnum, overflow_map);
        } else {
                uverbs_dev->devnum = devnum;
                base = devnum + IB_UVERBS_BASE_DEV;
                set_bit(devnum, dev_map);
        }
        spin_unlock(&map_lock);

        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
        uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
        uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
        kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
        if (cdev_add(&uverbs_dev->cdev, base, 1))
                goto err_cdev;

        uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
                                        uverbs_dev->cdev.dev, uverbs_dev,
                                        "uverbs%d", uverbs_dev->devnum);
        if (IS_ERR(uverbs_dev->dev))
                goto err_cdev;

        if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
                goto err_class;
        if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
                goto err_class;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);

        return;

err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
        cdev_del(&uverbs_dev->cdev);
        if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
                clear_bit(devnum, dev_map);
        else
                clear_bit(devnum, overflow_map);

err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        kobject_put(&uverbs_dev->kobj);
        return;
}

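/*
 * Used when the underlying IB device supports disassociation: deliver
 * an IB_EVENT_DEVICE_FATAL event to every open file, detach each
 * ucontext from the vanishing hardware and clean its resources up on
 * the user's behalf, then wake up and shut down all event files.
 */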
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;
        struct ib_uverbs_event_file *event_file;
        struct ib_event event;

        /* Wait for any pending/running commands to terminate */
        synchronize_srcu(&uverbs_dev->disassociate_srcu);
        event.event = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
        event.device = ib_dev;

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                struct ib_ucontext *ucontext;
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                file->is_closed = 1;
                list_del(&file->list);
                kref_get(&file->ref);
                mutex_unlock(&uverbs_dev->lists_mutex);

                ib_uverbs_event_handler(&file->event_handler, &event);

                mutex_lock(&file->cleanup_mutex);
                ucontext = file->ucontext;
                file->ucontext = NULL;
                mutex_unlock(&file->cleanup_mutex);

                /* At this point ib_uverbs_close cannot be running
                 * ib_uverbs_cleanup_ucontext
                 */
                if (ucontext) {
                        /* We must release the mutex before going ahead and
                         * calling disassociate_ucontext. disassociate_ucontext
                         * might end up indirectly calling uverbs_close,
                         * for example due to freeing the resources
                         * (e.g mmput).
                         */
                        ib_dev->disassociate_ucontext(ucontext);
                        ib_uverbs_cleanup_ucontext(file, ucontext);
                }

                mutex_lock(&uverbs_dev->lists_mutex);
                kref_put(&file->ref, ib_uverbs_release_file);
        }

        while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
                event_file = list_first_entry(&uverbs_dev->
                                              uverbs_events_file_list,
                                              struct ib_uverbs_event_file,
                                              list);
                spin_lock_irq(&event_file->lock);
                event_file->is_closed = 1;
                spin_unlock_irq(&event_file->lock);

                list_del(&event_file->list);
                if (event_file->is_async) {
                        ib_unregister_event_handler(&event_file->uverbs_file->
                                                    event_handler);
                        event_file->uverbs_file->event_handler.device = NULL;
                }

                wake_up_interruptible(&event_file->poll_wait);
                kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        if (!uverbs_dev)
                return;

        dev_set_drvdata(uverbs_dev->dev, NULL);
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
        cdev_del(&uverbs_dev->cdev);

        if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
                clear_bit(uverbs_dev->devnum, dev_map);
        else
                clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

        if (device->disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see an EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);
        kobject_put(&uverbs_dev->kobj);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);

out:
        return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
        if (overflow_maj)
                unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
        idr_destroy(&ib_uverbs_pd_idr);
        idr_destroy(&ib_uverbs_mr_idr);
        idr_destroy(&ib_uverbs_mw_idr);
        idr_destroy(&ib_uverbs_ah_idr);
        idr_destroy(&ib_uverbs_cq_idr);
        idr_destroy(&ib_uverbs_qp_idr);
        idr_destroy(&ib_uverbs_srq_idr);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);