/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>

#include "vhost.h"

enum {
        VHOST_MEMORY_MAX_NREGIONS = 64,
        VHOST_MEMORY_F_LOG = 0x1,
};

static unsigned vhost_zcopy_mask __read_mostly;

#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])

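/* Called by the backend file's poll method via poll_wait(): remember the
 * wait queue head and add our wait entry to it. */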
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}

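/* Wait queue callback: queue the vhost work item if the wakeup event
 * matches the mask this poller cares about. */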
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

        if (!((unsigned long)key & poll->mask))
                return 0;

        vhost_poll_queue(poll);
        return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
        INIT_LIST_HEAD(&work->node);
        work->fn = fn;
        init_waitqueue_head(&work->done);
        work->flushing = 0;
        work->queue_seq = work->done_seq = 0;
}

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev)
{
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;

        vhost_work_init(&poll->work, fn);
}

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        unsigned long mask;

        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        remove_wait_queue(poll->wqh, &poll->wait);
}

static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
                                unsigned seq)
{
        int left;

        spin_lock_irq(&dev->work_lock);
        left = seq - work->done_seq;
        spin_unlock_irq(&dev->work_lock);
        return left <= 0;
}

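/* Wait until all work queued before this call has been run by the worker
 * thread. */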
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
        unsigned seq;
        int flushing;

        spin_lock_irq(&dev->work_lock);
        seq = work->queue_seq;
        work->flushing++;
        spin_unlock_irq(&dev->work_lock);
        wait_event(work->done, vhost_work_seq_done(dev, work, seq));
        spin_lock_irq(&dev->work_lock);
        flushing = --work->flushing;
        spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
}

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        vhost_work_flush(poll->dev, &poll->work);
}

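/* Queue work for the worker thread, unless it is already pending. */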
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->work_lock, flags);
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &dev->work_list);
                work->queue_seq++;
                wake_up_process(dev->worker);
        }
        spin_unlock_irqrestore(&dev->work_lock, flags);
}

void vhost_poll_queue(struct vhost_poll *poll)
{
        vhost_work_queue(poll->dev, &poll->work);
}

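/* Return a virtqueue to its initial, unconfigured state. */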
static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->vhost_hlen = 0;
        vq->sock_hlen = 0;
        vq->private_data = NULL;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->error = NULL;
        vq->kick = NULL;
        vq->call_ctx = NULL;
        vq->call = NULL;
        vq->log_ctx = NULL;
        vq->upend_idx = 0;
        vq->done_idx = 0;
        vq->ubufs = NULL;
}

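/* The worker kthread: runs queued work items one at a time within the
 * owner's memory context, and updates done_seq so flushers can finish. */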
static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work = NULL;
        unsigned uninitialized_var(seq);
        mm_segment_t oldfs = get_fs();

        set_fs(USER_DS);
        use_mm(dev->mm);

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&dev->work_lock);
                if (work) {
                        work->done_seq = seq;
                        if (work->flushing)
                                wake_up_all(&work->done);
                }

                if (kthread_should_stop()) {
                        spin_unlock_irq(&dev->work_lock);
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                if (!list_empty(&dev->work_list)) {
                        work = list_first_entry(&dev->work_list,
                                                struct vhost_work, node);
                        list_del_init(&work->node);
                        seq = work->queue_seq;
                } else
                        work = NULL;
                spin_unlock_irq(&dev->work_lock);

                if (work) {
                        __set_current_state(TASK_RUNNING);
                        work->fn(work);
                        if (need_resched())
                                schedule();
                } else
                        schedule();

        }
        unuse_mm(dev->mm);
        set_fs(oldfs);
        return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
        kfree(vq->indirect);
        vq->indirect = NULL;
        kfree(vq->log);
        vq->log = NULL;
        kfree(vq->heads);
        vq->heads = NULL;
        kfree(vq->ubuf_info);
        vq->ubuf_info = NULL;
}

void vhost_enable_zcopy(int vq)
{
        vhost_zcopy_mask |= 0x1 << vq;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
        int i;
        bool zcopy;

        for (i = 0; i < dev->nvqs; ++i) {
                dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
                                               UIO_MAXIOV, GFP_KERNEL);
                dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
                                          GFP_KERNEL);
                dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
                                            UIO_MAXIOV, GFP_KERNEL);
                zcopy = vhost_zcopy_mask & (0x1 << i);
                if (zcopy)
                        dev->vqs[i].ubuf_info =
                                kmalloc(sizeof *dev->vqs[i].ubuf_info *
                                        UIO_MAXIOV, GFP_KERNEL);
                if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
                        !dev->vqs[i].heads ||
                        (zcopy && !dev->vqs[i].ubuf_info))
                        goto err_nomem;
        }
        return 0;

err_nomem:
        for (; i >= 0; --i)
                vhost_vq_free_iovecs(&dev->vqs[i]);
        return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(&dev->vqs[i]);
}

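/* Set up device state. The caller provides the virtqueue array; iovecs and
 * the worker thread are only created once an owner is set. */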
long vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue *vqs, int nvqs)
{
        int i;

        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->log_file = NULL;
        dev->memory = NULL;
        dev->mm = NULL;
        spin_lock_init(&dev->work_lock);
        INIT_LIST_HEAD(&dev->work_list);
        dev->worker = NULL;

        for (i = 0; i < dev->nvqs; ++i) {
                dev->vqs[i].log = NULL;
                dev->vqs[i].indirect = NULL;
                dev->vqs[i].heads = NULL;
                dev->vqs[i].ubuf_info = NULL;
                dev->vqs[i].dev = dev;
                mutex_init(&dev->vqs[i].mutex);
                vhost_vq_reset(dev, dev->vqs + i);
                if (dev->vqs[i].handle_kick)
                        vhost_poll_init(&dev->vqs[i].poll,
                                        dev->vqs[i].handle_kick, POLLIN, dev);
        }

        return 0;
}

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;
        int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);
}

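/* Move the worker thread into the cgroups of the owner task by running the
 * attach operation inside the worker itself. */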
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_flush(dev, &attach.work);
        return attach.ret;
}

/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
        struct task_struct *worker;
        int err;

        /* Is there an owner already? */
        if (dev->mm) {
                err = -EBUSY;
                goto err_mm;
        }

        /* No owner, become one */
        dev->mm = get_task_mm(current);
        worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
        if (IS_ERR(worker)) {
                err = PTR_ERR(worker);
                goto err_worker;
        }

        dev->worker = worker;
        wake_up_process(worker);        /* avoid contributing to loadavg */

        err = vhost_attach_cgroups(dev);
        if (err)
                goto err_cgroup;

        err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;

        return 0;
err_cgroup:
        kthread_stop(worker);
        dev->worker = NULL;
err_worker:
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
err_mm:
        return err;
}

/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
        struct vhost_memory *memory;

        /* Restore memory to default empty mapping. */
        memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
        if (!memory)
                return -ENOMEM;

        vhost_dev_cleanup(dev, true);

        memory->nregions = 0;
        RCU_INIT_POINTER(dev->memory, memory);
        return 0;
}

void vhost_dev_stop(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
                        vhost_poll_stop(&dev->vqs[i].poll);
                        vhost_poll_flush(&dev->vqs[i].poll);
                }
        }
}

/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i].error_ctx)
                        eventfd_ctx_put(dev->vqs[i].error_ctx);
                if (dev->vqs[i].error)
                        fput(dev->vqs[i].error);
                if (dev->vqs[i].kick)
                        fput(dev->vqs[i].kick);
                if (dev->vqs[i].call_ctx)
                        eventfd_ctx_put(dev->vqs[i].call_ctx);
                if (dev->vqs[i].call)
                        fput(dev->vqs[i].call);
                vhost_vq_reset(dev, dev->vqs + i);
        }
        vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        if (dev->log_file)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
        kfree(rcu_dereference_protected(dev->memory,
                                        locked ==
                                                lockdep_is_held(&dev->mutex)));
        RCU_INIT_POINTER(dev->memory, NULL);
        WARN_ON(!list_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
}

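/* Check that the dirty log bitmap covering guest addresses
 * [addr, addr + sz) is writable at the given userspace base. */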
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return 0;

        return access_ok(VERIFY_WRITE, log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
                               int log_all)
{
        int i;

        if (!mem)
                return 0;

        for (i = 0; i < mem->nregions; ++i) {
                struct vhost_memory_region *m = mem->regions + i;
                unsigned long a = m->userspace_addr;
                if (m->memory_size > ULONG_MAX)
                        return 0;
                else if (!access_ok(VERIFY_WRITE, (void __user *)a,
                                    m->memory_size))
                        return 0;
                else if (log_all && !log_access_ok(log_base,
                                                   m->guest_phys_addr,
                                                   m->memory_size))
                        return 0;
        }
        return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
                            int log_all)
{
        int i;

        for (i = 0; i < d->nvqs; ++i) {
                int ok;
                mutex_lock(&d->vqs[i].mutex);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i].private_data)
                        ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
                                                 log_all);
                else
                        ok = 1;
                mutex_unlock(&d->vqs[i].mutex);
                if (!ok)
                        return 0;
        }
        return 1;
}

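/* Verify that the descriptor table and the avail and used rings are
 * accessible for a ring of the given size. */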
static int vq_access_ok(struct vhost_dev *d, unsigned int num,
                        struct vring_desc __user *desc,
                        struct vring_avail __user *avail,
                        struct vring_used __user *used)
{
        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
        return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
               access_ok(VERIFY_READ, avail,
                         sizeof *avail + num * sizeof *avail->ring + s) &&
               access_ok(VERIFY_WRITE, used,
                         sizeof *used + num * sizeof *used->ring + s);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
        struct vhost_memory *mp;

        mp = rcu_dereference_protected(dev->memory,
                                       lockdep_is_held(&dev->mutex));
        return memory_access_ok(dev, mp, 1);
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
                            void __user *log_base)
{
        struct vhost_memory *mp;
        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        mp = rcu_dereference_protected(vq->dev->memory,
                                       lockdep_is_held(&vq->mutex));
        return vq_memory_access_ok(log_base, mp,
                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                        sizeof *vq->used +
                                        vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
        return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
                vq_log_access_ok(vq->dev, vq, vq->log_base);
}

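/* VHOST_SET_MEM_TABLE: copy in and validate the new memory table, publish
 * it via RCU and free the old one. */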
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
        struct vhost_memory mem, *newmem, *oldmem;
        unsigned long size = offsetof(struct vhost_memory, regions);

        if (copy_from_user(&mem, m, size))
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
        if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
                return -E2BIG;
        newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
        if (!newmem)
                return -ENOMEM;

        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions)) {
                kfree(newmem);
                return -EFAULT;
        }

        if (!memory_access_ok(d, newmem,
                              vhost_has_feature(d, VHOST_F_LOG_ALL))) {
                kfree(newmem);
                return -EFAULT;
        }
        oldmem = rcu_dereference_protected(d->memory,
                                           lockdep_is_held(&d->mutex));
        rcu_assign_pointer(d->memory, newmem);
        synchronize_rcu();
        kfree(oldmem);
        return 0;
}

long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        struct vhost_vring_addr a;
        u32 idx;
        long r;

        r = get_user(idx, idxp);
        if (r < 0)
                return r;
        if (idx >= d->nvqs)
                return -ENOBUFS;

        vq = d->vqs + idx;

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_NUM:
                /* Resizing ring with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                if (copy_from_user(&s, argp, sizeof s)) {
                        r = -EFAULT;
                        break;
                }
                if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
                        r = -EINVAL;
                        break;
                }
                vq->num = s.num;
                break;
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                if (copy_from_user(&s, argp, sizeof s)) {
                        r = -EFAULT;
                        break;
                }
                if (s.num > 0xffff) {
                        r = -EINVAL;
                        break;
                }
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
                s.num = vq->last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
                        r = -EFAULT;
                break;
        case VHOST_SET_VRING_ADDR:
                if (copy_from_user(&a, argp, sizeof a)) {
                        r = -EFAULT;
                        break;
                }
                if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
                        r = -EOPNOTSUPP;
                        break;
                }
                /* For 32bit, verify that the top 32bits of the user
                   data are set to zero. */
                if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
                    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
                    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
                        r = -EFAULT;
                        break;
                }
                if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
                    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
                    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
                        r = -EINVAL;
                        break;
                }

                /* We only verify access here if backend is configured.
                 * If it is not, we don't as size might not have been setup.
                 * We will verify when backend is configured. */
                if (vq->private_data) {
                        if (!vq_access_ok(d, vq->num,
                                (void __user *)(unsigned long)a.desc_user_addr,
                                (void __user *)(unsigned long)a.avail_user_addr,
                                (void __user *)(unsigned long)a.used_user_addr)) {
                                r = -EINVAL;
                                break;
                        }

                        /* Also validate log access for used ring if enabled. */
                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
                            !log_access_ok(vq->log_base, a.log_guest_addr,
                                           sizeof *vq->used +
                                           vq->num * sizeof *vq->used->ring)) {
                                r = -EINVAL;
                                break;
                        }
                }

                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
                vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
                vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
                vq->log_addr = a.log_guest_addr;
                vq->used = (void __user *)(unsigned long)a.used_user_addr;
                break;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->kick) {
                        pollstop = (filep = vq->kick) != NULL;
                        pollstart = (vq->kick = eventfp) != NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_CALL:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->call) {
                        filep = vq->call;
                        ctx = vq->call_ctx;
                        vq->call = eventfp;
                        vq->call_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->error) {
                        filep = vq->error;
                        vq->error = eventfp;
                        ctx = vq->error_ctx;
                        vq->error_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                break;
        default:
                r = -ENOIOCTLCMD;
        }

        if (pollstop && vq->handle_kick)
                vhost_poll_stop(&vq->poll);

        if (ctx)
                eventfd_ctx_put(ctx);
        if (filep)
                fput(filep);

        if (pollstart && vq->handle_kick)
                vhost_poll_start(&vq->poll, vq->kick);

        mutex_unlock(&vq->mutex);

        if (pollstop && vq->handle_kick)
                vhost_poll_flush(&vq->poll);
        return r;
}

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        struct eventfd_ctx *ctx = NULL;
        u64 p;
        long r;
        int i, fd;

        /* If you are not the owner, you can become one */
        if (ioctl == VHOST_SET_OWNER) {
                r = vhost_dev_set_owner(d);
                goto done;
        }

        /* You must be the owner to do anything else */
        r = vhost_dev_check_owner(d);
        if (r)
                goto done;

        switch (ioctl) {
        case VHOST_SET_MEM_TABLE:
                r = vhost_set_memory(d, argp);
                break;
        case VHOST_SET_LOG_BASE:
                if (copy_from_user(&p, argp, sizeof p)) {
                        r = -EFAULT;
                        break;
                }
                if ((u64)(unsigned long)p != p) {
                        r = -EFAULT;
                        break;
                }
                for (i = 0; i < d->nvqs; ++i) {
                        struct vhost_virtqueue *vq;
                        void __user *base = (void __user *)(unsigned long)p;
                        vq = d->vqs + i;
                        mutex_lock(&vq->mutex);
                        /* If ring is inactive, will check when it's enabled. */
                        if (vq->private_data && !vq_log_access_ok(d, vq, base))
                                r = -EFAULT;
                        else
                                vq->log_base = base;
                        mutex_unlock(&vq->mutex);
                }
                break;
        case VHOST_SET_LOG_FD:
                r = get_user(fd, (int __user *)argp);
                if (r < 0)
                        break;
                eventfp = fd == -1 ? NULL : eventfd_fget(fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != d->log_file) {
                        filep = d->log_file;
                        d->log_file = eventfp;
                        ctx = d->log_ctx;
                        d->log_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                for (i = 0; i < d->nvqs; ++i) {
                        mutex_lock(&d->vqs[i].mutex);
                        d->vqs[i].log_ctx = d->log_ctx;
                        mutex_unlock(&d->vqs[i].mutex);
                }
                if (ctx)
                        eventfd_ctx_put(ctx);
                if (filep)
                        fput(filep);
                break;
        default:
                r = -ENOIOCTLCMD;
                break;
        }
done:
        return r;
}

static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
{
        struct vhost_memory_region *reg;
        int i;

        /* linear search is not brilliant, but we really have on the order of 6
         * regions in practice */
        for (i = 0; i < mem->nregions; ++i) {
                reg = mem->regions + i;
                if (reg->guest_phys_addr <= addr &&
                    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
                        return reg;
        }
        return NULL;
}

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
        unsigned long log = (unsigned long)addr;
        struct page *page;
        void *base;
        int bit = nr + (log % PAGE_SIZE) * 8;
        int r;

        r = get_user_pages_fast(log, 1, 1, &page);
        if (r < 0)
                return r;
        BUG_ON(r != 1);
        base = kmap_atomic(page);
        set_bit(bit, base);
        kunmap_atomic(base);
        set_page_dirty_lock(page);
        put_page(page);
        return 0;
}

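/* Mark the guest range [write_address, write_address + write_length) dirty
 * in the userspace log bitmap. */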
static int log_write(void __user *log_base,
                     u64 write_address, u64 write_length)
{
        u64 write_page = write_address / VHOST_PAGE_SIZE;
        int r;

        if (!write_length)
                return 0;
        write_length += write_address % VHOST_PAGE_SIZE;
        for (;;) {
                u64 base = (u64)(unsigned long)log_base;
                u64 log = base + write_page / 8;
                int bit = write_page % 8;
                if ((u64)(unsigned long)log != log)
                        return -EFAULT;
                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
                if (r < 0)
                        return r;
                if (write_length <= VHOST_PAGE_SIZE)
                        break;
                write_length -= VHOST_PAGE_SIZE;
                write_page += 1;
        }
        return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len)
{
        int i, r;

        /* Make sure data written is seen before log. */
        smp_wmb();
        for (i = 0; i < log_num; ++i) {
                u64 l = min(log[i].len, len);
                r = log_write(vq->log_base, log[i].addr, l);
                if (r < 0)
                        return r;
                len -= l;
                if (!len) {
                        if (vq->log_ctx)
                                eventfd_signal(vq->log_ctx, 1);
                        return 0;
                }
        }
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
}

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
        void __user *used;
        if (__put_user(vq->used_flags, &vq->used->flags) < 0)
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                /* Make sure the flag is seen before log. */
                smp_wmb();
                /* Log used flag write. */
                used = &vq->used->flags;
                log_write(vq->log_base, vq->log_addr +
                          (used - (void __user *)vq->used),
                          sizeof vq->used->flags);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
        if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                void __user *used;
                /* Make sure the event is seen before log. */
                smp_wmb();
                /* Log avail event write */
                used = vhost_avail_event(vq);
                log_write(vq->log_base, vq->log_addr +
                          (used - (void __user *)vq->used),
                          sizeof *vhost_avail_event(vq));
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return 0;
}

int vhost_init_used(struct vhost_virtqueue *vq)
{
        int r;
        if (!vq->private_data)
                return 0;

        r = vhost_update_used_flags(vq);
        if (r)
                return r;
        vq->signalled_used_valid = false;
        return get_user(vq->last_used_idx, &vq->used->idx);
}

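/* Translate a guest-physical range into host userspace iovecs using the
 * memory table set by VHOST_SET_MEM_TABLE. */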
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
                          struct iovec iov[], int iov_size)
{
        const struct vhost_memory_region *reg;
        struct vhost_memory *mem;
        struct iovec *_iov;
        u64 s = 0;
        int ret = 0;

        rcu_read_lock();

        mem = rcu_dereference(dev->memory);
        while ((u64)len > s) {
                u64 size;
                if (unlikely(ret >= iov_size)) {
                        ret = -ENOBUFS;
                        break;
                }
                reg = find_region(mem, addr, len);
                if (unlikely(!reg)) {
                        ret = -EFAULT;
                        break;
                }
                _iov = iov + ret;
                size = reg->memory_size - addr + reg->guest_phys_addr;
                _iov->iov_len = min((u64)len - s, size);
                _iov->iov_base = (void __user *)(unsigned long)
                        (reg->userspace_addr + addr - reg->guest_phys_addr);
                s += size;
                addr += size;
                ++ret;
        }

        rcu_read_unlock();
        return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
        unsigned int next;

        /* If this descriptor says it doesn't chain, we're done. */
        if (!(desc->flags & VRING_DESC_F_NEXT))
                return -1U;

        /* Check they're not leading us off end of descriptors. */
        next = desc->next;
        /* Make sure compiler knows to grab that: we don't want it changing! */
        /* We will use the result as an index in an array, so most
         * architectures only need a compiler barrier here. */
        read_barrier_depends();

        return next;
}

static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                        struct iovec iov[], unsigned int iov_size,
                        unsigned int *out_num, unsigned int *in_num,
                        struct vhost_log *log, unsigned int *log_num,
                        struct vring_desc *indirect)
{
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        int ret;

        /* Sanity check */
        if (unlikely(indirect->len % sizeof desc)) {
                vq_err(vq, "Invalid length in indirect descriptor: "
                       "len 0x%llx not multiple of 0x%zx\n",
                       (unsigned long long)indirect->len,
                       sizeof desc);
                return -EINVAL;
        }

        ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
                             UIO_MAXIOV);
        if (unlikely(ret < 0)) {
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
        }

        /* We will use the result as an address to read from, so most
         * architectures only need a compiler barrier here. */
        read_barrier_depends();

        count = indirect->len / sizeof desc;
        /* Buffers are chained via a 16 bit next field, so
         * we can have at most 2^16 of these. */
        if (unlikely(count > USHRT_MAX + 1)) {
                vq_err(vq, "Indirect buffer length too big: %d\n",
                       indirect->len);
                return -E2BIG;
        }

        do {
                unsigned iov_count = *in_num + *out_num;
                if (unlikely(++found > count)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "indirect size %u\n",
                               i, count);
                        return -EINVAL;
                }
                if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
                                              vq->indirect, sizeof desc))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)indirect->addr + i * sizeof desc);
                        return -EINVAL;
                }
                if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
                               i, (size_t)indirect->addr + i * sizeof desc);
                        return -EINVAL;
                }

                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d indirect idx %d\n",
                               ret, i);
                        return ret;
                }
                /* If this is an input descriptor, increment that count. */
                if (desc.flags & VRING_DESC_F_WRITE) {
                        *in_num += ret;
                        if (unlikely(log)) {
                                log[*log_num].addr = desc.addr;
                                log[*log_num].len = desc.len;
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Indirect descriptor "
                                       "has out after in: idx %d\n", i);
                                return -EINVAL;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(&desc)) != -1);
        return 0;
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                      struct iovec iov[], unsigned int iov_size,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num)
{
        struct vring_desc desc;
        unsigned int i, head, found = 0;
        u16 last_avail_idx;
        int ret;

        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;
        if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
                vq_err(vq, "Failed to access avail idx at %p\n",
                       &vq->avail->idx);
                return -EFAULT;
        }

        if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
                vq_err(vq, "Guest moved used index from %u to %u",
                       last_avail_idx, vq->avail_idx);
                return -EFAULT;
        }

        /* If there's nothing new since last we looked, return invalid. */
        if (vq->avail_idx == last_avail_idx)
                return vq->num;

        /* Only get avail ring entries after they have been exposed by guest. */
        smp_rmb();

        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (unlikely(__get_user(head,
                                &vq->avail->ring[last_avail_idx % vq->num]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
                       &vq->avail->ring[last_avail_idx % vq->num]);
                return -EFAULT;
        }

        /* If their number is silly, that's an error. */
        if (unlikely(head >= vq->num)) {
                vq_err(vq, "Guest says index %u > %u is available",
                       head, vq->num);
                return -EINVAL;
        }

        /* When we start there are none of either input nor output. */
        *out_num = *in_num = 0;
        if (unlikely(log))
                *log_num = 0;

        i = head;
        do {
                unsigned iov_count = *in_num + *out_num;
                if (unlikely(i >= vq->num)) {
                        vq_err(vq, "Desc index is %u > %u, head = %u",
                               i, vq->num, head);
                        return -EINVAL;
                }
                if (unlikely(++found > vq->num)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "vq size %u head %u\n",
                               i, vq->num, head);
                        return -EINVAL;
                }
                ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
                if (unlikely(ret)) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                               i, vq->desc + i);
                        return -EFAULT;
                }
                if (desc.flags & VRING_DESC_F_INDIRECT) {
                        ret = get_indirect(dev, vq, iov, iov_size,
                                           out_num, in_num,
                                           log, log_num, &desc);
                        if (unlikely(ret < 0)) {
                                vq_err(vq, "Failure detected "
                                       "in indirect descriptor at idx %d\n", i);
                                return ret;
                        }
                        continue;
                }

                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d descriptor idx %d\n",
                               ret, i);
                        return ret;
                }
                if (desc.flags & VRING_DESC_F_WRITE) {
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
                        if (unlikely(log)) {
                                log[*log_num].addr = desc.addr;
                                log[*log_num].len = desc.len;
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Descriptor has out after in: "
                                       "idx %d\n", i);
                                return -EINVAL;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(&desc)) != -1);

        /* On success, increment avail index. */
        vq->last_avail_idx++;

        /* Assume notifications from guest are disabled at this point,
         * if they aren't we would need to update avail_event index. */
        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
        return head;
}

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
        vq->last_avail_idx -= n;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
        struct vring_used_elem __user *used;

        /* The virtqueue contains a ring of used buffers.  Get a pointer to the
         * next entry in that used ring. */
        used = &vq->used->ring[vq->last_used_idx % vq->num];
        if (__put_user(head, &used->id)) {
                vq_err(vq, "Failed to write used id");
                return -EFAULT;
        }
        if (__put_user(len, &used->len)) {
                vq_err(vq, "Failed to write used len");
                return -EFAULT;
        }
        /* Make sure buffer is written before we update index. */
        smp_wmb();
        if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Make sure data is seen before log. */
                smp_wmb();
                /* Log used ring entry write. */
                log_write(vq->log_base,
                          vq->log_addr +
                           ((void __user *)used - (void __user *)vq->used),
                          sizeof *used);
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
                          sizeof vq->used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        vq->last_used_idx++;
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
         * signals at least once in 2^16 and remove this. */
        if (unlikely(vq->last_used_idx == vq->signalled_used))
                vq->signalled_used_valid = false;
        return 0;
}

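/* Write a batch of used ring entries starting at the current used index;
 * the caller is responsible for updating used->idx afterwards. */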
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                            struct vring_used_elem *heads,
                            unsigned count)
{
        struct vring_used_elem __user *used;
        u16 old, new;
        int start;

        start = vq->last_used_idx % vq->num;
        used = vq->used->ring + start;
        if (__copy_to_user(used, heads, count * sizeof *used)) {
                vq_err(vq, "Failed to write used");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Make sure data is seen before log. */
                smp_wmb();
                /* Log used ring entry write. */
                log_write(vq->log_base,
                          vq->log_addr +
                           ((void __user *)used - (void __user *)vq->used),
                          count * sizeof *used);
        }
        old = vq->last_used_idx;
        new = (vq->last_used_idx += count);
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
         * signals at least once in 2^16 and remove this. */
        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
                vq->signalled_used_valid = false;
        return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                     unsigned count)
{
        int start, n, r;

        start = vq->last_used_idx % vq->num;
        n = vq->num - start;
        if (n < count) {
                r = __vhost_add_used_n(vq, heads, n);
                if (r < 0)
                        return r;
                heads += n;
                count -= n;
        }
        r = __vhost_add_used_n(vq, heads, count);

        /* Make sure buffer is written before we update index. */
        smp_wmb();
        if (put_user(vq->last_used_idx, &vq->used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
                          sizeof vq->used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return r;
}

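/* Decide whether the guest needs a signal, honouring VIRTIO_F_NOTIFY_ON_EMPTY,
 * the interrupt suppression flag and the used event index. */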
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        __u16 old, new, event;
        bool v;
        /* Flush out used index updates. This is paired
         * with the barrier that the Guest executes when enabling
         * interrupts. */
        smp_mb();

        if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
            unlikely(vq->avail_idx == vq->last_avail_idx))
                return true;

        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
                __u16 flags;
                if (__get_user(flags, &vq->avail->flags)) {
                        vq_err(vq, "Failed to get flags");
                        return true;
                }
                return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
        }
        old = vq->signalled_used;
        v = vq->signalled_used_valid;
        new = vq->signalled_used = vq->last_used_idx;
        vq->signalled_used_valid = true;

        if (unlikely(!v))
                return true;

        if (get_user(event, vhost_used_event(vq))) {
                vq_err(vq, "Failed to get used event idx");
                return true;
        }
        return vring_need_event(event, new, old);
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        /* Signal the Guest to tell them we used something up. */
        if (vq->call_ctx && vhost_notify(dev, vq))
                eventfd_signal(vq->call_ctx, 1);
}

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
                               struct vhost_virtqueue *vq,
                               unsigned int head, int len)
{
        vhost_add_used(vq, head, len);
        vhost_signal(dev, vq);
}

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq,
                                 struct vring_used_elem *heads, unsigned count)
{
        vhost_add_used_n(vq, heads, count);
        vhost_signal(dev, vq);
}

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        u16 avail_idx;
        int r;

        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                return false;
        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                if (r) {
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
                               &vq->used->flags, r);
                        return false;
                }
        } else {
                r = vhost_update_avail_event(vq, vq->avail_idx);
                if (r) {
                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);
                        return false;
                }
        }
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
        r = __get_user(avail_idx, &vq->avail->idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);
                return false;
        }

        return avail_idx != vq->avail_idx;
}

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        int r;

        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                return;
        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                if (r)
                        vq_err(vq, "Failed to disable notification at %p: %d\n",
                               &vq->used->flags, r);
        }
}

static void vhost_zerocopy_done_signal(struct kref *kref)
{
        struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
                                                    kref);
        wake_up(&ubufs->wait);
}

struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
                                        bool zcopy)
{
        struct vhost_ubuf_ref *ubufs;
        /* No zero copy backend? Nothing to count. */
        if (!zcopy)
                return NULL;
        ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
        if (!ubufs)
                return ERR_PTR(-ENOMEM);
        kref_init(&ubufs->kref);
        init_waitqueue_head(&ubufs->wait);
        ubufs->vq = vq;
        return ubufs;
}

void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
{
        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
}

void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
{
        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
        wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
        kfree(ubufs);
}