/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

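/*
 * Initialize a (possibly recycled) request: zero it, take the initial
 * reference and mark it pending.
 */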
static void fuse_request_init(struct fuse_req *req, struct page **pages,
                              struct fuse_page_desc *page_descs,
                              unsigned npages)
{
        memset(req, 0, sizeof(*req));
        memset(pages, 0, sizeof(*pages) * npages);
        memset(page_descs, 0, sizeof(*page_descs) * npages);
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
        __set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
        if (req) {
                struct page **pages;
                struct fuse_page_desc *page_descs;

                if (npages <= FUSE_REQ_INLINE_PAGES) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
                } else {
                        pages = kmalloc(sizeof(struct page *) * npages, flags);
                        page_descs = kmalloc(sizeof(struct fuse_page_desc) *
                                             npages, flags);
                }

                if (!pages || !page_descs) {
                        kfree(pages);
                        kfree(page_descs);
                        kmem_cache_free(fuse_req_cachep, req);
                        return NULL;
                }

                fuse_request_init(req, pages, page_descs, npages);
        }
        return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
        if (req->pages != req->inline_pages) {
                kfree(req->pages);
                kfree(req->page_descs);
        }
        kmem_cache_free(fuse_req_cachep, req);
}

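/* Block all signals except SIGKILL, saving the old mask in 'oldset' */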
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

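/* Fill in the credentials of the requesting task */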
static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
        req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
        req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
        /* Make sure stores before this are seen on another CPU */
        smp_wmb();
        fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
        return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                       bool for_background)
{
        struct fuse_req *req;
        int err;
        atomic_inc(&fc->num_waiting);

        if (fuse_block_alloc(fc, for_background)) {
                sigset_t oldset;
                int intr;

                block_sigs(&oldset);
                intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background));
                restore_sigs(&oldset);
                err = -EINTR;
                if (intr)
                        goto out;
        }
        /* Matches smp_wmb() in fuse_set_initialized() */
        smp_rmb();

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        err = -ECONNREFUSED;
        if (fc->conn_error)
                goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
                if (for_background)
                        wake_up(&fc->blocked_waitq);
                goto out;
        }

        fuse_req_init_context(req);
        __set_bit(FR_WAITING, &req->flags);
        if (for_background)
                __set_bit(FR_BACKGROUND, &req->flags);

        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
        return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
                                             unsigned npages)
{
        return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return the request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}

/*
 * Put the stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}

/*
 * Get a request for a file operation; always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                                             struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
        /* Matches smp_wmb() in fuse_set_initialized() */
        smp_rmb();
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        __set_bit(FR_WAITING, &req->flags);
        __clear_bit(FR_BACKGROUND, &req->flags);
        return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (test_bit(FR_BACKGROUND, &req->flags)) {
                        /*
                         * We get here in the unlikely case that a background
                         * request was allocated but not sent
                         */
                        spin_lock(&fc->lock);
                        if (!fc->blocked)
                                wake_up(&fc->blocked_waitq);
                        spin_unlock(&fc->lock);
                }

                if (test_bit(FR_WAITING, &req->flags)) {
                        __clear_bit(FR_WAITING, &req->flags);
                        atomic_dec(&fc->num_waiting);
                }

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

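/* Allocate the next unique request id.  Caller must hold fiq->waitq.lock. */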
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
        return ++fiq->reqctr;
}

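/* Add a request to the input queue.  Caller must hold fiq->waitq.lock. */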
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fiq->pending);
        wake_up_locked(&fiq->waitq);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        struct fuse_iqueue *fiq = &fc->iq;

        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fiq->waitq.lock);
        if (fiq->connected) {
                fiq->forget_list_tail->next = forget;
                fiq->forget_list_tail = forget;
                wake_up_locked(&fiq->waitq);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
        spin_unlock(&fiq->waitq.lock);
}

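/*
 * Queue as many background requests as allowed: move requests from the
 * background queue to the input queue while fewer than max_background
 * requests are active.  Called under fc->lock.
 */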
static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;
                struct fuse_iqueue *fiq = &fc->iq;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                spin_lock(&fiq->waitq.lock);
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request(fiq, req);
                spin_unlock(&fiq->waitq.lock);
        }
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
        struct fuse_iqueue *fiq = &fc->iq;
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del_init(&req->list);
        spin_lock(&fiq->waitq.lock);
        list_del_init(&req->intr_entry);
        spin_unlock(&fiq->waitq.lock);
        WARN_ON(test_bit(FR_PENDING, &req->flags));
        WARN_ON(test_bit(FR_SENT, &req->flags));
        smp_wmb();
        set_bit(FR_FINISHED, &req->flags);
        if (test_bit(FR_BACKGROUND, &req->flags)) {
                clear_bit(FR_BACKGROUND, &req->flags);
                if (fc->num_background == fc->max_background)
                        fc->blocked = 0;

                /* Wake up next waiter, if any */
                if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);

                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
        spin_lock(&fiq->waitq.lock);
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
                wake_up_locked(&fiq->waitq);
        }
        spin_unlock(&fiq->waitq.lock);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &fc->iq;
        int err;

        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                err = wait_event_interruptible(req->waitq,
                                        test_bit(FR_FINISHED, &req->flags));
                if (!err)
                        return;

                set_bit(FR_INTERRUPTED, &req->flags);
                /* matches barrier in fuse_dev_do_read() */
                smp_mb__after_atomic();
                if (test_bit(FR_SENT, &req->flags))
                        queue_interrupt(fiq, req);
        }

        if (!test_bit(FR_FORCE, &req->flags)) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                err = wait_event_interruptible(req->waitq,
                                        test_bit(FR_FINISHED, &req->flags));
                restore_sigs(&oldset);

                if (!err)
                        return;

                spin_lock(&fiq->waitq.lock);
                /* Request is not yet in userspace, bail out */
                if (test_bit(FR_PENDING, &req->flags)) {
                        list_del(&req->list);
                        spin_unlock(&fiq->waitq.lock);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
                spin_unlock(&fiq->waitq.lock);
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &fc->iq;

        BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
        spin_lock(&fiq->waitq.lock);
        if (!fiq->connected) {
                spin_unlock(&fiq->waitq.lock);
                req->out.h.error = -ENOTCONN;
        } else {
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request(fiq, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);
                spin_unlock(&fiq->waitq.lock);

                request_wait_answer(fc, req);
                /* Pairs with smp_wmb() in request_end() */
                smp_rmb();
        }
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        __set_bit(FR_ISREPLY, &req->flags);
        if (!test_bit(FR_WAITING, &req->flags)) {
                __set_bit(FR_WAITING, &req->flags);
                atomic_inc(&fc->num_waiting);
        }
        __fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
        if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
                args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

        if (fc->minor < 9) {
                switch (args->in.h.opcode) {
                case FUSE_LOOKUP:
                case FUSE_CREATE:
                case FUSE_MKNOD:
                case FUSE_MKDIR:
                case FUSE_SYMLINK:
                case FUSE_LINK:
                        args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
                        break;
                case FUSE_GETATTR:
                case FUSE_SETATTR:
                        args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
                        break;
                }
        }
        if (fc->minor < 12) {
                switch (args->in.h.opcode) {
                case FUSE_CREATE:
                        args->in.args[0].size = sizeof(struct fuse_open_in);
                        break;
                case FUSE_MKNOD:
                        args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
                        break;
                }
        }
}

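/*
 * Fill a request from 'args', send it synchronously and copy the reply
 * back into 'args'.  Returns the request's error, or the size of the
 * last (variable sized) out argument when args->out.argvar is set.
 *
 * Typical use (sketch only; FUSE_GETATTR is shown purely as an
 * illustration of the pattern used by callers):
 *
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */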
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
        struct fuse_req *req;
        ssize_t ret;

        req = fuse_get_req(fc, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* Needs to be done after fuse_get_req() so that fc->minor is valid */
        fuse_adjust_compat(fc, args);

        req->in.h.opcode = args->in.h.opcode;
        req->in.h.nodeid = args->in.h.nodeid;
        req->in.numargs = args->in.numargs;
        memcpy(req->in.args, args->in.args,
               args->in.numargs * sizeof(struct fuse_in_arg));
        req->out.argvar = args->out.argvar;
        req->out.numargs = args->out.numargs;
        memcpy(req->out.args, args->out.args,
               args->out.numargs * sizeof(struct fuse_arg));
        fuse_request_send(fc, req);
        ret = req->out.h.error;
        if (!ret && args->out.argvar) {
                BUG_ON(args->out.numargs != 1);
                ret = req->out.args[0].size;
        }
        fuse_put_request(fc, req);

        return ret;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
        if (!test_bit(FR_WAITING, &req->flags)) {
                __set_bit(FR_WAITING, &req->flags);
                atomic_inc(&fc->num_waiting);
        }
        __set_bit(FR_ISREPLY, &req->flags);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        BUG_ON(!req->end);
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_background_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                spin_unlock(&fc->lock);
                req->out.h.error = -ENOTCONN;
                req->end(fc, req);
                fuse_put_request(fc, req);
        }
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
{
        int err = -ENODEV;
        struct fuse_iqueue *fiq = &fc->iq;

        __clear_bit(FR_ISREPLY, &req->flags);
        req->in.h.unique = unique;
        spin_lock(&fiq->waitq.lock);
        if (fiq->connected) {
                queue_request(fiq, req);
                err = 0;
        }
        spin_unlock(&fiq->waitq.lock);

        return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        inarg.nlookup = 1;
        req = fuse_get_req_nofail_nopages(fc, file);
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        __clear_bit(FR_ISREPLY, &req->flags);
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&req->waitq.lock);
                if (test_bit(FR_ABORTED, &req->flags))
                        err = -ENOENT;
                else
                        set_bit(FR_LOCKED, &req->flags);
                spin_unlock(&req->waitq.lock);
        }
        return err;
}

/*
 * Unlock the request.  If it was aborted while locked, the caller is
 * responsible for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&req->waitq.lock);
                if (test_bit(FR_ABORTED, &req->flags))
                        err = -ENOENT;
                else
                        clear_bit(FR_LOCKED, &req->flags);
                spin_unlock(&req->waitq.lock);
        }
        return err;
}

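/*
 * State for copying between a fuse request and the userspace buffer,
 * which is either an iov_iter or a set of pipe buffers.
 */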
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        struct iov_iter *iter;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        struct page *pg;
        unsigned len;
        unsigned offset;
        unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct iov_iter *iter)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (cs->write)
                        buf->len = PAGE_SIZE - cs->len;
                cs->currbuf = NULL;
        } else if (cs->pg) {
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
        }
        cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it into kernel address
 * space, and lock the request.
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        struct page *page;
        int err;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = buf->ops->confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->pg = buf->page;
                        cs->offset = buf->offset;
                        cs->len = buf->len;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->pg = page;
                        cs->offset = 0;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                size_t off;
                err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
                if (err < 0)
                        return err;
                BUG_ON(!err);
                cs->len = err;
                cs->offset = off;
                cs->pg = page;
                iov_iter_advance(cs->iter, err);
        }

        return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                void *pgaddr = kmap_atomic(cs->pg);
                void *buf = pgaddr + cs->offset;

                if (cs->write)
                        memcpy(buf, *val, ncpy);
                else
                        memcpy(*val, buf, ncpy);

                kunmap_atomic(pgaddr);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->offset += ncpy;
        return ncpy;
}

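/*
 * Check that a page is safe to steal into the page cache: it must not
 * be mapped, must hold no extra references and must carry no unexpected
 * flags.
 */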
static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (buf->ops->steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (!PageUptodate(newpage))
                SetPageUptodate(newpage);

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        /*
         * This is a new and locked page; it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);
                return err;
        }

        page_cache_get(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->req->waitq.lock);
        if (test_bit(FR_ABORTED, &cs->req->flags))
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->req->waitq.lock);

        if (err) {
                unlock_page(newpage);
                page_cache_release(newpage);
                return err;
        }

        unlock_page(oldpage);
        page_cache_release(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->pg = buf->page;
        cs->offset = buf->offset;

        err = lock_request(cs->req);
        if (err)
                return err;

        return 1;
}

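/*
 * Reference an existing page as the next pipe buffer instead of copying
 * its contents (splice read path).
 */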
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;
        int err;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        page_cache_get(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically.
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;
                unsigned offset = req->page_descs[i].offset;
                unsigned count = min(nbytes, req->page_descs[i].length);

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

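/* The two helpers below must be called with fiq->waitq.lock held */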
static int forget_pending(struct fuse_iqueue *fiq)
{
        return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
        return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
                forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
                               struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fiq);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fiq->waitq.lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}

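/*
 * Dequeue up to 'max' entries from the forget list.  Called with
 * fiq->waitq.lock held.
 */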
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
                                               unsigned max,
                                               unsigned *countp)
{
        struct fuse_forget_link *head = fiq->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;

        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fiq->forget_list_head.next = *newhead;
        *newhead = NULL;
        if (fiq->forget_list_head.next == NULL)
                fiq->forget_list_tail = &fiq->forget_list_head;

        if (countp != NULL)
                *countp = count;

        return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
__releases(fiq->waitq.lock)
{
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
                .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };

        spin_unlock(&fiq->waitq.lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
        int err;
        unsigned max_forgets;
        unsigned count;
        struct fuse_forget_link *head;
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
                .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };

        if (nbytes < ih.len) {
                spin_unlock(&fiq->waitq.lock);
                return -EINVAL;
        }

        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fiq, max_forgets, &count);
        spin_unlock(&fiq->waitq.lock);

        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

        while (head) {
                struct fuse_forget_link *forget = head;

                if (!err) {
                        err = fuse_copy_one(cs, &forget->forget_one,
                                            sizeof(forget->forget_one));
                }
                head = forget->next;
                kfree(forget);
        }

        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
                            struct fuse_copy_state *cs,
                            size_t nbytes)
__releases(fiq->waitq.lock)
{
        if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fiq, cs, nbytes);
        else
                return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace buffer.
 * If no reply is needed (FORGET), or the request has been aborted, or
 * there was an error during the copying, then it is finished by calling
 * request_end().  Otherwise add it to the processing list and set the
 * 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_iqueue *fiq = &fc->iq;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

 restart:
        spin_lock(&fiq->waitq.lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
            !request_pending(fiq))
                goto err_unlock;

        err = wait_event_interruptible_exclusive_locked(fiq->waitq,
                                !fiq->connected || request_pending(fiq));
        if (err)
                goto err_unlock;

        err = -ENODEV;
        if (!fiq->connected)
                goto err_unlock;

        if (!list_empty(&fiq->interrupts)) {
                req = list_entry(fiq->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fiq, cs, nbytes, req);
        }

        if (forget_pending(fiq)) {
                if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
                        return fuse_read_forget(fc, fiq, cs, nbytes);

                if (fiq->forget_batch <= -8)
                        fiq->forget_batch = 16;
        }

        req = list_entry(fiq->pending.next, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
        spin_unlock(&fiq->waitq.lock);

        spin_lock(&fc->lock);
        list_add(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since its data may be too large */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        clear_bit(FR_LOCKED, &req->flags);
        if (!fc->connected) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!test_bit(FR_ISREPLY, &req->flags)) {
                request_end(fc, req);
        } else {
                list_move_tail(&req->list, &fc->processing);
                set_bit(FR_SENT, &req->flags);
                /* matches barrier in request_wait_answer() */
                smp_mb__after_atomic();
                if (test_bit(FR_INTERRUPTED, &req->flags))
                        queue_interrupt(fiq, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fiq->waitq.lock);
        return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
        /*
         * The fuse device's file's private_data is used to hold
         * the fuse_conn(ection) when it is mounted, and is used to
         * keep track of whether the file has been mounted already.
         */
        file->private_data = NULL;
        return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        if (!iter_is_iovec(to))
                return -EINVAL;

        fuse_copy_init(&cs, 1, to);

        return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int ret;
        int page_nr = 0;
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(in);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, 1, NULL);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
        if (ret < 0)
                goto out;

        ret = 0;
        pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
                goto out_unlock;
        }

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out_unlock;
        }

        while (page_nr < cs.nr_segs) {
                int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + newbuf;

                buf->page = bufs[page_nr].page;
                buf->offset = bufs[page_nr].offset;
                buf->len = bufs[page_nr].len;
                /*
                 * Need to be careful about this.  Having buf->ops in module
                 * code can Oops if the buffer persists after module unload.
                 */
                buf->ops = &nosteal_pipe_buf_ops;

                pipe->nrbufs++;
                page_nr++;
                ret += buf->len;

                if (pipe->files)
                        do_wakeup = 1;
        }

out_unlock:
        pipe_unlock(pipe);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

out:
        for (; page_nr < cs.nr_segs; page_nr++)
                page_cache_release(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)
{
        struct fuse_notify_delete_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

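/*
 * Handle FUSE_NOTIFY_STORE: copy data supplied by the filesystem
 * directly into the inode's page cache, extending the file size if
 * necessary.
 */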
1589 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1590                              struct fuse_copy_state *cs)
1591 {
1592         struct fuse_notify_store_out outarg;
1593         struct inode *inode;
1594         struct address_space *mapping;
1595         u64 nodeid;
1596         int err;
1597         pgoff_t index;
1598         unsigned int offset;
1599         unsigned int num;
1600         loff_t file_size;
1601         loff_t end;
1602
1603         err = -EINVAL;
1604         if (size < sizeof(outarg))
1605                 goto out_finish;
1606
1607         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1608         if (err)
1609                 goto out_finish;
1610
1611         err = -EINVAL;
1612         if (size - sizeof(outarg) != outarg.size)
1613                 goto out_finish;
1614
1615         nodeid = outarg.nodeid;
1616
1617         down_read(&fc->killsb);
1618
1619         err = -ENOENT;
1620         if (!fc->sb)
1621                 goto out_up_killsb;
1622
1623         inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1624         if (!inode)
1625                 goto out_up_killsb;
1626
1627         mapping = inode->i_mapping;
1628         index = outarg.offset >> PAGE_CACHE_SHIFT;
1629         offset = outarg.offset & ~PAGE_CACHE_MASK;
1630         file_size = i_size_read(inode);
1631         end = outarg.offset + outarg.size;
1632         if (end > file_size) {
1633                 file_size = end;
1634                 fuse_write_update_size(inode, file_size);
1635         }
1636
1637         num = outarg.size;
1638         while (num) {
1639                 struct page *page;
1640                 unsigned int this_num;
1641
1642                 err = -ENOMEM;
1643                 page = find_or_create_page(mapping, index,
1644                                            mapping_gfp_mask(mapping));
1645                 if (!page)
1646                         goto out_iput;
1647
1648                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1649                 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1650                 if (!err && offset == 0 &&
1651                     (this_num == PAGE_CACHE_SIZE || file_size == end))
1652                         SetPageUptodate(page);
1653                 unlock_page(page);
1654                 page_cache_release(page);
1655
1656                 if (err)
1657                         goto out_iput;
1658
1659                 num -= this_num;
1660                 offset = 0;
1661                 index++;
1662         }
1663
1664         err = 0;
1665
1666 out_iput:
1667         iput(inode);
1668 out_up_killsb:
1669         up_read(&fc->killsb);
1670 out_finish:
1671         fuse_copy_finish(cs);
1672         return err;
1673 }
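
/*
 * Note on fuse_notify_store(): the payload layout is fuse_notify_store_out
 * followed by exactly outarg.size bytes of file data, which are copied
 * straight into the inode's page cache starting at outarg.offset.  A page is
 * marked uptodate only when the copy covers it completely (offset == 0 and
 * either a full page or the final, size-extending tail); partially filled
 * pages stay !uptodate so a later read still goes to the filesystem.
 */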

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
        release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                         struct fuse_notify_retrieve_out *outarg)
{
        int err;
        struct address_space *mapping = inode->i_mapping;
        struct fuse_req *req;
        pgoff_t index;
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
        size_t total_len = 0;
        int num_pages;

        offset = outarg->offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);

        num = outarg->size;
        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;

        num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

        req = fuse_get_req(fc, num_pages);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->in.h.opcode = FUSE_NOTIFY_REPLY;
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
        req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;

        index = outarg->offset >> PAGE_CACHE_SHIFT;

        while (num && req->num_pages < num_pages) {
                struct page *page;
                unsigned int this_num;

                page = find_get_page(mapping, index);
                if (!page)
                        break;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;

                offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
        req->in.args[0].size = sizeof(req->misc.retrieve_in);
        req->in.args[0].value = &req->misc.retrieve_in;
        req->in.args[1].size = total_len;

        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
        if (err)
                fuse_retrieve_end(fc, req);

        return err;
}
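
/*
 * Note on fuse_retrieve(): the answer sent back as FUSE_NOTIFY_REPLY may be
 * shorter than requested.  The range is clamped to i_size, capped at
 * FUSE_MAX_PAGES_PER_REQ pages, and the page walk stops at the first hole in
 * the page cache (find_get_page() failure).  The daemon must therefore look
 * at retrieve_in.size rather than assume it got everything it asked for.
 */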

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
                                struct fuse_copy_state *cs)
{
        struct fuse_notify_retrieve_out outarg;
        struct inode *inode;
        int err;

        err = -EINVAL;
        if (size != sizeof(outarg))
                goto copy_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto copy_finish;

        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                u64 nodeid = outarg.nodeid;

                inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
                if (inode) {
                        err = fuse_retrieve(fc, inode, &outarg);
                        iput(inode);
                }
        }
        up_read(&fc->killsb);

        return err;

copy_finish:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        /* Don't try to move pages (yet) */
        cs->move_pages = 0;

        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);

        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);

        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);

        case FUSE_NOTIFY_STORE:
                return fuse_notify_store(fc, size, cs);

        case FUSE_NOTIFY_RETRIEVE:
                return fuse_notify_retrieve(fc, size, cs);

        case FUSE_NOTIFY_DELETE:
                return fuse_notify_delete(fc, size, cs);

        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}
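
/*
 * Note on fuse_notify(): ownership of the copy state passes to the handler,
 * so every fuse_notify_*() above must call fuse_copy_finish() on all paths,
 * as the default case does for unknown codes.  "size" is the payload length,
 * i.e. the write size minus the already-consumed fuse_out_header.
 */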

/*
 * Look up request on processing list by unique ID.  Interrupt replies are
 * matched too, via req->intr_unique.
 */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct fuse_req *req;

        list_for_each_entry(req, &fc->processing, list) {
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}
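
/*
 * Worked example for copy_out_args(): a FUSE_READ reply has one
 * variable-length out-arg (out->argvar set) whose size is the requested read
 * size, say 4096.  If the daemon writes only the 16-byte header plus 1000
 * bytes of data, reqsize = 16 + 4096 = 4112 but nbytes = 1016; the last arg
 * is shrunk by the 3096-byte difference so exactly 1000 bytes are copied.
 * A reply longer than the declared args (reqsize < nbytes), or a short reply
 * when argvar is not set, is rejected with -EINVAL.
 */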

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, it is removed
 * from the list and the rest of the buffer is copied to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
                                 struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_out_header oh;

        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;

        err = -EINVAL;
        if (oh.len != nbytes)
                goto err_finish;

        /*
         * A zero oh.unique indicates an unsolicited notification message,
         * in which case oh.error contains the notification code.
         */
        if (!oh.unique) {
                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
                return err ? err : nbytes;
        }

        err = -EINVAL;
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        if (!req)
                goto err_unlock;

        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
                        goto err_unlock;

                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(&fc->iq, req);

                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                return nbytes;
        }

        clear_bit(FR_SENT, &req->flags);
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        set_bit(FR_LOCKED, &req->flags);
        cs->req = req;
        if (!req->out.page_replace)
                cs->move_pages = 0;
        spin_unlock(&fc->lock);

        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);

        spin_lock(&fc->lock);
        clear_bit(FR_LOCKED, &req->flags);
        if (!fc->connected)
                err = -ENOENT;
        else if (err)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(cs);
        return err;
}
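
/*
 * Illustrative sketch (not kernel code): a minimal daemon-side reply as
 * consumed by fuse_dev_do_write().  Assumes a FUSE_GETATTR request whose
 * unique ID was read earlier from the fuse_in_header; fuse_fd and in_unique
 * are placeholders.
 *
 *      struct fuse_out_header h;
 *      struct fuse_attr_out arg = { 0 };  // filled in by the daemon
 *      struct iovec iov[2];
 *
 *      h.unique = in_unique;           // must echo the request's unique ID
 *      h.error = 0;                    // 0 or a negative errno > -1000
 *      h.len = sizeof(h) + sizeof(arg); // must equal the total write size
 *
 *      iov[0] = (struct iovec){ &h, sizeof(h) };
 *      iov[1] = (struct iovec){ &arg, sizeof(arg) };
 *      writev(fuse_fd, iov, 2);
 */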

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;

        if (!iter_is_iovec(from))
                return -EINVAL;

        fuse_copy_init(&cs, 0, from);

        return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)
{
        unsigned nbuf;
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc;
        size_t rem;
        ssize_t ret;

        fc = fuse_get_conn(out);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

        ret = -EINVAL;
        if (rem < len) {
                pipe_unlock(pipe);
                goto out;
        }

        rem = len;
        while (rem) {
                struct pipe_buffer *ibuf;
                struct pipe_buffer *obuf;

                BUG_ON(nbuf >= pipe->buffers);
                BUG_ON(!pipe->nrbufs);
                ibuf = &pipe->bufs[pipe->curbuf];
                obuf = &bufs[nbuf];

                if (rem >= ibuf->len) {
                        *obuf = *ibuf;
                        ibuf->ops = NULL;
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
                        ibuf->ops->get(pipe, ibuf);
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
                        ibuf->offset += obuf->len;
                        ibuf->len -= obuf->len;
                }
                nbuf++;
                rem -= obuf->len;
        }
        pipe_unlock(pipe);

        fuse_copy_init(&cs, 0, NULL);
        cs.pipebufs = bufs;
        cs.nr_segs = nbuf;
        cs.pipe = pipe;

        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;

        ret = fuse_dev_do_write(fc, &cs, len);

        for (idx = 0; idx < nbuf; idx++) {
                struct pipe_buffer *buf = &bufs[idx];
                buf->ops->release(pipe, buf);
        }
out:
        kfree(bufs);
        return ret;
}
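
/*
 * Note on fuse_dev_splice_write(): the pipe buffers are detached (or, for a
 * partially consumed buffer, referenced) under pipe_lock() and handed to the
 * copy machinery as cs.pipebufs.  With SPLICE_F_MOVE the pages may be moved
 * into the page cache instead of copied, but only for requests that set
 * out.page_replace -- fuse_dev_do_write() clears cs.move_pages otherwise.
 */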

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_iqueue *fiq;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        fiq = &fc->iq;
        poll_wait(file, &fiq->waitq, wait);

        spin_lock(&fiq->waitq.lock);
        if (!fiq->connected)
                mask = POLLERR;
        else if (request_pending(fiq))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fiq->waitq.lock);

        return mask;
}
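
/*
 * Illustrative sketch (not kernel code): how a userspace daemon might drive
 * the device with poll(2).  The device is always writable; POLLIN means a
 * request is pending and POLLERR that the connection is gone.  fuse_fd and
 * handle_one_request() are placeholders.
 *
 *      struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *
 *      for (;;) {
 *              if (poll(&pfd, 1, -1) < 0)
 *                      break;
 *              if (pfd.revents & POLLERR)
 *                      break;          // aborted or unmounted
 *              if (pfd.revents & POLLIN)
 *                      handle_one_request(fuse_fd);
 *      }
 */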

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                clear_bit(FR_PENDING, &req->flags);
                clear_bit(FR_SENT, &req->flags);
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

static void end_polls(struct fuse_conn *fc)
{
        struct rb_node *p;

        p = rb_first(&fc->polled_files);

        while (p) {
                struct fuse_file *ff;
                ff = rb_entry(p, struct fuse_file, polled_node);
                wake_up_interruptible_all(&ff->poll_wait);

                p = rb_next(p);
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests; these should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK; such a request will have been removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        struct fuse_iqueue *fiq = &fc->iq;

        spin_lock(&fc->lock);
        if (fc->connected) {
                struct fuse_req *req, *next;
                LIST_HEAD(to_end1);
                LIST_HEAD(to_end2);

                fc->connected = 0;
                fc->blocked = 0;
                fuse_set_initialized(fc);
                list_for_each_entry_safe(req, next, &fc->io, list) {
                        req->out.h.error = -ECONNABORTED;
                        spin_lock(&req->waitq.lock);
                        set_bit(FR_ABORTED, &req->flags);
                        if (!test_bit(FR_LOCKED, &req->flags))
                                list_move(&req->list, &to_end1);
                        spin_unlock(&req->waitq.lock);
                }
                fc->max_background = UINT_MAX;
                flush_bg_queue(fc);

                spin_lock(&fiq->waitq.lock);
                fiq->connected = 0;
                list_splice_init(&fiq->pending, &to_end2);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
                wake_up_all_locked(&fiq->waitq);
                spin_unlock(&fiq->waitq.lock);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);

                list_splice_init(&fc->processing, &to_end2);
                while (!list_empty(&to_end1)) {
                        req = list_first_entry(&to_end1, struct fuse_req, list);
                        __fuse_get_request(req);
                        request_end(fc, req);
                        spin_lock(&fc->lock);
                }
                end_requests(fc, &to_end2);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
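
/*
 * Editorial note: besides fuse_dev_release() below, fuse_abort_conn() can
 * also be triggered from userspace via the fuse control filesystem
 * (typically /sys/fs/fuse/connections/<id>/abort; see fs/fuse/control.c).
 * Either way, outstanding requests complete with -ECONNABORTED.
 */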

int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                WARN_ON(!list_empty(&fc->io));
                WARN_ON(fc->iq.fasync != NULL);
                fuse_abort_conn(fc);
                fuse_conn_put(fc);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->iq.fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .open           = fuse_dev_open,
        .llseek         = no_llseek,
        .read_iter      = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write_iter     = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}