CIFS: Reset read oplock to NONE if we have mandatory locks after reopen
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

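/*
 * Map the O_ACCMODE bits of the VFS open flags to the NT generic access
 * mask requested in the SMB open call.
 */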
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access-denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

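/*
 * Open a file via the SMB POSIX extensions. On success the caller gets the
 * netfid and oplock back; if *pinode is NULL, a new inode is set up from
 * the FILE_UNIX_BASIC_INFO the server returned.
 */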
int cifs_posix_open(char *full_path, struct inode **pinode,
                        struct super_block *sb, int mode, unsigned int f_flags,
                        __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

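/*
 * Open a file the traditional (non-POSIX) way: convert the VFS flags to an
 * NT desired access mask and create disposition, issue the protocol open,
 * then refresh the inode info from the returned metadata.
 */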
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for the disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists);
 *      O_CREAT | O_TRUNC is similar, but it truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open
 *      call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably.  O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client.  The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = fid;
        oparms.reconnect = false;

        rc = server->ops->open(xid, &oparms, oplock, buf);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);

out:
        kfree(buf);
        return rc;
}

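/*
 * Return true if any fid open on this inode holds at least one cached
 * mandatory byte-range lock; walks every per-fid lock list under lock_sem.
 */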
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}

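/*
 * Allocate the per-open-file private data, link it into the inode and tcon
 * open-file lists, and hand the fid/oplock state to the server ops. Takes
 * references on the dentry, the tlink and the superblock that are dropped
 * in cifsFileInfo_put().
 */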
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);
        spin_lock_init(&cfile->file_info_lock);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&tcon->open_file_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        fid->purge_cache = false;
        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);

        /* if readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&tcon->open_file_lock);

        if (fid->purge_cache)
                cifs_zap_mapping(inode);

        file->private_data = cfile;
        return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file->file_info_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file->file_info_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
        bool oplock_break_cancelled;

        spin_lock(&tcon->open_file_lock);

        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
        spin_unlock(&cifs_file->file_info_lock);

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         d_inode(cifs_file->dentry));
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
                cifs_set_oplock_level(cifsi, 0);
        }

        spin_unlock(&tcon->open_file_lock);

        oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        if (oplock_break_cancelled)
                cifs_done_oplock_break(cifsi);

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}

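/*
 * VFS ->open for regular files: try a POSIX open when the server supports
 * the unix extensions, fall back to the NT open path otherwise, then
 * attach the resulting cifsFileInfo to the struct file.
 */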
int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file_dentry(file));
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (file->f_flags & O_DIRECT &&
            cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        file->f_op = &cifs_file_direct_nobrl_ops;
                else
                        file->f_op = &cifs_file_direct_ops;
        }

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fall through to retry the open the old way on network
                 * i/o or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read(&cinode->lock_sem);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

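/*
 * Reopen an invalidated file handle, e.g. after a reconnect. If can_flush
 * is set, write back dirty pages and refresh the inode info before handing
 * the new fid to the server ops; byte-range locks are then reacquired via
 * cifs_relock_file().
 */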
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = d_inode(cfile->dentry);
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * We can not grab the rename sem here because various ops, including
         * those that already have the rename sem, can end up causing writepage
         * to get called, and if the server was down that means we end up here;
         * we can never tell if the caller already has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to retry
                 * hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = &cfile->fid;
        oparms.reconnect = true;

        /*
         * We can not refresh the inode by passing in a file_info buf to be
         * returned by ops->open and then calling get_inode_info with the
         * returned buf, since the file might have write-behind data that
         * needs to be flushed and the server version of the file size can be
         * stale. If we knew for sure that the inode was not dirty locally we
         * could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Otherwise we are writing out data to the server already and could
         * deadlock if we tried to flush data; since we do not know whether we
         * have data that would invalidate the current end of file on the
         * server, we can not go to the server to get the new inode info.
         */

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

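/*
 * Walk all files open on this tree connection and reopen any handle that
 * was invalidated, collecting them on a temporary list first so that the
 * reopen calls happen outside open_file_lock.
 */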
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *open_file;
        struct list_head *tmp;
        struct list_head *tmp1;
        struct list_head tmp_list;

        cifs_dbg(FYI, "Reopen persistent handles\n");
        INIT_LIST_HEAD(&tmp_list);

        /* list all files open on tree connection, reopen persistent handles */
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                open_file = list_entry(tmp, struct cifsFileInfo, tlist);
                if (!open_file->invalidHandle)
                        continue;
                cifsFileInfo_get(open_file);
                list_add_tail(&open_file->rlist, &tmp_list);
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_safe(tmp, tmp1, &tmp_list) {
                open_file = list_entry(tmp, struct cifsFileInfo, rlist);
                cifs_reopen_file(open_file, false /* do not flush */);
                list_del_init(&open_file->rlist);
                cifsFileInfo_put(open_file);
        }
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cfile->file_info_lock);
        if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cfile->file_info_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cfile->file_info_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

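/*
 * Allocate and initialize a byte-range lock record owned by the current
 * thread group.
 */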
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check: CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                posix_unblock_lock(flock);
        }
        return rc;
}

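/*
 * Push all cached mandatory locks for this fid to the server, batching as
 * many LOCKING_ANDX ranges per request as the server's maxBuf allows, with
 * one pass per lock type (exclusive, then shared).
 */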
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}

static __u32
hash_lockowner(fl_owner_t owner)
{
        return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

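/*
 * Push all cached POSIX locks on the inode to the server. The records to
 * send are preallocated (while not holding flc_lock), which is safe because
 * no new FL_POSIX locks can appear while the caller holds cinode->lock_sem.
 */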
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct inode *inode = d_inode(cfile->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock;
        struct file_lock_context *flctx = inode->i_flctx;
        unsigned int count = 0, i;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        if (!flctx)
                goto out;

        spin_lock(&flctx->flc_lock);
        list_for_each(el, &flctx->flc_posix) {
                count++;
        }
        spin_unlock(&flctx->flc_lock);

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_sem, which
         * protects locking operations of this inode.
         */
        for (i = 0; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        spin_lock(&flctx->flc_lock);
        list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cifs_dbg(VFS, "Can't push all brlocks!\n");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                el = el->next; /* advance to the next preallocated record */
                lck->pid = hash_lockowner(flock->fl_owner);
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
        }
        spin_unlock(&flctx->flc_lock);

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        /* we are going to update can_cache_brlcks here - need write access */
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        cinode->can_cache_brlcks = false;
        up_write(&cinode->lock_sem);
        return rc;
}

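/*
 * Decode a VFS file_lock into the server's lock type bits and set the
 * lock/unlock/wait flags for the request.
 */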
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
                bool *wait_flag, struct TCP_Server_Info *server)
{
        if (flock->fl_flags & FL_POSIX)
                cifs_dbg(FYI, "Posix\n");
        if (flock->fl_flags & FL_FLOCK)
                cifs_dbg(FYI, "Flock\n");
        if (flock->fl_flags & FL_SLEEP) {
                cifs_dbg(FYI, "Blocking lock\n");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
        if (flock->fl_flags & FL_LEASE)
                cifs_dbg(FYI, "Lease on file - not implemented yet\n");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
               FL_ACCESS | FL_LEASE | FL_CLOSE)))
                cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

        *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cifs_dbg(FYI, "F_WRLCK\n");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cifs_dbg(FYI, "F_UNLCK\n");
                *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cifs_dbg(FYI, "F_RDLCK\n");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cifs_dbg(FYI, "F_EXLCK\n");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cifs_dbg(FYI, "F_SHLCK\n");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cifs_dbg(FYI, "Unknown type of lock\n");
}

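/*
 * Handle F_GETLK: for mandatory locks there is no test-only operation on
 * the wire, so probe by trying to set the range (and immediately unlocking
 * it on success) to discover whether a conflicting lock exists.
 */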
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, unsigned int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->fid.netfid;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_test(file, flock);
                if (!rc)
                        return rc;

                if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
                rc = CIFSSMBPosixLock(xid, tcon, netfid,
                                      hash_lockowner(flock->fl_owner),
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
        }

        rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
        if (!rc)
                return rc;

        /* BB we could chain these into one lock request BB */
        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type, 0, 1, false);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
                return 0;
        }

        if (type & server->vals->shared_lock_type) {
                flock->fl_type = F_WRLCK;
                return 0;
        }

        type &= ~server->vals->exclusive_lock_type;

        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                    type | server->vals->shared_lock_type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                        type | server->vals->shared_lock_type, 0, 1, false);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
        } else
                flock->fl_type = F_WRLCK;

        return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
        struct list_head *li, *tmp;
        list_for_each_safe(li, tmp, source)
                list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, llist, llist) {
                cifs_del_lock_waiters(li);
                list_del(&li->llist);
                kfree(li);
        }
}

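/*
 * Unlock a byte range: drop matching cached locks locally when brlocks can
 * be cached, otherwise batch LOCKING_ANDX unlock ranges to the server,
 * keeping removed records on a temporary list so they can be restored if
 * the server request fails.
 */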
1426 int
1427 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1428                   unsigned int xid)
1429 {
1430         int rc = 0, stored_rc;
1431         int types[] = {LOCKING_ANDX_LARGE_FILES,
1432                        LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1433         unsigned int i;
1434         unsigned int max_num, num, max_buf;
1435         LOCKING_ANDX_RANGE *buf, *cur;
1436         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1437         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1438         struct cifsLockInfo *li, *tmp;
1439         __u64 length = 1 + flock->fl_end - flock->fl_start;
1440         struct list_head tmp_llist;
1441
1442         INIT_LIST_HEAD(&tmp_llist);
1443
1444         /*
1445          * Accessing maxBuf is racy with cifs_reconnect - need to store the
1446          * value and check it for zero before using.
1447          */
1448         max_buf = tcon->ses->server->maxBuf;
1449         if (!max_buf)
1450                 return -EINVAL;
1451
1452         max_num = (max_buf - sizeof(struct smb_hdr)) /
1453                                                 sizeof(LOCKING_ANDX_RANGE);
1454         buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1455         if (!buf)
1456                 return -ENOMEM;
1457
1458         down_write(&cinode->lock_sem);
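        /*
         * Two passes over the lock list: types[0] matches exclusive
         * large-file locks, types[1] the shared ones. Matching entries are
         * batched into the LOCKING_ANDX_RANGE array and flushed to the
         * server when the buffer fills (max_num entries) and again at the
         * end of each pass; while brlocks are still cached locally
         * (can_cache_brlcks) entries are dropped without a server round trip.
         */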
1459         for (i = 0; i < 2; i++) {
1460                 cur = buf;
1461                 num = 0;
1462                 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1463                         if (flock->fl_start > li->offset ||
1464                             (flock->fl_start + length) <
1465                             (li->offset + li->length))
1466                                 continue;
1467                         if (current->tgid != li->pid)
1468                                 continue;
1469                         if (types[i] != li->type)
1470                                 continue;
1471                         if (cinode->can_cache_brlcks) {
1472                                 /*
1473                                  * We can cache brlock requests - simply remove
1474                                  * the lock from the file's list.
1475                                  */
1476                                 list_del(&li->llist);
1477                                 cifs_del_lock_waiters(li);
1478                                 kfree(li);
1479                                 continue;
1480                         }
1481                         cur->Pid = cpu_to_le16(li->pid);
1482                         cur->LengthLow = cpu_to_le32((u32)li->length);
1483                         cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1484                         cur->OffsetLow = cpu_to_le32((u32)li->offset);
1485                         cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1486                         /*
1487                          * We need to save a lock here to let us add it again to
1488                          * the file's list if the unlock range request fails on
1489                          * the server.
1490                          */
1491                         list_move(&li->llist, &tmp_llist);
1492                         if (++num == max_num) {
1493                                 stored_rc = cifs_lockv(xid, tcon,
1494                                                        cfile->fid.netfid,
1495                                                        li->type, num, 0, buf);
1496                                 if (stored_rc) {
1497                                         /*
1498                                          * We failed on the unlock range
1499                                          * request - add all locks from the tmp
1500                                          * list to the head of the file's list.
1501                                          */
1502                                         cifs_move_llist(&tmp_llist,
1503                                                         &cfile->llist->locks);
1504                                         rc = stored_rc;
1505                                 } else
1506                                         /*
1507                                          * The unlock range request succeeded -
1508                                          * free the tmp list.
1509                                          */
1510                                         cifs_free_llist(&tmp_llist);
1511                                 cur = buf;
1512                                 num = 0;
1513                         } else
1514                                 cur++;
1515                 }
1516                 if (num) {
1517                         stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1518                                                types[i], num, 0, buf);
1519                         if (stored_rc) {
1520                                 cifs_move_llist(&tmp_llist,
1521                                                 &cfile->llist->locks);
1522                                 rc = stored_rc;
1523                         } else
1524                                 cifs_free_llist(&tmp_llist);
1525                 }
1526         }
1527
1528         up_write(&cinode->lock_sem);
1529         kfree(buf);
1530         return rc;
1531 }
1532
1533 static int
1534 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1535            bool wait_flag, bool posix_lck, int lock, int unlock,
1536            unsigned int xid)
1537 {
1538         int rc = 0;
1539         __u64 length = 1 + flock->fl_end - flock->fl_start;
1540         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1541         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1542         struct TCP_Server_Info *server = tcon->ses->server;
1543         struct inode *inode = d_inode(cfile->dentry);
1544
1545         if (posix_lck) {
1546                 int posix_lock_type;
1547
1548                 rc = cifs_posix_lock_set(file, flock);
1549                 if (rc <= 0)
1550                         return rc;
1551
1552                 if (type & server->vals->shared_lock_type)
1553                         posix_lock_type = CIFS_RDLCK;
1554                 else
1555                         posix_lock_type = CIFS_WRLCK;
1556
1557                 if (unlock == 1)
1558                         posix_lock_type = CIFS_UNLCK;
1559
1560                 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1561                                       hash_lockowner(flock->fl_owner),
1562                                       flock->fl_start, length,
1563                                       NULL, posix_lock_type, wait_flag);
1564                 goto out;
1565         }
1566
1567         if (lock) {
1568                 struct cifsLockInfo *lock;
1569
1570                 lock = cifs_lock_init(flock->fl_start, length, type);
1571                 if (!lock)
1572                         return -ENOMEM;
1573
1574                 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1575                 if (rc < 0) {
1576                         kfree(lock);
1577                         return rc;
1578                 }
1579                 if (!rc)
1580                         goto out;
1581
1582                 /*
1583                  * Windows 7 server can delay breaking a lease from read to None
1584                  * if we set a byte-range lock on a file - break it explicitly
1585                  * before sending the lock to the server to be sure the next
1586                  * read won't conflict with non-overlapping locks due to
1587                  * page reading.
1588                  */
1589                 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1590                                         CIFS_CACHE_READ(CIFS_I(inode))) {
1591                         cifs_zap_mapping(inode);
1592                         cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1593                                  inode);
1594                         CIFS_I(inode)->oplock = 0;
1595                 }
1596
1597                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1598                                             type, 1, 0, wait_flag);
1599                 if (rc) {
1600                         kfree(lock);
1601                         return rc;
1602                 }
1603
1604                 cifs_lock_add(cfile, lock);
1605         } else if (unlock)
1606                 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1607
1608 out:
1609         if (flock->fl_flags & FL_POSIX && !rc)
1610                 rc = locks_lock_file_wait(file, flock);
1611         return rc;
1612 }
1613
1614 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1615 {
1616         int rc, xid;
1617         int lock = 0, unlock = 0;
1618         bool wait_flag = false;
1619         bool posix_lck = false;
1620         struct cifs_sb_info *cifs_sb;
1621         struct cifs_tcon *tcon;
1622         struct cifsInodeInfo *cinode;
1623         struct cifsFileInfo *cfile;
1624         __u16 netfid;
1625         __u32 type;
1626
1627         rc = -EACCES;
1628         xid = get_xid();
1629
1630         cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1631                  cmd, flock->fl_flags, flock->fl_type,
1632                  flock->fl_start, flock->fl_end);
1633
1634         cfile = (struct cifsFileInfo *)file->private_data;
1635         tcon = tlink_tcon(cfile->tlink);
1636
1637         cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1638                         tcon->ses->server);
1639
1640         cifs_sb = CIFS_FILE_SB(file);
1641         netfid = cfile->fid.netfid;
1642         cinode = CIFS_I(file_inode(file));
1643
1644         if (cap_unix(tcon->ses) &&
1645             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1646             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1647                 posix_lck = true;
1648         /*
1649          * BB add code here to normalize offset and length to account for
1650          * negative length which we can not accept over the wire.
1651          */
1652         if (IS_GETLK(cmd)) {
1653                 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1654                 free_xid(xid);
1655                 return rc;
1656         }
1657
1658         if (!lock && !unlock) {
1659                 /*
1660                  * if this is neither a lock nor an unlock request, there is
1661                  * nothing to do since we do not know what it is
1662                  */
1663                 free_xid(xid);
1664                 return -EOPNOTSUPP;
1665         }
1666
1667         rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1668                         xid);
1669         free_xid(xid);
1670         return rc;
1671 }
1672
1673 /*
1674  * update the file size (if needed) after a write. Should be called with
1675  * the inode->i_lock held
1676  */
1677 void
1678 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1679                       unsigned int bytes_written)
1680 {
1681         loff_t end_of_write = offset + bytes_written;
1682
1683         if (end_of_write > cifsi->server_eof)
1684                 cifsi->server_eof = end_of_write;
1685 }
1686
1687 static ssize_t
1688 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1689            size_t write_size, loff_t *offset)
1690 {
1691         int rc = 0;
1692         unsigned int bytes_written = 0;
1693         unsigned int total_written;
1694         struct cifs_sb_info *cifs_sb;
1695         struct cifs_tcon *tcon;
1696         struct TCP_Server_Info *server;
1697         unsigned int xid;
1698         struct dentry *dentry = open_file->dentry;
1699         struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
1700         struct cifs_io_parms io_parms;
1701
1702         cifs_sb = CIFS_SB(dentry->d_sb);
1703
1704         cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1705                  write_size, *offset, dentry);
1706
1707         tcon = tlink_tcon(open_file->tlink);
1708         server = tcon->ses->server;
1709
1710         if (!server->ops->sync_write)
1711                 return -ENOSYS;
1712
1713         xid = get_xid();
1714
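        /*
         * Write in chunks no larger than the transport's retry write size
         * (wp_retry_size), retrying each chunk on -EAGAIN and reopening the
         * handle first if it was invalidated by a reconnect.
         */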
1715         for (total_written = 0; write_size > total_written;
1716              total_written += bytes_written) {
1717                 rc = -EAGAIN;
1718                 while (rc == -EAGAIN) {
1719                         struct kvec iov[2];
1720                         unsigned int len;
1721
1722                         if (open_file->invalidHandle) {
1723                                 /* we could deadlock if we called
1724                                    filemap_fdatawait from here so tell
1725                                    reopen_file not to flush data to
1726                                    the server now */
1727                                 rc = cifs_reopen_file(open_file, false);
1728                                 if (rc != 0)
1729                                         break;
1730                         }
1731
1732                         len = min(server->ops->wp_retry_size(d_inode(dentry)),
1733                                   (unsigned int)write_size - total_written);
1734                         /* iov[0] is reserved for smb header */
1735                         iov[1].iov_base = (char *)write_data + total_written;
1736                         iov[1].iov_len = len;
1737                         io_parms.pid = pid;
1738                         io_parms.tcon = tcon;
1739                         io_parms.offset = *offset;
1740                         io_parms.length = len;
1741                         rc = server->ops->sync_write(xid, &open_file->fid,
1742                                         &io_parms, &bytes_written, iov, 1);
1743                 }
1744                 if (rc || (bytes_written == 0)) {
1745                         if (total_written)
1746                                 break;
1747                         else {
1748                                 free_xid(xid);
1749                                 return rc;
1750                         }
1751                 } else {
1752                         spin_lock(&d_inode(dentry)->i_lock);
1753                         cifs_update_eof(cifsi, *offset, bytes_written);
1754                         spin_unlock(&d_inode(dentry)->i_lock);
1755                         *offset += bytes_written;
1756                 }
1757         }
1758
1759         cifs_stats_bytes_written(tcon, total_written);
1760
1761         if (total_written > 0) {
1762                 spin_lock(&d_inode(dentry)->i_lock);
1763                 if (*offset > d_inode(dentry)->i_size)
1764                         i_size_write(d_inode(dentry), *offset);
1765                 spin_unlock(&d_inode(dentry)->i_lock);
1766         }
1767         mark_inode_dirty_sync(d_inode(dentry));
1768         free_xid(xid);
1769         return total_written;
1770 }
1771
1772 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1773                                         bool fsuid_only)
1774 {
1775         struct cifsFileInfo *open_file = NULL;
1776         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1777         struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
1778
1779         /* only filter by fsuid on multiuser mounts */
1780         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1781                 fsuid_only = false;
1782
1783         spin_lock(&tcon->open_file_lock);
1784         /* we could simply get the first list entry since write-only entries
1785            are always at the end of the list but since the first entry might
1786            have a close pending, we go through the whole list */
1787         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1788                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1789                         continue;
1790                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1791                         if (!open_file->invalidHandle) {
1792                                 /* found a good file */
1793                                 /* lock it so it will not be closed on us */
1794                                 cifsFileInfo_get(open_file);
1795                                 spin_unlock(&tcon->open_file_lock);
1796                                 return open_file;
1797                         } /* else might as well continue, and look for
1798                              another, or simply have the caller reopen it
1799                              again rather than trying to fix this handle */
1800                 } else /* write only file */
1801                         break; /* write only files are last so must be done */
1802         }
1803         spin_unlock(&tcon->open_file_lock);
1804         return NULL;
1805 }
1806
1807 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1808                                         bool fsuid_only)
1809 {
1810         struct cifsFileInfo *open_file, *inv_file = NULL;
1811         struct cifs_sb_info *cifs_sb;
1812         struct cifs_tcon *tcon;
1813         bool any_available = false;
1814         int rc;
1815         unsigned int refind = 0;
1816
1817         /* Having a null inode here (because mapping->host was set to zero by
1818         the VFS or MM) should not happen but we had reports of an oops (due to
1819         it being zero) during stress testcases so we need to check for it */
1820
1821         if (cifs_inode == NULL) {
1822                 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
1823                 dump_stack();
1824                 return NULL;
1825         }
1826
1827         cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1828         tcon = cifs_sb_master_tcon(cifs_sb);
1829
1830         /* only filter by fsuid on multiuser mounts */
1831         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1832                 fsuid_only = false;
1833
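        /*
         * Two-pass search: first prefer handles opened by the current tgid,
         * then (any_available) accept any writable handle. Invalid handles
         * are remembered in inv_file and reopened, with at most
         * MAX_REOPEN_ATT attempts before giving up.
         */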
1834         spin_lock(&tcon->open_file_lock);
1835 refind_writable:
1836         if (refind > MAX_REOPEN_ATT) {
1837                 spin_unlock(&tcon->open_file_lock);
1838                 return NULL;
1839         }
1840         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1841                 if (!any_available && open_file->pid != current->tgid)
1842                         continue;
1843                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1844                         continue;
1845                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1846                         if (!open_file->invalidHandle) {
1847                                 /* found a good writable file */
1848                                 cifsFileInfo_get(open_file);
1849                                 spin_unlock(&tcon->open_file_lock);
1850                                 return open_file;
1851                         } else {
1852                                 if (!inv_file)
1853                                         inv_file = open_file;
1854                         }
1855                 }
1856         }
1857         /* couldn't find a usable FH with the same pid, try any available */
1858         if (!any_available) {
1859                 any_available = true;
1860                 goto refind_writable;
1861         }
1862
1863         if (inv_file) {
1864                 any_available = false;
1865                 cifsFileInfo_get(inv_file);
1866         }
1867
1868         spin_unlock(&tcon->open_file_lock);
1869
1870         if (inv_file) {
1871                 rc = cifs_reopen_file(inv_file, false);
1872                 if (!rc)
1873                         return inv_file;
1874                 else {
1875                         spin_lock(&tcon->open_file_lock);
1876                         list_move_tail(&inv_file->flist,
1877                                         &cifs_inode->openFileList);
1878                         spin_unlock(&tcon->open_file_lock);
1879                         cifsFileInfo_put(inv_file);
1880                         ++refind;
1881                         inv_file = NULL;
1882                         spin_lock(&tcon->open_file_lock);
1883                         goto refind_writable;
1884                 }
1885         }
1886
1887         return NULL;
1888 }
1889
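/*
 * Write the byte range [from, to) of a cached page back to the server using
 * any writable handle for the inode; used when we fall back to page-at-a-time
 * writeback.
 */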
1890 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1891 {
1892         struct address_space *mapping = page->mapping;
1893         loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1894         char *write_data;
1895         int rc = -EFAULT;
1896         int bytes_written = 0;
1897         struct inode *inode;
1898         struct cifsFileInfo *open_file;
1899
1900         if (!mapping || !mapping->host)
1901                 return -EFAULT;
1902
1903         inode = page->mapping->host;
1904
1905         offset += (loff_t)from;
1906         write_data = kmap(page);
1907         write_data += from;
1908
1909         if ((to > PAGE_SIZE) || (from > to)) {
1910                 kunmap(page);
1911                 return -EIO;
1912         }
1913
1914         /* racing with truncate? */
1915         if (offset > mapping->host->i_size) {
1916                 kunmap(page);
1917                 return 0; /* don't care */
1918         }
1919
1920         /* check to make sure that we are not extending the file */
1921         if (mapping->host->i_size - offset < (loff_t)to)
1922                 to = (unsigned)(mapping->host->i_size - offset);
1923
1924         open_file = find_writable_file(CIFS_I(mapping->host), false);
1925         if (open_file) {
1926                 bytes_written = cifs_write(open_file, open_file->pid,
1927                                            write_data, to - from, &offset);
1928                 cifsFileInfo_put(open_file);
1929                 /* Does mm or vfs already set times? */
1930                 inode->i_atime = inode->i_mtime = current_time(inode);
1931                 if ((bytes_written > 0) && (offset))
1932                         rc = 0;
1933                 else if (bytes_written < 0)
1934                         rc = bytes_written;
1935         } else {
1936                 cifs_dbg(FYI, "No writeable filehandles for inode\n");
1937                 rc = -EIO;
1938         }
1939
1940         kunmap(page);
1941         return rc;
1942 }
1943
1944 static struct cifs_writedata *
1945 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1946                           pgoff_t end, pgoff_t *index,
1947                           unsigned int *found_pages)
1948 {
1949         unsigned int nr_pages;
1950         struct page **pages;
1951         struct cifs_writedata *wdata;
1952
1953         wdata = cifs_writedata_alloc((unsigned int)tofind,
1954                                      cifs_writev_complete);
1955         if (!wdata)
1956                 return NULL;
1957
1958         /*
1959          * find_get_pages_tag seems to return a max of 256 on each
1960          * iteration, so we must call it several times in order to
1961          * fill the array or the wsize is effectively limited to
1962          * 256 * PAGE_SIZE.
1963          */
1964         *found_pages = 0;
1965         pages = wdata->pages;
1966         do {
1967                 nr_pages = find_get_pages_tag(mapping, index,
1968                                               PAGECACHE_TAG_DIRTY, tofind,
1969                                               pages);
1970                 *found_pages += nr_pages;
1971                 tofind -= nr_pages;
1972                 pages += nr_pages;
1973         } while (nr_pages && tofind && *index <= end);
1974
1975         return wdata;
1976 }
1977
1978 static unsigned int
1979 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1980                     struct address_space *mapping,
1981                     struct writeback_control *wbc,
1982                     pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
1983 {
1984         unsigned int nr_pages = 0, i;
1985         struct page *page;
1986
1987         for (i = 0; i < found_pages; i++) {
1988                 page = wdata->pages[i];
1989                 /*
1990                  * At this point we hold neither mapping->tree_lock nor
1991                  * lock on the page itself: the page may be truncated or
1992                  * invalidated (changing page->mapping to NULL), or even
1993                  * swizzled back from swapper_space to tmpfs file
1994                  * mapping
1995                  */
1996
1997                 if (nr_pages == 0)
1998                         lock_page(page);
1999                 else if (!trylock_page(page))
2000                         break;
2001
2002                 if (unlikely(page->mapping != mapping)) {
2003                         unlock_page(page);
2004                         break;
2005                 }
2006
2007                 if (!wbc->range_cyclic && page->index > end) {
2008                         *done = true;
2009                         unlock_page(page);
2010                         break;
2011                 }
2012
2013                 if (*next && (page->index != *next)) {
2014                         /* Not next consecutive page */
2015                         unlock_page(page);
2016                         break;
2017                 }
2018
2019                 if (wbc->sync_mode != WB_SYNC_NONE)
2020                         wait_on_page_writeback(page);
2021
2022                 if (PageWriteback(page) ||
2023                                 !clear_page_dirty_for_io(page)) {
2024                         unlock_page(page);
2025                         break;
2026                 }
2027
2028                 /*
2029                  * This actually clears the dirty bit in the radix tree.
2030                  * See cifs_writepage() for more commentary.
2031                  */
2032                 set_page_writeback(page);
2033                 if (page_offset(page) >= i_size_read(mapping->host)) {
2034                         *done = true;
2035                         unlock_page(page);
2036                         end_page_writeback(page);
2037                         break;
2038                 }
2039
2040                 wdata->pages[i] = page;
2041                 *next = page->index + 1;
2042                 ++nr_pages;
2043         }
2044
2045         /* reset index to refind any pages skipped */
2046         if (nr_pages == 0)
2047                 *index = wdata->pages[0]->index + 1;
2048
2049         /* put any pages we aren't going to use */
2050         for (i = nr_pages; i < found_pages; i++) {
2051                 put_page(wdata->pages[i]);
2052                 wdata->pages[i] = NULL;
2053         }
2054
2055         return nr_pages;
2056 }
2057
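/*
 * Fill in the remaining writedata fields for a run of contiguous dirty pages
 * and hand the request to the transport's async_writev; tailsz trims the
 * final page so the write never extends past the current inode size.
 */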
2058 static int
2059 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2060                  struct address_space *mapping, struct writeback_control *wbc)
2061 {
2062         int rc = 0;
2063         struct TCP_Server_Info *server;
2064         unsigned int i;
2065
2066         wdata->sync_mode = wbc->sync_mode;
2067         wdata->nr_pages = nr_pages;
2068         wdata->offset = page_offset(wdata->pages[0]);
2069         wdata->pagesz = PAGE_SIZE;
2070         wdata->tailsz = min(i_size_read(mapping->host) -
2071                         page_offset(wdata->pages[nr_pages - 1]),
2072                         (loff_t)PAGE_SIZE);
2073         wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2074
2075         if (wdata->cfile != NULL)
2076                 cifsFileInfo_put(wdata->cfile);
2077         wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2078         if (!wdata->cfile) {
2079                 cifs_dbg(VFS, "No writable handles for inode\n");
2080                 rc = -EBADF;
2081         } else {
2082                 wdata->pid = wdata->cfile->pid;
2083                 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2084                 rc = server->ops->async_writev(wdata, cifs_writedata_release);
2085         }
2086
2087         for (i = 0; i < nr_pages; ++i)
2088                 unlock_page(wdata->pages[i]);
2089
2090         return rc;
2091 }
2092
2093 static int cifs_writepages(struct address_space *mapping,
2094                            struct writeback_control *wbc)
2095 {
2096         struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
2097         struct TCP_Server_Info *server;
2098         bool done = false, scanned = false, range_whole = false;
2099         pgoff_t end, index;
2100         struct cifs_writedata *wdata;
2101         int rc = 0;
2102
2103         /*
2104          * If wsize is smaller than the page cache size, default to writing
2105          * one page at a time via cifs_writepage
2106          */
2107         if (cifs_sb->wsize < PAGE_SIZE)
2108                 return generic_writepages(mapping, wbc);
2109
2110         if (wbc->range_cyclic) {
2111                 index = mapping->writeback_index; /* Start from prev offset */
2112                 end = -1;
2113         } else {
2114                 index = wbc->range_start >> PAGE_SHIFT;
2115                 end = wbc->range_end >> PAGE_SHIFT;
2116                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2117                         range_whole = true;
2118                 scanned = true;
2119         }
2120         server = cifs_sb_master_tcon(cifs_sb)->ses->server;
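        /*
         * Each pass through the loop below negotiates credits and an
         * effective wsize with the server before gathering dirty pages, so
         * a reconnect that shrinks the server's limits is picked up on the
         * next iteration.
         */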
2121 retry:
2122         while (!done && index <= end) {
2123                 unsigned int i, nr_pages, found_pages, wsize, credits;
2124                 pgoff_t next = 0, tofind, saved_index = index;
2125
2126                 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2127                                                    &wsize, &credits);
2128                 if (rc)
2129                         break;
2130
2131                 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2132
2133                 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2134                                                   &found_pages);
2135                 if (!wdata) {
2136                         rc = -ENOMEM;
2137                         add_credits_and_wake_if(server, credits, 0);
2138                         break;
2139                 }
2140
2141                 if (found_pages == 0) {
2142                         kref_put(&wdata->refcount, cifs_writedata_release);
2143                         add_credits_and_wake_if(server, credits, 0);
2144                         break;
2145                 }
2146
2147                 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2148                                                end, &index, &next, &done);
2149
2150                 /* nothing to write? */
2151                 if (nr_pages == 0) {
2152                         kref_put(&wdata->refcount, cifs_writedata_release);
2153                         add_credits_and_wake_if(server, credits, 0);
2154                         continue;
2155                 }
2156
2157                 wdata->credits = credits;
2158
2159                 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2160
2161                 /* send failure -- clean up the mess */
2162                 if (rc != 0) {
2163                         add_credits_and_wake_if(server, wdata->credits, 0);
2164                         for (i = 0; i < nr_pages; ++i) {
2165                                 if (rc == -EAGAIN)
2166                                         redirty_page_for_writepage(wbc,
2167                                                            wdata->pages[i]);
2168                                 else
2169                                         SetPageError(wdata->pages[i]);
2170                                 end_page_writeback(wdata->pages[i]);
2171                                 put_page(wdata->pages[i]);
2172                         }
2173                         if (rc != -EAGAIN)
2174                                 mapping_set_error(mapping, rc);
2175                 }
2176                 kref_put(&wdata->refcount, cifs_writedata_release);
2177
2178                 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2179                         index = saved_index;
2180                         continue;
2181                 }
2182
2183                 wbc->nr_to_write -= nr_pages;
2184                 if (wbc->nr_to_write <= 0)
2185                         done = true;
2186
2187                 index = next;
2188         }
2189
2190         if (!scanned && !done) {
2191                 /*
2192                  * We hit the last page and there is more work to be done: wrap
2193                  * back to the start of the file
2194                  */
2195                 scanned = true;
2196                 index = 0;
2197                 goto retry;
2198         }
2199
2200         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2201                 mapping->writeback_index = index;
2202
2203         return rc;
2204 }
2205
2206 static int
2207 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2208 {
2209         int rc;
2210         unsigned int xid;
2211
2212         xid = get_xid();
2213 /* BB add check for wbc flags */
2214         get_page(page);
2215         if (!PageUptodate(page))
2216                 cifs_dbg(FYI, "ppw - page not up to date\n");
2217
2218         /*
2219          * Set the "writeback" flag, and clear "dirty" in the radix tree.
2220          *
2221          * A writepage() implementation always needs to do either this,
2222          * or re-dirty the page with "redirty_page_for_writepage()" in
2223          * the case of a failure.
2224          *
2225          * Just unlocking the page will cause the radix tree tag-bits
2226          * to fail to update with the state of the page correctly.
2227          */
2228         set_page_writeback(page);
2229 retry_write:
2230         rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2231         if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2232                 goto retry_write;
2233         else if (rc == -EAGAIN)
2234                 redirty_page_for_writepage(wbc, page);
2235         else if (rc != 0)
2236                 SetPageError(page);
2237         else
2238                 SetPageUptodate(page);
2239         end_page_writeback(page);
2240         put_page(page);
2241         free_xid(xid);
2242         return rc;
2243 }
2244
2245 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2246 {
2247         int rc = cifs_writepage_locked(page, wbc);
2248         unlock_page(page);
2249         return rc;
2250 }
2251
2252 static int cifs_write_end(struct file *file, struct address_space *mapping,
2253                         loff_t pos, unsigned len, unsigned copied,
2254                         struct page *page, void *fsdata)
2255 {
2256         int rc;
2257         struct inode *inode = mapping->host;
2258         struct cifsFileInfo *cfile = file->private_data;
2259         struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2260         __u32 pid;
2261
2262         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2263                 pid = cfile->pid;
2264         else
2265                 pid = current->tgid;
2266
2267         cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2268                  page, pos, copied);
2269
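        /*
         * PG_checked indicates that write_begin skipped reading the page
         * from the server; the page can only be marked uptodate here if the
         * copy covered everything that was requested.
         */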
2270         if (PageChecked(page)) {
2271                 if (copied == len)
2272                         SetPageUptodate(page);
2273                 ClearPageChecked(page);
2274         } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2275                 SetPageUptodate(page);
2276
2277         if (!PageUptodate(page)) {
2278                 char *page_data;
2279                 unsigned offset = pos & (PAGE_SIZE - 1);
2280                 unsigned int xid;
2281
2282                 xid = get_xid();
2283                 /* this is probably better than directly calling
2284                    partialpage_write since in this function the file handle is
2285                    known, which we might as well leverage */
2286                 /* BB check if anything else missing out of ppw
2287                    such as updating last write time */
2288                 page_data = kmap(page);
2289                 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2290                 /* if (rc < 0) should we set writebehind rc? */
2291                 kunmap(page);
2292
2293                 free_xid(xid);
2294         } else {
2295                 rc = copied;
2296                 pos += copied;
2297                 set_page_dirty(page);
2298         }
2299
2300         if (rc > 0) {
2301                 spin_lock(&inode->i_lock);
2302                 if (pos > inode->i_size)
2303                         i_size_write(inode, pos);
2304                 spin_unlock(&inode->i_lock);
2305         }
2306
2307         unlock_page(page);
2308         put_page(page);
2309
2310         return rc;
2311 }
2312
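/*
 * Strict variant of fsync: besides flushing dirty pages and asking the server
 * to flush, it also zaps the page cache when we no longer hold a read oplock,
 * since locally cached data may then be stale.
 */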
2313 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2314                       int datasync)
2315 {
2316         unsigned int xid;
2317         int rc = 0;
2318         struct cifs_tcon *tcon;
2319         struct TCP_Server_Info *server;
2320         struct cifsFileInfo *smbfile = file->private_data;
2321         struct inode *inode = file_inode(file);
2322         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2323
2324         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2325         if (rc)
2326                 return rc;
2327         inode_lock(inode);
2328
2329         xid = get_xid();
2330
2331         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2332                  file, datasync);
2333
2334         if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2335                 rc = cifs_zap_mapping(inode);
2336                 if (rc) {
2337                         cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2338                         rc = 0; /* don't care about it in fsync */
2339                 }
2340         }
2341
2342         tcon = tlink_tcon(smbfile->tlink);
2343         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2344                 server = tcon->ses->server;
2345                 if (server->ops->flush)
2346                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
2347                 else
2348                         rc = -ENOSYS;
2349         }
2350
2351         free_xid(xid);
2352         inode_unlock(inode);
2353         return rc;
2354 }
2355
2356 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2357 {
2358         unsigned int xid;
2359         int rc = 0;
2360         struct cifs_tcon *tcon;
2361         struct TCP_Server_Info *server;
2362         struct cifsFileInfo *smbfile = file->private_data;
2363         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2364         struct inode *inode = file->f_mapping->host;
2365
2366         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2367         if (rc)
2368                 return rc;
2369         inode_lock(inode);
2370
2371         xid = get_xid();
2372
2373         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2374                  file, datasync);
2375
2376         tcon = tlink_tcon(smbfile->tlink);
2377         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2378                 server = tcon->ses->server;
2379                 if (server->ops->flush)
2380                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
2381                 else
2382                         rc = -ENOSYS;
2383         }
2384
2385         free_xid(xid);
2386         inode_unlock(inode);
2387         return rc;
2388 }
2389
2390 /*
2391  * As the file closes, flush all cached write data for this inode, checking
2392  * for write-behind errors.
2393  */
2394 int cifs_flush(struct file *file, fl_owner_t id)
2395 {
2396         struct inode *inode = file_inode(file);
2397         int rc = 0;
2398
2399         if (file->f_mode & FMODE_WRITE)
2400                 rc = filemap_write_and_wait(inode->i_mapping);
2401
2402         cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2403
2404         return rc;
2405 }
2406
2407 static int
2408 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2409 {
2410         int rc = 0;
2411         unsigned long i;
2412
2413         for (i = 0; i < num_pages; i++) {
2414                 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2415                 if (!pages[i]) {
2416                         /*
2417                          * save number of pages we have already allocated and
2418                          * return with ENOMEM error
2419                          */
2420                         num_pages = i;
2421                         rc = -ENOMEM;
2422                         break;
2423                 }
2424         }
2425
2426         if (rc) {
2427                 for (i = 0; i < num_pages; i++)
2428                         put_page(pages[i]);
2429         }
2430         return rc;
2431 }
2432
2433 static inline
2434 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2435 {
2436         size_t num_pages;
2437         size_t clen;
2438
2439         clen = min_t(const size_t, len, wsize);
2440         num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2441
2442         if (cur_len)
2443                 *cur_len = clen;
2444
2445         return num_pages;
2446 }
2447
2448 static void
2449 cifs_uncached_writedata_release(struct kref *refcount)
2450 {
2451         int i;
2452         struct cifs_writedata *wdata = container_of(refcount,
2453                                         struct cifs_writedata, refcount);
2454
2455         for (i = 0; i < wdata->nr_pages; i++)
2456                 put_page(wdata->pages[i]);
2457         cifs_writedata_release(refcount);
2458 }
2459
2460 static void
2461 cifs_uncached_writev_complete(struct work_struct *work)
2462 {
2463         struct cifs_writedata *wdata = container_of(work,
2464                                         struct cifs_writedata, work);
2465         struct inode *inode = d_inode(wdata->cfile->dentry);
2466         struct cifsInodeInfo *cifsi = CIFS_I(inode);
2467
2468         spin_lock(&inode->i_lock);
2469         cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2470         if (cifsi->server_eof > inode->i_size)
2471                 i_size_write(inode, cifsi->server_eof);
2472         spin_unlock(&inode->i_lock);
2473
2474         complete(&wdata->done);
2475
2476         kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2477 }
2478
2479 static int
2480 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2481                       size_t *len, unsigned long *num_pages)
2482 {
2483         size_t save_len, copied, bytes, cur_len = *len;
2484         unsigned long i, nr_pages = *num_pages;
2485
2486         save_len = cur_len;
2487         for (i = 0; i < nr_pages; i++) {
2488                 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2489                 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2490                 cur_len -= copied;
2491                 /*
2492                  * If we didn't copy as much as we expected, then that
2493                  * may mean we trod into an unmapped area. Stop copying
2494                  * at that point. On the next pass through the big
2495                  * loop, we'll likely end up getting a zero-length
2496                  * write and bailing out of it.
2497                  */
2498                 if (copied < bytes)
2499                         break;
2500         }
2501         cur_len = save_len - cur_len;
2502         *len = cur_len;
2503
2504         /*
2505          * If we have no data to send, then that probably means that
2506          * the copy above failed altogether. That's most likely because
2507          * the address in the iovec was bogus. Return -EFAULT and let
2508          * the caller free anything we allocated and bail out.
2509          */
2510         if (!cur_len)
2511                 return -EFAULT;
2512
2513         /*
2514          * i + 1 now represents the number of pages we actually used in
2515          * the copy phase above.
2516          */
2517         *num_pages = i + 1;
2518         return 0;
2519 }
2520
2521 static int
2522 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2523                      struct cifsFileInfo *open_file,
2524                      struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
2525 {
2526         int rc = 0;
2527         size_t cur_len;
2528         unsigned long nr_pages, num_pages, i;
2529         struct cifs_writedata *wdata;
2530         struct iov_iter saved_from = *from;
2531         loff_t saved_offset = offset;
2532         pid_t pid;
2533         struct TCP_Server_Info *server;
2534
2535         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2536                 pid = open_file->pid;
2537         else
2538                 pid = current->tgid;
2539
2540         server = tlink_tcon(open_file->tlink)->ses->server;
2541
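        /*
         * Chunking loop for uncached writes: negotiate credits and wsize,
         * copy the next chunk of the iterator into freshly allocated pages
         * and issue an async write. On -EAGAIN the iterator is rewound to
         * the saved position so the failed chunk is rebuilt and resent.
         */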
2542         do {
2543                 unsigned int wsize, credits;
2544
2545                 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2546                                                    &wsize, &credits);
2547                 if (rc)
2548                         break;
2549
2550                 nr_pages = get_numpages(wsize, len, &cur_len);
2551                 wdata = cifs_writedata_alloc(nr_pages,
2552                                              cifs_uncached_writev_complete);
2553                 if (!wdata) {
2554                         rc = -ENOMEM;
2555                         add_credits_and_wake_if(server, credits, 0);
2556                         break;
2557                 }
2558
2559                 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2560                 if (rc) {
2561                         kfree(wdata);
2562                         add_credits_and_wake_if(server, credits, 0);
2563                         break;
2564                 }
2565
2566                 num_pages = nr_pages;
2567                 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2568                 if (rc) {
2569                         for (i = 0; i < nr_pages; i++)
2570                                 put_page(wdata->pages[i]);
2571                         kfree(wdata);
2572                         add_credits_and_wake_if(server, credits, 0);
2573                         break;
2574                 }
2575
2576                 /*
2577                  * Bring nr_pages down to the number of pages we actually used,
2578                  * and free any pages that we didn't use.
2579                  */
2580                 for ( ; nr_pages > num_pages; nr_pages--)
2581                         put_page(wdata->pages[nr_pages - 1]);
2582
2583                 wdata->sync_mode = WB_SYNC_ALL;
2584                 wdata->nr_pages = nr_pages;
2585                 wdata->offset = (__u64)offset;
2586                 wdata->cfile = cifsFileInfo_get(open_file);
2587                 wdata->pid = pid;
2588                 wdata->bytes = cur_len;
2589                 wdata->pagesz = PAGE_SIZE;
2590                 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2591                 wdata->credits = credits;
2592
2593                 if (!wdata->cfile->invalidHandle ||
2594                     !cifs_reopen_file(wdata->cfile, false))
2595                         rc = server->ops->async_writev(wdata,
2596                                         cifs_uncached_writedata_release);
2597                 if (rc) {
2598                         add_credits_and_wake_if(server, wdata->credits, 0);
2599                         kref_put(&wdata->refcount,
2600                                  cifs_uncached_writedata_release);
2601                         if (rc == -EAGAIN) {
2602                                 *from = saved_from;
2603                                 iov_iter_advance(from, offset - saved_offset);
2604                                 continue;
2605                         }
2606                         break;
2607                 }
2608
2609                 list_add_tail(&wdata->list, wdata_list);
2610                 offset += cur_len;
2611                 len -= cur_len;
2612         } while (len > 0);
2613
2614         return rc;
2615 }
2616
2617 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
2618 {
2619         struct file *file = iocb->ki_filp;
2620         ssize_t total_written = 0;
2621         struct cifsFileInfo *open_file;
2622         struct cifs_tcon *tcon;
2623         struct cifs_sb_info *cifs_sb;
2624         struct cifs_writedata *wdata, *tmp;
2625         struct list_head wdata_list;
2626         struct iov_iter saved_from = *from;
2627         int rc;
2628
2629         /*
2630          * BB - optimize for the case when signing is disabled. We can drop this
2631          * extra memory-to-memory copying and use iovec buffers for constructing
2632          * the write request.
2633          */
2634
2635         rc = generic_write_checks(iocb, from);
2636         if (rc <= 0)
2637                 return rc;
2638
2639         INIT_LIST_HEAD(&wdata_list);
2640         cifs_sb = CIFS_FILE_SB(file);
2641         open_file = file->private_data;
2642         tcon = tlink_tcon(open_file->tlink);
2643
2644         if (!tcon->ses->server->ops->async_writev)
2645                 return -ENOSYS;
2646
2647         rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2648                                   open_file, cifs_sb, &wdata_list);
2649
2650         /*
2651          * If at least one write was successfully sent, then discard any rc
2652          * value from the send phase. If the remaining writes succeed, then
2653          * we'll end up returning whatever was written. If one fails, then
2654          * we'll get a new rc value from that.
2655          */
2656         if (!list_empty(&wdata_list))
2657                 rc = 0;
2658
2659         /*
2660          * Wait for and collect replies for any successful sends in order of
2661          * increasing offset. Once an error is hit or we get a fatal signal
2662          * while waiting, then return without waiting for any more replies.
2663          */
2664 restart_loop:
2665         list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2666                 if (!rc) {
2667                         /* FIXME: freezable too? */
2668                         rc = wait_for_completion_killable(&wdata->done);
2669                         if (rc)
2670                                 rc = -EINTR;
2671                         else if (wdata->result)
2672                                 rc = wdata->result;
2673                         else
2674                                 total_written += wdata->bytes;
2675
2676                         /* resend the call if it's a retryable error */
2677                         if (rc == -EAGAIN) {
2678                                 struct list_head tmp_list;
2679                                 struct iov_iter tmp_from = saved_from;
2680
2681                                 INIT_LIST_HEAD(&tmp_list);
2682                                 list_del_init(&wdata->list);
2683
2684                                 iov_iter_advance(&tmp_from,
2685                                                  wdata->offset - iocb->ki_pos);
2686
2687                                 rc = cifs_write_from_iter(wdata->offset,
2688                                                 wdata->bytes, &tmp_from,
2689                                                 open_file, cifs_sb, &tmp_list);
2690
2691                                 list_splice(&tmp_list, &wdata_list);
2692
2693                                 kref_put(&wdata->refcount,
2694                                          cifs_uncached_writedata_release);
2695                                 goto restart_loop;
2696                         }
2697                 }
2698                 list_del_init(&wdata->list);
2699                 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2700         }
2701
2702         if (unlikely(!total_written))
2703                 return rc;
2704
2705         iocb->ki_pos += total_written;
2706         set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
2707         cifs_stats_bytes_written(tcon, total_written);
2708         return total_written;
2709 }
2710
2711 static ssize_t
2712 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2713 {
2714         struct file *file = iocb->ki_filp;
2715         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2716         struct inode *inode = file->f_mapping->host;
2717         struct cifsInodeInfo *cinode = CIFS_I(inode);
2718         struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2719         ssize_t rc;
2720
2721         /*
2722          * We need to hold the sem to be sure nobody modifies the lock list
2723          * with a brlock that prevents writing.
2724          */
2725         down_read(&cinode->lock_sem);
2726         inode_lock(inode);
2727
2728         rc = generic_write_checks(iocb, from);
2729         if (rc <= 0)
2730                 goto out;
2731
2732         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2733                                      server->vals->exclusive_lock_type, NULL,
2734                                      CIFS_WRITE_OP))
2735                 rc = __generic_file_write_iter(iocb, from);
2736         else
2737                 rc = -EACCES;
2738 out:
2739         inode_unlock(inode);
2740
2741         if (rc > 0)
2742                 rc = generic_write_sync(iocb, rc);
2743         up_read(&cinode->lock_sem);
2744         return rc;
2745 }
2746
2747 ssize_t
2748 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2749 {
2750         struct inode *inode = file_inode(iocb->ki_filp);
2751         struct cifsInodeInfo *cinode = CIFS_I(inode);
2752         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2753         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2754                                                 iocb->ki_filp->private_data;
2755         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2756         ssize_t written;
2757
2758         written = cifs_get_writer(cinode);
2759         if (written)
2760                 return written;
2761
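        /*
         * With a write oplock the data can be cached: use the generic path
         * when the server supports POSIX byte-range locks, otherwise go
         * through cifs_writev which first checks for conflicting mandatory
         * locks. Without an oplock, write through to the server.
         */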
2762         if (CIFS_CACHE_WRITE(cinode)) {
2763                 if (cap_unix(tcon->ses) &&
2764                 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2765                   && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2766                         written = generic_file_write_iter(iocb, from);
2767                         goto out;
2768                 }
2769                 written = cifs_writev(iocb, from);
2770                 goto out;
2771         }
2772         /*
2773          * For non-oplocked files in strict cache mode we need to write the data
2774          * to the server exactly from pos to pos+len-1 rather than flush all
2775          * affected pages because it may cause an error with mandatory locks on
2776          * these pages but not on the region from pos to pos+len-1.
2777          */
2778         written = cifs_user_writev(iocb, from);
2779         if (written > 0 && CIFS_CACHE_READ(cinode)) {
2780                 /*
2781                  * Windows 7 server can delay breaking a level2 oplock if a write
2782                  * request comes - break it on the client to prevent reading
2783                  * stale data.
2784                  */
2785                 cifs_zap_mapping(inode);
2786                 cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
2787                          inode);
2788                 cinode->oplock = 0;
2789         }
2790 out:
2791         cifs_put_writer(cinode);
2792         return written;
2793 }
2794
2795 static struct cifs_readdata *
2796 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2797 {
2798         struct cifs_readdata *rdata;
2799
2800         rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2801                         GFP_KERNEL);
2802         if (rdata != NULL) {
2803                 kref_init(&rdata->refcount);
2804                 INIT_LIST_HEAD(&rdata->list);
2805                 init_completion(&rdata->done);
2806                 INIT_WORK(&rdata->work, complete);
2807         }
2808
2809         return rdata;
2810 }
2811
2812 void
2813 cifs_readdata_release(struct kref *refcount)
2814 {
2815         struct cifs_readdata *rdata = container_of(refcount,
2816                                         struct cifs_readdata, refcount);
2817
2818         if (rdata->cfile)
2819                 cifsFileInfo_put(rdata->cfile);
2820
2821         kfree(rdata);
2822 }
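
/*
 * Typical readdata lifecycle, sketched under the assumption that the
 * async_readv implementations take their own reference for the wire
 * request (complete_fn below is a placeholder name):
 *
 *	rdata = cifs_readdata_alloc(npages, complete_fn); // ref == 1 (caller)
 *	rc = server->ops->async_readv(rdata);             // ref == 2 (I/O)
 *	...
 *	// completion work fires complete_fn, which drops the I/O reference
 *	kref_put(&rdata->refcount, cifs_readdata_release); // caller ref, frees
 */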
2823
2824 static int
2825 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2826 {
2827         int rc = 0;
2828         struct page *page;
2829         unsigned int i;
2830
2831         for (i = 0; i < nr_pages; i++) {
2832                 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2833                 if (!page) {
2834                         rc = -ENOMEM;
2835                         break;
2836                 }
2837                 rdata->pages[i] = page;
2838         }
2839
2840         if (rc) {
2841                 while (i--) { /* free only the pages allocated above */
2842                         put_page(rdata->pages[i]);
2843                         rdata->pages[i] = NULL;
2844                 }
2845         }
2846         return rc;
2847 }
2848
2849 static void
2850 cifs_uncached_readdata_release(struct kref *refcount)
2851 {
2852         struct cifs_readdata *rdata = container_of(refcount,
2853                                         struct cifs_readdata, refcount);
2854         unsigned int i;
2855
2856         for (i = 0; i < rdata->nr_pages; i++) {
2857                 put_page(rdata->pages[i]);
2858                 rdata->pages[i] = NULL;
2859         }
2860         cifs_readdata_release(refcount);
2861 }
2862
2863 /**
2864  * cifs_readdata_to_iov - copy data from pages in response to an iovec
2865  * @rdata:      the readdata response with list of pages holding data
2866  * @iter:       destination for our data
2867  *
2868  * This function copies data from a list of pages in a readdata response
2869  * into the destination iterator, page by page, until either got_bytes
2870  * have been consumed or the iterator cannot accept any more data.
2871  */
2872 static int
2873 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2874 {
2875         size_t remaining = rdata->got_bytes;
2876         unsigned int i;
2877
2878         for (i = 0; i < rdata->nr_pages; i++) {
2879                 struct page *page = rdata->pages[i];
2880                 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2881                 size_t written = copy_page_to_iter(page, 0, copy, iter);
2882                 remaining -= written;
2883                 if (written < copy && iov_iter_count(iter) > 0)
2884                         break;
2885         }
2886         return remaining ? -EFAULT : 0;
2887 }
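
/*
 * Worked example, assuming 4K pages: with got_bytes == 6000 the loop
 * copies 4096 bytes from page 0 and 1904 from page 1. A fault in the
 * user buffer makes copy_page_to_iter() return short while the iterator
 * still has room, so we break out; either way any leftover in
 * "remaining" turns into -EFAULT.
 */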
2888
2889 static void
2890 cifs_uncached_readv_complete(struct work_struct *work)
2891 {
2892         struct cifs_readdata *rdata = container_of(work,
2893                                                 struct cifs_readdata, work);
2894
2895         complete(&rdata->done);
2896         kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2897 }
2898
2899 static int
2900 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2901                         struct cifs_readdata *rdata, unsigned int len)
2902 {
2903         int result = 0;
2904         unsigned int i;
2905         unsigned int nr_pages = rdata->nr_pages;
2906
2907         rdata->got_bytes = 0;
2908         rdata->tailsz = PAGE_SIZE;
2909         for (i = 0; i < nr_pages; i++) {
2910                 struct page *page = rdata->pages[i];
2911                 size_t n;
2912
2913                 if (len <= 0) {
2914                         /* no need to hold page hostage */
2915                         rdata->pages[i] = NULL;
2916                         rdata->nr_pages--;
2917                         put_page(page);
2918                         continue;
2919                 }
2920                 n = len;
2921                 if (len >= PAGE_SIZE) {
2922                         /* enough data to fill the page */
2923                         n = PAGE_SIZE;
2924                         len -= n;
2925                 } else {
2926                         zero_user(page, len, PAGE_SIZE - len);
2927                         rdata->tailsz = len;
2928                         len = 0;
2929                 }
2930                 result = cifs_read_page_from_socket(server, page, n);
2931                 if (result < 0)
2932                         break;
2933
2934                 rdata->got_bytes += result;
2935         }
2936
2937         return rdata->got_bytes > 0 && result != -ECONNABORTED ?
2938                                                 rdata->got_bytes : result;
2939 }
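
/*
 * Worked example, assuming 4K pages: for a 5000-byte read, page 0 is
 * filled completely (n == 4096, len drops to 904), page 1 receives the
 * 904-byte tail with the rest of the page zeroed and tailsz set to 904,
 * and any remaining pages are released rather than held hostage.
 */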
2940
2941 static int
2942 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
2943                      struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
2944 {
2945         struct cifs_readdata *rdata;
2946         unsigned int npages, rsize, credits;
2947         size_t cur_len;
2948         int rc;
2949         pid_t pid;
2950         struct TCP_Server_Info *server;
2951
2952         server = tlink_tcon(open_file->tlink)->ses->server;
2953
2954         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2955                 pid = open_file->pid;
2956         else
2957                 pid = current->tgid;
2958
2959         do {
2960                 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
2961                                                    &rsize, &credits);
2962                 if (rc)
2963                         break;
2964
2965                 cur_len = min_t(const size_t, len, rsize);
2966                 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
2967
2968                 /* allocate a readdata struct */
2969                 rdata = cifs_readdata_alloc(npages,
2970                                             cifs_uncached_readv_complete);
2971                 if (!rdata) {
2972                         add_credits_and_wake_if(server, credits, 0);
2973                         rc = -ENOMEM;
2974                         break;
2975                 }
2976
2977                 rc = cifs_read_allocate_pages(rdata, npages);
2978                 if (rc)
2979                         goto error;
2980
2981                 rdata->cfile = cifsFileInfo_get(open_file);
2982                 rdata->nr_pages = npages;
2983                 rdata->offset = offset;
2984                 rdata->bytes = cur_len;
2985                 rdata->pid = pid;
2986                 rdata->pagesz = PAGE_SIZE;
2987                 rdata->read_into_pages = cifs_uncached_read_into_pages;
2988                 rdata->credits = credits;
2989
2990                 if (!rdata->cfile->invalidHandle ||
2991                     !cifs_reopen_file(rdata->cfile, true))
2992                         rc = server->ops->async_readv(rdata);
2993 error:
2994                 if (rc) {
2995                         add_credits_and_wake_if(server, rdata->credits, 0);
2996                         kref_put(&rdata->refcount,
2997                                  cifs_uncached_readdata_release);
2998                         if (rc == -EAGAIN)
2999                                 continue;
3000                         break;
3001                 }
3002
3003                 list_add_tail(&rdata->list, rdata_list);
3004                 offset += cur_len;
3005                 len -= cur_len;
3006         } while (len > 0);
3007
3008         return rc;
3009 }
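
/*
 * Worked example: assuming wait_mtu_credits() grants a 64K rsize each
 * time, a 100K read is split into two rdata requests - 64K at the
 * caller's offset and 36K at offset + 64K - each queued on rdata_list
 * for the caller to wait on in increasing-offset order.
 */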
3010
3011 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3012 {
3013         struct file *file = iocb->ki_filp;
3014         ssize_t rc;
3015         size_t len;
3016         ssize_t total_read = 0;
3017         loff_t offset = iocb->ki_pos;
3018         struct cifs_sb_info *cifs_sb;
3019         struct cifs_tcon *tcon;
3020         struct cifsFileInfo *open_file;
3021         struct cifs_readdata *rdata, *tmp;
3022         struct list_head rdata_list;
3023
3024         len = iov_iter_count(to);
3025         if (!len)
3026                 return 0;
3027
3028         INIT_LIST_HEAD(&rdata_list);
3029         cifs_sb = CIFS_FILE_SB(file);
3030         open_file = file->private_data;
3031         tcon = tlink_tcon(open_file->tlink);
3032
3033         if (!tcon->ses->server->ops->async_readv)
3034                 return -ENOSYS;
3035
3036         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3037                 cifs_dbg(FYI, "attempting read on write only file instance\n");
3038
3039         rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3040
3041         /* if at least one read request was sent successfully, reset rc */
3042         if (!list_empty(&rdata_list))
3043                 rc = 0;
3044
3045         len = iov_iter_count(to);
3046         /* the loop below should proceed in the order of increasing offsets */
3047 again:
3048         list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3049                 if (!rc) {
3050                         /* FIXME: freezable sleep too? */
3051                         rc = wait_for_completion_killable(&rdata->done);
3052                         if (rc)
3053                                 rc = -EINTR;
3054                         else if (rdata->result == -EAGAIN) {
3055                                 /* resend call if it's a retryable error */
3056                                 struct list_head tmp_list;
3057                                 unsigned int got_bytes = rdata->got_bytes;
3058
3059                                 list_del_init(&rdata->list);
3060                                 INIT_LIST_HEAD(&tmp_list);
3061
3062                                 /*
3063                                  * Got a part of data and then reconnect has
3064                                  * happened -- fill the buffer and continue
3065                                  * reading.
3066                                  */
3067                                 if (got_bytes && got_bytes < rdata->bytes) {
3068                                         rc = cifs_readdata_to_iov(rdata, to);
3069                                         if (rc) {
3070                                                 kref_put(&rdata->refcount,
3071                                                 cifs_uncached_readdata_release);
3072                                                 continue;
3073                                         }
3074                                 }
3075
3076                                 rc = cifs_send_async_read(
3077                                                 rdata->offset + got_bytes,
3078                                                 rdata->bytes - got_bytes,
3079                                                 rdata->cfile, cifs_sb,
3080                                                 &tmp_list);
3081
3082                                 list_splice(&tmp_list, &rdata_list);
3083
3084                                 kref_put(&rdata->refcount,
3085                                          cifs_uncached_readdata_release);
3086                                 goto again;
3087                         } else if (rdata->result)
3088                                 rc = rdata->result;
3089                         else
3090                                 rc = cifs_readdata_to_iov(rdata, to);
3091
3092                         /* if there was a short read -- discard anything left */
3093                         if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3094                                 rc = -ENODATA;
3095                 }
3096                 list_del_init(&rdata->list);
3097                 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3098         }
3099
3100         total_read = len - iov_iter_count(to);
3101
3102         cifs_stats_bytes_read(tcon, total_read);
3103
3104         /* mask nodata case */
3105         if (rc == -ENODATA)
3106                 rc = 0;
3107
3108         if (total_read) {
3109                 iocb->ki_pos += total_read;
3110                 return total_read;
3111         }
3112         return rc;
3113 }
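
/*
 * Resend sketch for the -EAGAIN branch above: if a 64K rdata completes
 * with -EAGAIN after 16K arrived, those 16K are copied to the user
 * buffer, a fresh request for the remaining 48K at offset + 16K is
 * spliced back onto rdata_list, and the wait loop restarts from the top.
 */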
3114
3115 ssize_t
3116 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3117 {
3118         struct inode *inode = file_inode(iocb->ki_filp);
3119         struct cifsInodeInfo *cinode = CIFS_I(inode);
3120         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3121         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3122                                                 iocb->ki_filp->private_data;
3123         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3124         int rc = -EACCES;
3125
3126         /*
3127          * In strict cache mode we need to read from the server all the time
3128          * if we don't have a level II oplock because the server can delay the
3129          * mtime change - so we can't make a decision about invalidating the
3130          * inode. Page reads can also fail if there are mandatory locks on
3131          * pages affected by this read but not on the region from pos to
3132          * pos+len-1.
3133          */
3134         if (!CIFS_CACHE_READ(cinode))
3135                 return cifs_user_readv(iocb, to);
3136
3137         if (cap_unix(tcon->ses) &&
3138             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3139             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3140                 return generic_file_read_iter(iocb, to);
3141
3142         /*
3143          * We need to hold the sem to be sure nobody modifies lock list
3144          * with a brlock that prevents reading.
3145          */
3146         down_read(&cinode->lock_sem);
3147         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3148                                      tcon->ses->server->vals->shared_lock_type,
3149                                      NULL, CIFS_READ_OP))
3150                 rc = generic_file_read_iter(iocb, to);
3151         up_read(&cinode->lock_sem);
3152         return rc;
3153 }
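
/*
 * Read-side decision ladder, as a sketch: no read oplock -> read
 * uncached via cifs_user_readv(); POSIX fcntl caps with brlocks
 * enabled -> generic_file_read_iter(); otherwise read through the page
 * cache only when no mandatory brlock conflicts with [pos, pos+len-1].
 */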
3154
3155 static ssize_t
3156 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3157 {
3158         int rc = -EACCES;
3159         unsigned int bytes_read = 0;
3160         unsigned int total_read;
3161         unsigned int current_read_size;
3162         unsigned int rsize;
3163         struct cifs_sb_info *cifs_sb;
3164         struct cifs_tcon *tcon;
3165         struct TCP_Server_Info *server;
3166         unsigned int xid;
3167         char *cur_offset;
3168         struct cifsFileInfo *open_file;
3169         struct cifs_io_parms io_parms;
3170         int buf_type = CIFS_NO_BUFFER;
3171         __u32 pid;
3172
3173         xid = get_xid();
3174         cifs_sb = CIFS_FILE_SB(file);
3175
3176         /* FIXME: set up handlers for larger reads and/or convert to async */
3177         rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3178
3179         if (file->private_data == NULL) {
3180                 rc = -EBADF;
3181                 free_xid(xid);
3182                 return rc;
3183         }
3184         open_file = file->private_data;
3185         tcon = tlink_tcon(open_file->tlink);
3186         server = tcon->ses->server;
3187
3188         if (!server->ops->sync_read) {
3189                 free_xid(xid);
3190                 return -ENOSYS;
3191         }
3192
3193         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3194                 pid = open_file->pid;
3195         else
3196                 pid = current->tgid;
3197
3198         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3199                 cifs_dbg(FYI, "attempting read on write only file instance\n");
3200
3201         for (total_read = 0, cur_offset = read_data; read_size > total_read;
3202              total_read += bytes_read, cur_offset += bytes_read) {
3203                 do {
3204                         current_read_size = min_t(uint, read_size - total_read,
3205                                                   rsize);
3206                         /*
3207                          * For Windows ME and 9x we do not want to request more
3208                          * than the server negotiated, since it would refuse the
3209                          * read otherwise.
3210                          */
3211                         if ((tcon->ses) && !(tcon->ses->capabilities &
3212                                 tcon->ses->server->vals->cap_large_files)) {
3213                                 current_read_size = min_t(uint,
3214                                         current_read_size, CIFSMaxBufSize);
3215                         }
3216                         if (open_file->invalidHandle) {
3217                                 rc = cifs_reopen_file(open_file, true);
3218                                 if (rc != 0)
3219                                         break;
3220                         }
3221                         io_parms.pid = pid;
3222                         io_parms.tcon = tcon;
3223                         io_parms.offset = *offset;
3224                         io_parms.length = current_read_size;
3225                         rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3226                                                     &bytes_read, &cur_offset,
3227                                                     &buf_type);
3228                 } while (rc == -EAGAIN);
3229
3230                 if (rc || (bytes_read == 0)) {
3231                         if (total_read) {
3232                                 break;
3233                         } else {
3234                                 free_xid(xid);
3235                                 return rc;
3236                         }
3237                 } else {
3238                         cifs_stats_bytes_read(tcon, total_read);
3239                         *offset += bytes_read;
3240                 }
3241         }
3242         free_xid(xid);
3243         return total_read;
3244 }
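
/*
 * Sketch of the loop above: a large request is issued as a series of
 * sync_read calls of at most rsize bytes (further capped at
 * CIFSMaxBufSize for servers without the large-file capability),
 * advancing *offset and cur_offset by bytes_read each iteration until
 * the buffer is filled, the server returns zero bytes, or an error hits.
 */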
3245
3246 /*
3247  * If the page is mmap'ed into a process' page tables, then we need to make
3248  * sure that it doesn't change while being written back.
3249  */
3250 static int
3251 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3252 {
3253         struct page *page = vmf->page;
3254
3255         lock_page(page);
3256         return VM_FAULT_LOCKED;
3257 }
3258
3259 static const struct vm_operations_struct cifs_file_vm_ops = {
3260         .fault = filemap_fault,
3261         .map_pages = filemap_map_pages,
3262         .page_mkwrite = cifs_page_mkwrite,
3263 };
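
/*
 * Returning VM_FAULT_LOCKED with the page lock held is what keeps
 * writeback from snapshotting a page mid-update. A sketch of the
 * triggering sequence from userspace:
 *
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ptr[0] = 'x';	// write fault -> cifs_page_mkwrite() -> lock_page()
 *
 * after which the VM dirties the page and unlocks it.
 */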
3264
3265 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3266 {
3267         int rc, xid;
3268         struct inode *inode = file_inode(file);
3269
3270         xid = get_xid();
3271
3272         /* do not leak the xid if zapping the mapping fails */
3273         if (!CIFS_CACHE_READ(CIFS_I(inode)))
3274                 rc = cifs_zap_mapping(inode);
3275         if (!rc)
3276                 rc = generic_file_mmap(file, vma);
3277         if (!rc)
3278                 vma->vm_ops = &cifs_file_vm_ops;
3279         free_xid(xid);
3280         return rc;
3281 }
3284
3285 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3286 {
3287         int rc, xid;
3288
3289         xid = get_xid();
3290         rc = cifs_revalidate_file(file);
3291         if (rc) {
3292                 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3293                          rc);
3294                 free_xid(xid);
3295                 return rc;
3296         }
3297         rc = generic_file_mmap(file, vma);
3298         if (rc == 0)
3299                 vma->vm_ops = &cifs_file_vm_ops;
3300         free_xid(xid);
3301         return rc;
3302 }
3303
3304 static void
3305 cifs_readv_complete(struct work_struct *work)
3306 {
3307         unsigned int i, got_bytes;
3308         struct cifs_readdata *rdata = container_of(work,
3309                                                 struct cifs_readdata, work);
3310
3311         got_bytes = rdata->got_bytes;
3312         for (i = 0; i < rdata->nr_pages; i++) {
3313                 struct page *page = rdata->pages[i];
3314
3315                 lru_cache_add_file(page);
3316
3317                 if (rdata->result == 0 ||
3318                     (rdata->result == -EAGAIN && got_bytes)) {
3319                         flush_dcache_page(page);
3320                         SetPageUptodate(page);
3321                 }
3322
3323                 unlock_page(page);
3324
3325                 if (rdata->result == 0 ||
3326                     (rdata->result == -EAGAIN && got_bytes))
3327                         cifs_readpage_to_fscache(rdata->mapping->host, page);
3328
3329                 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3330
3331                 put_page(page);
3332                 rdata->pages[i] = NULL;
3333         }
3334         kref_put(&rdata->refcount, cifs_readdata_release);
3335 }
3336
3337 static int
3338 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3339                         struct cifs_readdata *rdata, unsigned int len)
3340 {
3341         int result = 0;
3342         unsigned int i;
3343         u64 eof;
3344         pgoff_t eof_index;
3345         unsigned int nr_pages = rdata->nr_pages;
3346
3347         /* determine the eof that the server (probably) has */
3348         eof = CIFS_I(rdata->mapping->host)->server_eof;
3349         eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3350         cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3351
3352         rdata->got_bytes = 0;
3353         rdata->tailsz = PAGE_SIZE;
3354         for (i = 0; i < nr_pages; i++) {
3355                 struct page *page = rdata->pages[i];
3356                 size_t n = PAGE_SIZE;
3357
3358                 if (len >= PAGE_SIZE) {
3359                         len -= PAGE_SIZE;
3360                 } else if (len > 0) {
3361                         /* enough for partial page, fill and zero the rest */
3362                         zero_user(page, len, PAGE_SIZE - len);
3363                         n = rdata->tailsz = len;
3364                         len = 0;
3365                 } else if (page->index > eof_index) {
3366                         /*
3367                          * The VFS will not try to do readahead past the
3368                          * i_size, but it's possible that we have outstanding
3369                          * writes with gaps in the middle and the i_size hasn't
3370                          * caught up yet. Populate those with zeroed out pages
3371                          * to prevent the VFS from repeatedly attempting to
3372                          * fill them until the writes are flushed.
3373                          */
3374                         zero_user(page, 0, PAGE_SIZE);
3375                         lru_cache_add_file(page);
3376                         flush_dcache_page(page);
3377                         SetPageUptodate(page);
3378                         unlock_page(page);
3379                         put_page(page);
3380                         rdata->pages[i] = NULL;
3381                         rdata->nr_pages--;
3382                         continue;
3383                 } else {
3384                         /* no need to hold page hostage */
3385                         lru_cache_add_file(page);
3386                         unlock_page(page);
3387                         put_page(page);
3388                         rdata->pages[i] = NULL;
3389                         rdata->nr_pages--;
3390                         continue;
3391                 }
3392
3393                 result = cifs_read_page_from_socket(server, page, n);
3394                 if (result < 0)
3395                         break;
3396
3397                 rdata->got_bytes += result;
3398         }
3399
3400         return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3401                                                 rdata->got_bytes : result;
3402 }
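
/*
 * Worked example, assuming 4K pages: with server_eof == 10000,
 * eof_index == (10000 - 1) >> PAGE_SHIFT == 2, so a readahead page with
 * index 3 or higher that received no data is zero-filled and marked
 * uptodate rather than left for the VFS to re-request forever.
 */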
3403
3404 static int
3405 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3406                     unsigned int rsize, struct list_head *tmplist,
3407                     unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3408 {
3409         struct page *page, *tpage;
3410         unsigned int expected_index;
3411         int rc;
3412         gfp_t gfp = readahead_gfp_mask(mapping);
3413
3414         INIT_LIST_HEAD(tmplist);
3415
3416         page = list_entry(page_list->prev, struct page, lru);
3417
3418         /*
3419          * Lock the page and put it in the cache. Since no one else
3420          * should have access to this page, we're safe to simply set
3421          * PG_locked without checking it first.
3422          */
3423         __SetPageLocked(page);
3424         rc = add_to_page_cache_locked(page, mapping,
3425                                       page->index, gfp);
3426
3427         /* give up if we can't stick it in the cache */
3428         if (rc) {
3429                 __ClearPageLocked(page);
3430                 return rc;
3431         }
3432
3433         /* move first page to the tmplist */
3434         *offset = (loff_t)page->index << PAGE_SHIFT;
3435         *bytes = PAGE_SIZE;
3436         *nr_pages = 1;
3437         list_move_tail(&page->lru, tmplist);
3438
3439         /* now try and add more pages onto the request */
3440         expected_index = page->index + 1;
3441         list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3442                 /* discontinuity ? */
3443                 if (page->index != expected_index)
3444                         break;
3445
3446                 /* would this page push the read over the rsize? */
3447                 if (*bytes + PAGE_SIZE > rsize)
3448                         break;
3449
3450                 __SetPageLocked(page);
3451                 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
3452                         __ClearPageLocked(page);
3453                         break;
3454                 }
3455                 list_move_tail(&page->lru, tmplist);
3456                 (*bytes) += PAGE_SIZE;
3457                 expected_index++;
3458                 (*nr_pages)++;
3459         }
3460         return rc;
3461 }
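
/*
 * Worked example, assuming 4K pages and rsize >= 16K: page_list arrives
 * in declining index order, e.g. 7,6,5,4. The list tail (index 4) seeds
 * the request with offset == 4 << PAGE_SHIFT and bytes == 4096, then
 * indexes 5..7 are appended while contiguous and while
 * bytes + PAGE_SIZE <= rsize, yielding nr_pages == 4, bytes == 16384.
 */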
3462
3463 static int cifs_readpages(struct file *file, struct address_space *mapping,
3464         struct list_head *page_list, unsigned num_pages)
3465 {
3466         int rc;
3467         struct list_head tmplist;
3468         struct cifsFileInfo *open_file = file->private_data;
3469         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3470         struct TCP_Server_Info *server;
3471         pid_t pid;
3472
3473         /*
3474          * Reads as many pages as possible from fscache. Returns -ENOBUFS
3475          * immediately if the cookie is negative
3476          *
3477          * After this point, every page in the list might have PG_fscache set,
3478          * so we will need to clean that up off of every page we don't use.
3479          */
3480         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3481                                          &num_pages);
3482         if (rc == 0)
3483                 return rc;
3484
3485         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3486                 pid = open_file->pid;
3487         else
3488                 pid = current->tgid;
3489
3490         rc = 0;
3491         server = tlink_tcon(open_file->tlink)->ses->server;
3492
3493         cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3494                  __func__, file, mapping, num_pages);
3495
3496         /*
3497          * Start with the page at end of list and move it to private
3498          * list. Do the same with any following pages until we hit
3499          * the rsize limit, hit an index discontinuity, or run out of
3500          * pages. Issue the async read and then start the loop again
3501          * until the list is empty.
3502          *
3503          * Note that list order is important. The page_list is in
3504          * the order of declining indexes. When we put the pages in
3505          * the rdata->pages, then we want them in increasing order.
3506          */
3507         while (!list_empty(page_list)) {
3508                 unsigned int i, nr_pages, bytes, rsize;
3509                 loff_t offset;
3510                 struct page *page, *tpage;
3511                 struct cifs_readdata *rdata;
3512                 unsigned credits;
3513
3514                 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3515                                                    &rsize, &credits);
3516                 if (rc)
3517                         break;
3518
3519                 /*
3520                  * Give up immediately if rsize is too small to read an entire
3521                  * page. The VFS will fall back to readpage. We should never
3522                  * reach this point however since we set ra_pages to 0 when the
3523                  * rsize is smaller than a cache page.
3524                  */
3525                 if (unlikely(rsize < PAGE_SIZE)) {
3526                         add_credits_and_wake_if(server, credits, 0);
3527                         return 0;
3528                 }
3529
3530                 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3531                                          &nr_pages, &offset, &bytes);
3532                 if (rc) {
3533                         add_credits_and_wake_if(server, credits, 0);
3534                         break;
3535                 }
3536
3537                 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
3538                 if (!rdata) {
3539                         /* best to give up if we're out of mem */
3540                         list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3541                                 list_del(&page->lru);
3542                                 lru_cache_add_file(page);
3543                                 unlock_page(page);
3544                                 put_page(page);
3545                         }
3546                         rc = -ENOMEM;
3547                         add_credits_and_wake_if(server, credits, 0);
3548                         break;
3549                 }
3550
3551                 rdata->cfile = cifsFileInfo_get(open_file);
3552                 rdata->mapping = mapping;
3553                 rdata->offset = offset;
3554                 rdata->bytes = bytes;
3555                 rdata->pid = pid;
3556                 rdata->pagesz = PAGE_SIZE;
3557                 rdata->read_into_pages = cifs_readpages_read_into_pages;
3558                 rdata->credits = credits;
3559
3560                 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3561                         list_del(&page->lru);
3562                         rdata->pages[rdata->nr_pages++] = page;
3563                 }
3564
3565                 if (!rdata->cfile->invalidHandle ||
3566                     !cifs_reopen_file(rdata->cfile, true))
3567                         rc = server->ops->async_readv(rdata);
3568                 if (rc) {
3569                         add_credits_and_wake_if(server, rdata->credits, 0);
3570                         for (i = 0; i < rdata->nr_pages; i++) {
3571                                 page = rdata->pages[i];
3572                                 lru_cache_add_file(page);
3573                                 unlock_page(page);
3574                                 put_page(page);
3575                         }
3576                         /* Fallback to the readpage in error/reconnect cases */
3577                         kref_put(&rdata->refcount, cifs_readdata_release);
3578                         break;
3579                 }
3580
3581                 kref_put(&rdata->refcount, cifs_readdata_release);
3582         }
3583
3584         /* Any pages that have been shown to fscache but didn't get added to
3585          * the pagecache must be uncached before they get returned to the
3586          * allocator.
3587          */
3588         cifs_fscache_readpages_cancel(mapping->host, page_list);
3589         return rc;
3590 }
3591
3592 /*
3593  * cifs_readpage_worker must be called with the page pinned
3594  */
3595 static int cifs_readpage_worker(struct file *file, struct page *page,
3596         loff_t *poffset)
3597 {
3598         char *read_data;
3599         int rc;
3600
3601         /* Is the page cached? */
3602         rc = cifs_readpage_from_fscache(file_inode(file), page);
3603         if (rc == 0)
3604                 goto read_complete;
3605
3606         read_data = kmap(page);
3607         /* for reads over a certain size we could initiate async read-ahead */
3608
3609         rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
3610
3611         if (rc < 0)
3612                 goto io_error;
3613         else
3614                 cifs_dbg(FYI, "Bytes read %d\n", rc);
3615
3616         file_inode(file)->i_atime =
3617                 current_time(file_inode(file));
3618
3619         if (PAGE_SIZE > rc)
3620                 memset(read_data + rc, 0, PAGE_SIZE - rc);
3621
3622         flush_dcache_page(page);
3623         SetPageUptodate(page);
3624
3625         /* send this page to the cache */
3626         cifs_readpage_to_fscache(file_inode(file), page);
3627
3628         rc = 0;
3629
3630 io_error:
3631         kunmap(page);
3632         unlock_page(page);
3633
3634 read_complete:
3635         return rc;
3636 }
3637
3638 static int cifs_readpage(struct file *file, struct page *page)
3639 {
3640         loff_t offset = (loff_t)page->index << PAGE_SHIFT;
3641         int rc = -EACCES;
3642         unsigned int xid;
3643
3644         xid = get_xid();
3645
3646         if (file->private_data == NULL) {
3647                 rc = -EBADF;
3648                 free_xid(xid);
3649                 return rc;
3650         }
3651
3652         cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
3653                  page, (int)offset, (int)offset);
3654
3655         rc = cifs_readpage_worker(file, page, &offset);
3656
3657         free_xid(xid);
3658         return rc;
3659 }
3660
3661 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3662 {
3663         struct cifsFileInfo *open_file;
3664         struct cifs_tcon *tcon =
3665                 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
3666
3667         spin_lock(&tcon->open_file_lock);
3668         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3669                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3670                         spin_unlock(&tcon->open_file_lock);
3671                         return 1;
3672                 }
3673         }
3674         spin_unlock(&tcon->open_file_lock);
3675         return 0;
3676 }
3677
3678 /* We do not want to update the file size from the server for inodes
3679    open for write, to avoid races with writepage extending the file.
3680    In the future we could consider allowing a refresh of the inode
3681    only on increases in the file size, but this is tricky to do
3682    without racing with writebehind page caching in the current
3683    Linux kernel design. */
3684 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
3685 {
3686         if (!cifsInode)
3687                 return true;
3688
3689         if (is_inode_writable(cifsInode)) {
3690                 /* This inode is open for write at least once */
3691                 struct cifs_sb_info *cifs_sb;
3692
3693                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
3694                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3695                         /* since no page cache to corrupt on directio
3696                         we can change size safely */
3697                         return true;
3698                 }
3699
3700                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
3701                         return true;
3702
3703                 return false;
3704         } else
3705                 return true;
3706 }
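
/*
 * The policy above as a sketch: not writable anywhere -> safe to change;
 * writable on a direct I/O mount (no page cache to corrupt) -> safe;
 * writable and the server EOF is beyond our cached i_size -> safe (pure
 * growth); writable otherwise -> unsafe, keep the local size.
 */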
3707
3708 static int cifs_write_begin(struct file *file, struct address_space *mapping,
3709                         loff_t pos, unsigned len, unsigned flags,
3710                         struct page **pagep, void **fsdata)
3711 {
3712         int oncethru = 0;
3713         pgoff_t index = pos >> PAGE_SHIFT;
3714         loff_t offset = pos & (PAGE_SIZE - 1);
3715         loff_t page_start = pos & PAGE_MASK;
3716         loff_t i_size;
3717         struct page *page;
3718         int rc = 0;
3719
3720         cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
3721
3722 start:
3723         page = grab_cache_page_write_begin(mapping, index, flags);
3724         if (!page) {
3725                 rc = -ENOMEM;
3726                 goto out;
3727         }
3728
3729         if (PageUptodate(page))
3730                 goto out;
3731
3732         /*
3733          * If we write a full page it will be up to date, no need to read from
3734          * the server. If the write is short, we'll end up doing a sync write
3735          * instead.
3736          */
3737         if (len == PAGE_SIZE)
3738                 goto out;
3739
3740         /*
3741          * optimize away the read when we have an oplock, and we're not
3742          * expecting to use any of the data we'd be reading in. That
3743          * is, when the page lies beyond the EOF, or straddles the EOF
3744          * and the write will cover all of the existing data.
3745          */
3746         if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
3747                 i_size = i_size_read(mapping->host);
3748                 if (page_start >= i_size ||
3749                     (offset == 0 && (pos + len) >= i_size)) {
3750                         zero_user_segments(page, 0, offset,
3751                                            offset + len,
3752                                            PAGE_SIZE);
3753                         /*
3754                          * PageChecked means that the parts of the page
3755                          * to which we're not writing are considered up
3756                          * to date. Once the data is copied to the
3757                          * page, it can be set uptodate.
3758                          */
3759                         SetPageChecked(page);
3760                         goto out;
3761                 }
3762         }
3763
3764         if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
3765                 /*
3766                  * might as well read a page, it is fast enough. If we get
3767                  * an error, we don't need to return it. cifs_write_end will
3768                  * do a sync write instead since PG_uptodate isn't set.
3769                  */
3770                 cifs_readpage_worker(file, page, &page_start);
3771                 put_page(page);
3772                 oncethru = 1;
3773                 goto start;
3774         } else {
3775                 /* we could try using another file handle if there is one -
3776                    but how would we lock it to prevent a close of that handle
3777                    racing with this read? In any case this will be written
3778                    out by write_end so it is fine */
3779         }
3780 out:
3781         *pagep = page;
3782         return rc;
3783 }
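
/*
 * Worked examples for the short-write paths above, assuming 4K pages:
 * pos == 8192 with len == 4096 covers a whole page, so no read is done;
 * pos == 12300 with len == 200, a read oplock, and i_size == 12000
 * starts past EOF, so the page is zeroed outside [offset, offset + len)
 * and PageChecked is set; the same short write without an oplock reads
 * the page once via cifs_readpage_worker() before retrying.
 */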
3784
3785 static int cifs_release_page(struct page *page, gfp_t gfp)
3786 {
3787         if (PagePrivate(page))
3788                 return 0;
3789
3790         return cifs_fscache_release_page(page, gfp);
3791 }
3792
3793 static void cifs_invalidate_page(struct page *page, unsigned int offset,
3794                                  unsigned int length)
3795 {
3796         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3797
3798         if (offset == 0 && length == PAGE_SIZE)
3799                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3800 }
3801
3802 static int cifs_launder_page(struct page *page)
3803 {
3804         int rc = 0;
3805         loff_t range_start = page_offset(page);
3806         loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
3807         struct writeback_control wbc = {
3808                 .sync_mode = WB_SYNC_ALL,
3809                 .nr_to_write = 0,
3810                 .range_start = range_start,
3811                 .range_end = range_end,
3812         };
3813
3814         cifs_dbg(FYI, "Launder page: %p\n", page);
3815
3816         if (clear_page_dirty_for_io(page))
3817                 rc = cifs_writepage_locked(page, &wbc);
3818
3819         cifs_fscache_invalidate_page(page, page->mapping->host);
3820         return rc;
3821 }
3822
3823 void cifs_oplock_break(struct work_struct *work)
3824 {
3825         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3826                                                   oplock_break);
3827         struct inode *inode = d_inode(cfile->dentry);
3828         struct cifsInodeInfo *cinode = CIFS_I(inode);
3829         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3830         struct TCP_Server_Info *server = tcon->ses->server;
3831         int rc = 0;
3832
3833         wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3834                         TASK_UNINTERRUPTIBLE);
3835
3836         server->ops->downgrade_oplock(server, cinode,
3837                 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
3838
3839         if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3840                                                 cifs_has_mand_locks(cinode)) {
3841                 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3842                          inode);
3843                 cinode->oplock = 0;
3844         }
3845
3846         if (inode && S_ISREG(inode->i_mode)) {
3847                 if (CIFS_CACHE_READ(cinode))
3848                         break_lease(inode, O_RDONLY);
3849                 else
3850                         break_lease(inode, O_WRONLY);
3851                 rc = filemap_fdatawrite(inode->i_mapping);
3852                 if (!CIFS_CACHE_READ(cinode)) {
3853                         rc = filemap_fdatawait(inode->i_mapping);
3854                         mapping_set_error(inode->i_mapping, rc);
3855                         cifs_zap_mapping(inode);
3856                 }
3857                 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3858         }
3859
3860         rc = cifs_push_locks(cfile);
3861         if (rc)
3862                 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3863
3864         /*
3865          * Releasing a stale oplock after a recent reconnect of the SMB
3866          * session, using a now-incorrect file handle, is not a data
3867          * integrity issue. But do not bother sending an oplock release if
3868          * the session is still disconnected: the server already released it.
3869          */
3870         if (!cfile->oplock_break_cancelled) {
3871                 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3872                                                              cinode);
3873                 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3874         }
3875         cifs_done_oplock_break(cinode);
3876 }
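
/*
 * Break handling order, sketched: wait for pending writers, downgrade
 * the cached oplock state, drop it entirely if mandatory brlocks make
 * cached reads unsafe, flush (and on a full break invalidate) the page
 * cache, re-push byte-range locks, then acknowledge to the server
 * unless a reconnect already cancelled the break.
 */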
3877
3878 /*
3879  * The presence of cifs_direct_io() in the address space ops vector
3880  * allows open() with the O_DIRECT flag, which would have failed otherwise.
3881  *
3882  * In the non-cached mode (mount with cache=none), we shunt off direct
3883  * read and write requests, so this method should never be called.
3884  *
3885  * Direct IO is not yet supported in the cached mode.
3886  */
3887 static ssize_t
3888 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
3889 {
3890         /*
3891          * FIXME
3892          * Eventually we need to support direct IO for non-forcedirectio mounts
3893          */
3894         return -EINVAL;
3895 }
3896
3897
3898 const struct address_space_operations cifs_addr_ops = {
3899         .readpage = cifs_readpage,
3900         .readpages = cifs_readpages,
3901         .writepage = cifs_writepage,
3902         .writepages = cifs_writepages,
3903         .write_begin = cifs_write_begin,
3904         .write_end = cifs_write_end,
3905         .set_page_dirty = __set_page_dirty_nobuffers,
3906         .releasepage = cifs_release_page,
3907         .direct_IO = cifs_direct_io,
3908         .invalidatepage = cifs_invalidate_page,
3909         .launder_page = cifs_launder_page,
3910 };
3911
3912 /*
3913  * cifs_readpages requires the server to support a buffer large enough to
3914  * contain the header plus one complete page of data.  Otherwise, we need
3915  * to leave cifs_readpages out of the address space operations.
3916  */
3917 const struct address_space_operations cifs_addr_ops_smallbuf = {
3918         .readpage = cifs_readpage,
3919         .writepage = cifs_writepage,
3920         .writepages = cifs_writepages,
3921         .write_begin = cifs_write_begin,
3922         .write_end = cifs_write_end,
3923         .set_page_dirty = __set_page_dirty_nobuffers,
3924         .releasepage = cifs_release_page,
3925         .invalidatepage = cifs_invalidate_page,
3926         .launder_page = cifs_launder_page,
3927 };