drivers/staging/lustre/lustre/llite/llite_lib.c (cascardo/linux.git @ 7e618c546214fccab4458e8f52b71d4a79c4a0f6)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/module.h>
40 #include <linux/statfs.h>
41 #include <linux/types.h>
42 #include <linux/mm.h>
43
44 #include "../include/lustre/lustre_ioctl.h"
45 #include "../include/lustre_ha.h"
46 #include "../include/lustre_dlm.h"
47 #include "../include/lprocfs_status.h"
48 #include "../include/lustre_disk.h"
49 #include "../include/lustre_param.h"
50 #include "../include/lustre_log.h"
51 #include "../include/cl_object.h"
52 #include "../include/obd_cksum.h"
53 #include "llite_internal.h"
54
55 struct kmem_cache *ll_file_data_slab;
56 struct dentry *llite_root;
57 struct kset *llite_kset;
58
59 #ifndef log2
60 #define log2(n) ffz(~(n))
61 #endif
62
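/*
 * Allocate and initialize the llite-private superblock info: per-mount
 * locks, the client LRU page cache (sized to half of low memory), default
 * read-ahead and statahead limits, root-squash defaults and a freshly
 * generated client UUID.
 */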
63 static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
64 {
65         struct ll_sb_info *sbi = NULL;
66         unsigned long pages;
67         unsigned long lru_page_max;
68         struct sysinfo si;
69         class_uuid_t uuid;
70         int i;
71
72         sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
73         if (!sbi)
74                 return NULL;
75
76         spin_lock_init(&sbi->ll_lock);
77         mutex_init(&sbi->ll_lco.lco_lock);
78         spin_lock_init(&sbi->ll_pp_extent_lock);
79         spin_lock_init(&sbi->ll_process_lock);
80         sbi->ll_rw_stats_on = 0;
81
82         si_meminfo(&si);
83         pages = si.totalram - si.totalhigh;
84         lru_page_max = pages / 2;
85
86         sbi->ll_cache = cl_cache_init(lru_page_max);
87         if (!sbi->ll_cache) {
88                 kfree(sbi);
89                 return NULL;
90         }
91
92         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
93                                            SBI_DEFAULT_READAHEAD_MAX);
94         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
95         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
96                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
97
98         ll_generate_random_uuid(uuid);
99         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
100         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
101
102         sbi->ll_flags |= LL_SBI_VERBOSE;
103         sbi->ll_flags |= LL_SBI_CHECKSUM;
104
105         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
106
107         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
108                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
109                                pp_r_hist.oh_lock);
110                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
111                                pp_w_hist.oh_lock);
112         }
113
114         /* metadata statahead is enabled by default */
115         sbi->ll_sa_max = LL_SA_RPC_DEF;
116         atomic_set(&sbi->ll_sa_total, 0);
117         atomic_set(&sbi->ll_sa_wrong, 0);
118         atomic_set(&sbi->ll_sa_running, 0);
119         atomic_set(&sbi->ll_agl_total, 0);
120         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
121
122         /* root squash */
123         sbi->ll_squash.rsi_uid = 0;
124         sbi->ll_squash.rsi_gid = 0;
125         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
126         init_rwsem(&sbi->ll_squash.rsi_sem);
127
128         sbi->ll_sb = sb;
129
130         return sbi;
131 }
132
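/*
 * Release the llite-private superblock info: free the nosquash NID list if
 * any, drop the reference on the client LRU cache and free the structure.
 */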
133 static void ll_free_sbi(struct super_block *sb)
134 {
135         struct ll_sb_info *sbi = ll_s2sbi(sb);
136
137         if (sbi->ll_cache) {
138                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
139                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
140                 cl_cache_decref(sbi->ll_cache);
141                 sbi->ll_cache = NULL;
142         }
143
144         kfree(sbi);
145 }
146
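/*
 * Connect this client to its metadata ("md") and data ("dt") obds, negotiate
 * connect flags with the servers, fetch the root FID and its attributes, and
 * finish VFS superblock setup (root dentry, operations, debugfs entry).
 */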
147 static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
148                                     struct vfsmount *mnt)
149 {
150         struct inode *root = NULL;
151         struct ll_sb_info *sbi = ll_s2sbi(sb);
152         struct obd_device *obd;
153         struct obd_statfs *osfs = NULL;
154         struct ptlrpc_request *request = NULL;
155         struct obd_connect_data *data = NULL;
156         struct obd_uuid *uuid;
157         struct md_op_data *op_data;
158         struct lustre_md lmd;
159         u64 valid;
160         int size, err, checksum;
161
162         obd = class_name2obd(md);
163         if (!obd) {
164                 CERROR("MD %s: not setup or attached\n", md);
165                 return -EINVAL;
166         }
167
168         data = kzalloc(sizeof(*data), GFP_NOFS);
169         if (!data)
170                 return -ENOMEM;
171
172         osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
173         if (!osfs) {
174                 kfree(data);
175                 return -ENOMEM;
176         }
177
178         /* indicate the features supported by this client */
179         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
180                                   OBD_CONNECT_ATTRFID  |
181                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
182                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
183                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
184                                   OBD_CONNECT_VBR       | OBD_CONNECT_FULL20  |
185                                   OBD_CONNECT_64BITHASH |
186                                   OBD_CONNECT_EINPROGRESS |
187                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
188                                   OBD_CONNECT_LAYOUTLOCK |
189                                   OBD_CONNECT_PINGLESS |
190                                   OBD_CONNECT_MAX_EASIZE |
191                                   OBD_CONNECT_FLOCK_DEAD |
192                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
193                                   OBD_CONNECT_OPEN_BY_FID |
194                                   OBD_CONNECT_DIR_STRIPE;
195
196         if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
197                 data->ocd_connect_flags |= OBD_CONNECT_SOM;
198
199         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
200                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
201 #ifdef CONFIG_FS_POSIX_ACL
202         data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
203 #endif
204
205         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
 206                 /* flag the mdc connection as lightweight; only used for
 207                  * test purposes, use with care
 208                  */
209                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
210
211         data->ocd_ibits_known = MDS_INODELOCK_FULL;
212         data->ocd_version = LUSTRE_VERSION_CODE;
213
214         if (sb->s_flags & MS_RDONLY)
215                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
216         if (sbi->ll_flags & LL_SBI_USER_XATTR)
217                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
218
219         if (sbi->ll_flags & LL_SBI_FLOCK)
220                 sbi->ll_fop = &ll_file_operations_flock;
221         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
222                 sbi->ll_fop = &ll_file_operations;
223         else
224                 sbi->ll_fop = &ll_file_operations_noflock;
225
226         /* real client */
227         data->ocd_connect_flags |= OBD_CONNECT_REAL;
228
229         data->ocd_brw_size = MD_MAX_BRW_SIZE;
230
231         err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
232                           data, NULL);
233         if (err == -EBUSY) {
234                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
235                                    md);
236                 goto out;
237         } else if (err) {
238                 CERROR("cannot connect to %s: rc = %d\n", md, err);
239                 goto out;
240         }
241
242         sbi->ll_md_exp->exp_connect_data = *data;
243
244         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
245                            LUSTRE_SEQ_METADATA);
246         if (err) {
247                 CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
248                        sbi->ll_md_exp->exp_obd->obd_name, err);
249                 goto out_md;
250         }
251
 252         /* For mount, we only need fs info from MDT0; in DNE this also
 253          * ensures the client can be mounted as long as MDT0 is
 254          * available
 255          */
256         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
257                          cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
258                          OBD_STATFS_FOR_MDT0);
259         if (err)
260                 goto out_md_fid;
261
262         /* This needs to be after statfs to ensure connect has finished.
263          * Note that "data" does NOT contain the valid connect reply.
264          * If connecting to a 1.8 server there will be no LMV device, so
265          * we can access the MDC export directly and exp_connect_flags will
266          * be non-zero, but if accessing an upgraded 2.1 server it will
267          * have the correct flags filled in.
268          * XXX: fill in the LMV exp_connect_flags from MDC(s).
269          */
270         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
271         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
272             valid != CLIENT_CONNECT_MDT_REQD) {
273                 char *buf;
274
275                 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
276                 if (!buf) {
277                         err = -ENOMEM;
278                         goto out_md_fid;
279                 }
280                 obd_connect_flags2str(buf, PAGE_SIZE,
281                                       valid ^ CLIENT_CONNECT_MDT_REQD, ",");
282                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
283                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
284                 kfree(buf);
285                 err = -EPROTO;
286                 goto out_md_fid;
287         }
288
289         size = sizeof(*data);
290         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
291                            KEY_CONN_DATA,  &size, data, NULL);
292         if (err) {
293                 CERROR("%s: Get connect data failed: rc = %d\n",
294                        sbi->ll_md_exp->exp_obd->obd_name, err);
295                 goto out_md_fid;
296         }
297
298         LASSERT(osfs->os_bsize);
299         sb->s_blocksize = osfs->os_bsize;
300         sb->s_blocksize_bits = log2(osfs->os_bsize);
301         sb->s_magic = LL_SUPER_MAGIC;
302         sb->s_maxbytes = MAX_LFS_FILESIZE;
303         sbi->ll_namelen = osfs->os_namelen;
304
305         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
306             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
307                 LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
308                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
309         }
310
311         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
312                 sb->s_flags |= MS_POSIXACL;
313                 sbi->ll_flags |= LL_SBI_ACL;
314         } else {
315                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
316                 sb->s_flags &= ~MS_POSIXACL;
317                 sbi->ll_flags &= ~LL_SBI_ACL;
318         }
319
320         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
321                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
322
323         if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
324                 sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
325         else
326                 sbi->ll_md_brw_pages = 1;
327
328         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
329                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
330
331         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
332                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
333                         LCONSOLE_INFO(
334                                 "%s: disabling xattr cache due to unknown maximum xattr size.\n",
335                                 dt);
336                 } else {
337                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
338                         sbi->ll_xattr_cache_enabled = 1;
339                 }
340         }
341
342         obd = class_name2obd(dt);
343         if (!obd) {
344                 CERROR("DT %s: not setup or attached\n", dt);
345                 err = -ENODEV;
346                 goto out_md_fid;
347         }
348
349         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
350                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
351                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
352                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK|
353                                   OBD_CONNECT_AT        | OBD_CONNECT_OSS_CAPA |
354                                   OBD_CONNECT_VBR       | OBD_CONNECT_FULL20   |
355                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
356                                   OBD_CONNECT_EINPROGRESS |
357                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
358                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
359
360         if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
361                 data->ocd_connect_flags |= OBD_CONNECT_SOM;
362
363         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
364                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
365                  * disabled by default, because it can still be enabled on the
366                  * fly via /sys. As a consequence, we still need to come to an
367                  * agreement on the supported algorithms at connect time
368                  */
369                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
370
371                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
372                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
373                 else
374                         data->ocd_cksum_types = cksum_types_supported_client();
375         }
376
377         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
378
379         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
380                data->ocd_connect_flags,
381                data->ocd_version, data->ocd_grant);
382
383         obd->obd_upcall.onu_owner = &sbi->ll_lco;
384         obd->obd_upcall.onu_upcall = cl_ocd_update;
385
386         data->ocd_brw_size = DT_MAX_BRW_SIZE;
387
388         err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
389                           NULL);
390         if (err == -EBUSY) {
391                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part.  Please wait for recovery to complete, abort, or time out.\n",
392                                    dt);
 393                 goto out_md_fid;
394         } else if (err) {
395                 CERROR("%s: Cannot connect to %s: rc = %d\n",
396                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
 397                 goto out_md_fid;
398         }
399
400         sbi->ll_dt_exp->exp_connect_data = *data;
401
402         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
403                            LUSTRE_SEQ_METADATA);
404         if (err) {
405                 CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
406                        sbi->ll_dt_exp->exp_obd->obd_name, err);
407                 goto out_dt;
408         }
409
410         mutex_lock(&sbi->ll_lco.lco_lock);
411         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
412         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
413         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
414         mutex_unlock(&sbi->ll_lco.lco_lock);
415
416         fid_zero(&sbi->ll_root_fid);
417         err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
418         if (err) {
419                 CERROR("cannot mds_connect: rc = %d\n", err);
420                 goto out_lock_cn_cb;
421         }
422         if (!fid_is_sane(&sbi->ll_root_fid)) {
423                 CERROR("%s: Invalid root fid "DFID" during mount\n",
424                        sbi->ll_md_exp->exp_obd->obd_name,
425                        PFID(&sbi->ll_root_fid));
426                 err = -EINVAL;
427                 goto out_lock_cn_cb;
428         }
429         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
430
431         sb->s_op = &lustre_super_operations;
432         sb->s_xattr = ll_xattr_handlers;
433 #if THREAD_SIZE >= 8192 /*b=17630*/
434         sb->s_export_op = &lustre_export_operations;
435 #endif
436
437         /* make root inode
438          * XXX: move this to after cbd setup?
439          */
440         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
441         if (sbi->ll_flags & LL_SBI_ACL)
442                 valid |= OBD_MD_FLACL;
443
444         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
445         if (!op_data) {
446                 err = -ENOMEM;
447                 goto out_lock_cn_cb;
448         }
449
450         op_data->op_fid1 = sbi->ll_root_fid;
451         op_data->op_mode = 0;
452         op_data->op_valid = valid;
453
454         err = md_getattr(sbi->ll_md_exp, op_data, &request);
455         kfree(op_data);
456         if (err) {
457                 CERROR("%s: md_getattr failed for root: rc = %d\n",
458                        sbi->ll_md_exp->exp_obd->obd_name, err);
459                 goto out_lock_cn_cb;
460         }
461
462         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
463                                sbi->ll_md_exp, &lmd);
464         if (err) {
465                 CERROR("failed to understand root inode md: rc = %d\n", err);
466                 ptlrpc_req_finished(request);
467                 goto out_lock_cn_cb;
468         }
469
470         LASSERT(fid_is_sane(&sbi->ll_root_fid));
471         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
472                                             sbi->ll_flags & LL_SBI_32BIT_API),
473                        &lmd);
474         md_free_lustre_md(sbi->ll_md_exp, &lmd);
475         ptlrpc_req_finished(request);
476
477         if (IS_ERR(root)) {
478                 if (lmd.lsm)
479                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
480 #ifdef CONFIG_FS_POSIX_ACL
481                 if (lmd.posix_acl) {
482                         posix_acl_release(lmd.posix_acl);
483                         lmd.posix_acl = NULL;
484                 }
485 #endif
486                 err = -EBADF;
487                 CERROR("lustre_lite: bad iget4 for root\n");
488                 goto out_root;
489         }
490
491         err = ll_close_thread_start(&sbi->ll_lcq);
492         if (err) {
493                 CERROR("cannot start close thread: rc %d\n", err);
494                 goto out_root;
495         }
496
497         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
498         err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
499                                  KEY_CHECKSUM, sizeof(checksum), &checksum,
500                                  NULL);
501         if (err) {
502                 CERROR("%s: Set checksum failed: rc = %d\n",
503                        sbi->ll_dt_exp->exp_obd->obd_name, err);
504                 goto out_root;
505         }
506         cl_sb_init(sb);
507
508         err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
509                                  KEY_CACHE_SET, sizeof(*sbi->ll_cache),
510                                  sbi->ll_cache, NULL);
511         if (err) {
512                 CERROR("%s: Set cache_set failed: rc = %d\n",
513                        sbi->ll_dt_exp->exp_obd->obd_name, err);
514                 goto out_root;
515         }
516
517         sb->s_root = d_make_root(root);
518         if (!sb->s_root) {
519                 CERROR("%s: can't make root dentry\n",
520                        ll_get_fsname(sb, NULL, 0));
521                 err = -ENOMEM;
522                 goto out_lock_cn_cb;
523         }
524
525         sbi->ll_sdev_orig = sb->s_dev;
526
527         /* We set sb->s_dev equal on all lustre clients in order to support
528          * NFS export clustering.  NFSD requires that the FSID be the same
529          * on all clients.
530          */
531         /* s_dev is also used in lt_compare() to compare two fs, but that is
532          * only a node-local comparison.
533          */
534         uuid = obd_get_uuid(sbi->ll_md_exp);
535         if (uuid) {
536                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
537                 get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
538         }
539
540         kfree(data);
541         kfree(osfs);
542
543         if (llite_root) {
544                 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
545                 if (err < 0) {
546                         CERROR("%s: could not register mount in debugfs: "
547                                "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
548                         err = 0;
549                 }
550         }
551
552         return err;
553 out_root:
554         iput(root);
555 out_lock_cn_cb:
556         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
557 out_dt:
558         obd_disconnect(sbi->ll_dt_exp);
559         sbi->ll_dt_exp = NULL;
560 out_md_fid:
561         obd_fid_fini(sbi->ll_md_exp->exp_obd);
562 out_md:
563         obd_disconnect(sbi->ll_md_exp);
564         sbi->ll_md_exp = NULL;
565 out:
566         kfree(data);
567         kfree(osfs);
568         return err;
569 }
570
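/* Query the MDC for the maximum metadata (EA) size the MDT may return. */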
571 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
572 {
573         int size, rc;
574
575         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
576         size = sizeof(int);
577         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
578                           KEY_MAX_EASIZE, &size, lmmsize, NULL);
579         if (rc)
580                 CERROR("Get max mdsize error rc %d\n", rc);
581
582         return rc;
583 }
584
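/* Query the MDC for the default metadata (EA) size. */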
585 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
586 {
587         int size, rc;
588
589         size = sizeof(int);
590         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
591                           KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
592         if (rc)
593                 CERROR("Get default mdsize error rc %d\n", rc);
594
595         return rc;
596 }
597
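/*
 * Undo client_common_fill_super(): stop the close thread, finish the
 * cl_object layer and disconnect from the data and metadata exports.
 */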
598 static void client_common_put_super(struct super_block *sb)
599 {
600         struct ll_sb_info *sbi = ll_s2sbi(sb);
601
602         ll_close_thread_shutdown(sbi->ll_lcq);
603
604         cl_sb_fini(sb);
605
606         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
607         obd_disconnect(sbi->ll_dt_exp);
608         sbi->ll_dt_exp = NULL;
609
610         ldebugfs_unregister_mountpoint(sbi);
611
612         obd_fid_fini(sbi->ll_md_exp->exp_obd);
613         obd_disconnect(sbi->ll_md_exp);
614         sbi->ll_md_exp = NULL;
615 }
616
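/*
 * Called while killing the superblock: restore the original s_dev (changed
 * for clustered NFS export) and wait for running statahead threads to stop.
 */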
617 void ll_kill_super(struct super_block *sb)
618 {
619         struct ll_sb_info *sbi;
620
 621         /* sb not initialized? */
622         if (!(sb->s_flags & MS_ACTIVE))
623                 return;
624
625         sbi = ll_s2sbi(sb);
 626         /* we need to restore s_dev (changed for clustered NFS) before
 627          * put_super, because newer kernels cache s_dev and changing
 628          * sb->s_dev in put_super does not affect the real device removal
 629          */
630         if (sbi) {
631                 sb->s_dev = sbi->ll_sdev_orig;
632                 sbi->ll_umounting = 1;
633
 634                 /* wait for running statahead threads to quit */
635                 while (atomic_read(&sbi->ll_sa_running) > 0) {
636                         set_current_state(TASK_UNINTERRUPTIBLE);
637                         schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
638                 }
639         }
640 }
641
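/* Return @fl if the option string @data starts with @opt, 0 otherwise. */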
642 static inline int ll_set_opt(const char *opt, char *data, int fl)
643 {
644         if (strncmp(opt, data, strlen(opt)) != 0)
645                 return 0;
646         else
647                 return fl;
648 }
649
650 /* non-client-specific mount options are parsed in lmd_parse */
651 static int ll_options(char *options, int *flags)
652 {
653         int tmp;
654         char *s1 = options, *s2;
655
656         if (!options)
657                 return 0;
658
659         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
660
661         while (*s1) {
662                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
663                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
664                 if (tmp) {
665                         *flags |= tmp;
666                         goto next;
667                 }
668                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
669                 if (tmp) {
670                         *flags |= tmp;
671                         goto next;
672                 }
673                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
674                 if (tmp) {
675                         *flags |= tmp;
676                         goto next;
677                 }
678                 tmp = ll_set_opt("noflock", s1,
679                                  LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
680                 if (tmp) {
681                         *flags &= ~tmp;
682                         goto next;
683                 }
684                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
685                 if (tmp) {
686                         *flags |= tmp;
687                         goto next;
688                 }
689                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
690                 if (tmp) {
691                         *flags &= ~tmp;
692                         goto next;
693                 }
694                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
695                 if (tmp) {
696                         *flags |= tmp;
697                         goto next;
698                 }
699                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
700                 if (tmp) {
701                         *flags &= ~tmp;
702                         goto next;
703                 }
704
705                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
706                 if (tmp) {
707                         *flags |= tmp;
708                         goto next;
709                 }
710                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
711                 if (tmp) {
712                         *flags &= ~tmp;
713                         goto next;
714                 }
715                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
716                 if (tmp) {
717                         *flags |= tmp;
718                         goto next;
719                 }
720                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
721                 if (tmp) {
722                         *flags &= ~tmp;
723                         goto next;
724                 }
725                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
726                 if (tmp) {
727                         *flags |= tmp;
728                         goto next;
729                 }
730                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
731                 if (tmp) {
732                         *flags &= ~tmp;
733                         goto next;
734                 }
735                 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
736                 if (tmp) {
737                         *flags |= tmp;
738                         goto next;
739                 }
740                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
741                 if (tmp) {
742                         *flags |= tmp;
743                         goto next;
744                 }
745                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
746                 if (tmp) {
747                         *flags |= tmp;
748                         goto next;
749                 }
750                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
751                 if (tmp) {
752                         *flags &= ~tmp;
753                         goto next;
754                 }
755                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
756                                    s1);
757                 return -EINVAL;
758
759 next:
760                 /* Find next opt */
761                 s2 = strchr(s1, ',');
762                 if (!s2)
763                         break;
764                 s1 = s2 + 1;
765         }
766         return 0;
767 }
768
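/*
 * Initialize the Lustre-private part of a new inode: locks, open handle
 * bookkeeping, and the directory (statahead) or regular file
 * (truncate/glimpse/AGL) specific fields.
 */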
769 void ll_lli_init(struct ll_inode_info *lli)
770 {
771         lli->lli_inode_magic = LLI_INODE_MAGIC;
772         lli->lli_flags = 0;
773         lli->lli_ioepoch = 0;
774         lli->lli_maxbytes = MAX_LFS_FILESIZE;
775         spin_lock_init(&lli->lli_lock);
776         lli->lli_posix_acl = NULL;
777         /* Do not set lli_fid, it has been initialized already. */
778         fid_zero(&lli->lli_pfid);
779         INIT_LIST_HEAD(&lli->lli_close_list);
780         lli->lli_pending_och = NULL;
781         lli->lli_mds_read_och = NULL;
782         lli->lli_mds_write_och = NULL;
783         lli->lli_mds_exec_och = NULL;
784         lli->lli_open_fd_read_count = 0;
785         lli->lli_open_fd_write_count = 0;
786         lli->lli_open_fd_exec_count = 0;
787         mutex_init(&lli->lli_och_mutex);
788         spin_lock_init(&lli->lli_agl_lock);
789         lli->lli_has_smd = false;
790         spin_lock_init(&lli->lli_layout_lock);
791         ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
792         lli->lli_clob = NULL;
793
794         init_rwsem(&lli->lli_xattrs_list_rwsem);
795         mutex_init(&lli->lli_xattrs_enq_lock);
796
797         LASSERT(lli->lli_vfs_inode.i_mode != 0);
798         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
799                 mutex_init(&lli->lli_readdir_mutex);
800                 lli->lli_opendir_key = NULL;
801                 lli->lli_sai = NULL;
802                 spin_lock_init(&lli->lli_sa_lock);
803                 lli->lli_opendir_pid = 0;
804                 lli->lli_sa_enabled = 0;
805                 lli->lli_def_stripe_offset = -1;
806         } else {
807                 mutex_init(&lli->lli_size_mutex);
808                 lli->lli_symlink_name = NULL;
809                 init_rwsem(&lli->lli_trunc_sem);
810                 range_lock_tree_init(&lli->lli_write_tree);
811                 init_rwsem(&lli->lli_glimpse_sem);
812                 lli->lli_glimpse_time = 0;
813                 INIT_LIST_HEAD(&lli->lli_agl_list);
814                 lli->lli_agl_index = 0;
815                 lli->lli_async_rc = 0;
816         }
817         mutex_init(&lli->lli_layout_mutex);
818 }
819
820 static inline int ll_bdi_register(struct backing_dev_info *bdi)
821 {
822         static atomic_t ll_bdi_num = ATOMIC_INIT(0);
823
824         bdi->name = "lustre";
825         return bdi_register(bdi, NULL, "lustre-%d",
826                             atomic_inc_return(&ll_bdi_num));
827 }
828
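/*
 * Mount entry point for the client: allocate the sbi, parse mount options,
 * set up the backing_dev_info, process the configuration llog to create the
 * local mdc/osc obds, then call client_common_fill_super() to connect and
 * build the root inode.
 */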
829 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
830 {
831         struct lustre_profile *lprof = NULL;
832         struct lustre_sb_info *lsi = s2lsi(sb);
833         struct ll_sb_info *sbi;
834         char  *dt = NULL, *md = NULL;
835         char  *profilenm = get_profile_name(sb);
836         struct config_llog_instance *cfg;
837         int    err;
838
839         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
840
841         cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
842         if (!cfg)
843                 return -ENOMEM;
844
845         try_module_get(THIS_MODULE);
846
847         /* client additional sb info */
848         sbi = ll_init_sbi(sb);
849         lsi->lsi_llsbi = sbi;
850         if (!sbi) {
851                 module_put(THIS_MODULE);
852                 kfree(cfg);
853                 return -ENOMEM;
854         }
855
856         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
857         if (err)
858                 goto out_free;
859
860         err = bdi_init(&lsi->lsi_bdi);
861         if (err)
862                 goto out_free;
863         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
864         lsi->lsi_bdi.capabilities = 0;
865         err = ll_bdi_register(&lsi->lsi_bdi);
866         if (err)
867                 goto out_free;
868
869         sb->s_bdi = &lsi->lsi_bdi;
 870         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
871         sb->s_d_op = &ll_d_ops;
872
873         /* Generate a string unique to this super, in case some joker tries
874          * to mount the same fs at two mount points.
875          * Use the address of the super itself.
876          */
877         cfg->cfg_instance = sb;
878         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
879         cfg->cfg_callback = class_config_llog_handler;
880         /* set up client obds */
881         err = lustre_process_log(sb, profilenm, cfg);
882         if (err < 0)
883                 goto out_free;
884
885         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
886         lprof = class_get_profile(profilenm);
887         if (!lprof) {
888                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
889                                    profilenm);
890                 err = -EINVAL;
891                 goto out_free;
892         }
893         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
894                lprof->lp_md, lprof->lp_dt);
895
896         dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
897         if (!dt) {
898                 err = -ENOMEM;
899                 goto out_free;
900         }
901
902         md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
903         if (!md) {
904                 err = -ENOMEM;
905                 goto out_free;
906         }
907
908         /* connections, registrations, sb setup */
909         err = client_common_fill_super(sb, md, dt, mnt);
910
911 out_free:
912         kfree(md);
913         kfree(dt);
914         if (err)
915                 ll_put_super(sb);
916         else if (sbi->ll_flags & LL_SBI_VERBOSE)
917                 LCONSOLE_WARN("Mounted %s\n", profilenm);
918
919         kfree(cfg);
920         return err;
921 } /* ll_fill_super */
922
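/*
 * Unmount path: end the configuration logs, wait for unstable pages to be
 * committed unless the unmount is forced, tear down the obds created for
 * this mount and free the sbi.
 */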
923 void ll_put_super(struct super_block *sb)
924 {
925         struct config_llog_instance cfg, params_cfg;
926         struct obd_device *obd;
927         struct lustre_sb_info *lsi = s2lsi(sb);
928         struct ll_sb_info *sbi = ll_s2sbi(sb);
929         char *profilenm = get_profile_name(sb);
930         int next, force = 1, rc = 0;
931         long ccc_count;
932
933         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
934
935         cfg.cfg_instance = sb;
936         lustre_end_log(sb, profilenm, &cfg);
937
938         params_cfg.cfg_instance = sb;
939         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
940
941         if (sbi->ll_md_exp) {
942                 obd = class_exp2obd(sbi->ll_md_exp);
943                 if (obd)
944                         force = obd->obd_force;
945         }
946
947         /* Wait for unstable pages to be committed to stable storage */
948         if (!force) {
949                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
950
951                 rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
952                                   !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
953                                   &lwi);
954         }
955
956         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
957         if (!force && rc != -EINTR)
958                 LASSERTF(!ccc_count, "count: %li\n", ccc_count);
959
960         /* We need to set force before the lov_disconnect in
961          * lustre_common_put_super, since l_d cleans up osc's as well.
962          */
963         if (force) {
964                 next = 0;
965                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
966                                                      &next)) != NULL) {
967                         obd->obd_force = force;
968                 }
969         }
970
971         if (sbi->ll_lcq) {
972                 /* Only if client_common_fill_super succeeded */
973                 client_common_put_super(sb);
974         }
975
976         next = 0;
977         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
978                 class_manual_cleanup(obd);
979
980         if (sbi->ll_flags & LL_SBI_VERBOSE)
981                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
982
983         if (profilenm)
984                 class_del_profile(profilenm);
985
986         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
987                 bdi_destroy(&lsi->lsi_bdi);
988                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
989         }
990
991         ll_free_sbi(sb);
992         lsi->lsi_llsbi = NULL;
993
994         lustre_common_put_super(sb);
995
996         cl_env_cache_purge(~0);
997
998         module_put(THIS_MODULE);
999 } /* client_put_super */
1000
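/*
 * Return an igrab()ed inode attached to the lock's resource, or NULL if
 * there is none or the attached inode looks bogus.
 */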
1001 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1002 {
1003         struct inode *inode = NULL;
1004
1005         /* NOTE: we depend on atomic igrab() -bzzz */
1006         lock_res_and_lock(lock);
1007         if (lock->l_resource->lr_lvb_inode) {
1008                 struct ll_inode_info *lli;
1009
1010                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1011                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1012                         inode = igrab(lock->l_resource->lr_lvb_inode);
1013                 } else {
1014                         inode = lock->l_resource->lr_lvb_inode;
1015                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1016                                          D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
1017                                          lock->l_resource->lr_lvb_inode,
1018                                          lli->lli_inode_magic);
1019                         inode = NULL;
1020                 }
1021         }
1022         unlock_res_and_lock(lock);
1023         return inode;
1024 }
1025
1026 static void ll_dir_clear_lsm_md(struct inode *inode)
1027 {
1028         struct ll_inode_info *lli = ll_i2info(inode);
1029
1030         LASSERT(S_ISDIR(inode->i_mode));
1031
1032         if (lli->lli_lsm_md) {
1033                 lmv_free_memmd(lli->lli_lsm_md);
1034                 lli->lli_lsm_md = NULL;
1035         }
1036 }
1037
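/*
 * Build a minimal inode for a slave stripe of a striped directory, keyed by
 * the stripe FID; the master directory FID is recorded in lli_pfid.
 */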
1038 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1039                                       const struct lu_fid *fid,
1040                                       struct lustre_md *md)
1041 {
1042         struct ll_sb_info *sbi = ll_s2sbi(sb);
1043         struct mdt_body *body = md->body;
1044         struct inode *inode;
1045         ino_t ino;
1046
1047         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1048         inode = iget_locked(sb, ino);
1049         if (!inode) {
1050                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1051                        ll_get_fsname(sb, NULL, 0), PFID(fid));
1052                 return ERR_PTR(-ENOENT);
1053         }
1054
1055         if (inode->i_state & I_NEW) {
1056                 struct ll_inode_info *lli = ll_i2info(inode);
1057                 struct lmv_stripe_md *lsm = md->lmv;
1058
1059                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1060                                 (body->mbo_mode & S_IFMT);
1061                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1062                          PFID(fid));
1063
1064                 LTIME_S(inode->i_mtime) = 0;
1065                 LTIME_S(inode->i_atime) = 0;
1066                 LTIME_S(inode->i_ctime) = 0;
1067                 inode->i_rdev = 0;
1068
1069                 inode->i_op = &ll_dir_inode_operations;
1070                 inode->i_fop = &ll_dir_operations;
1071                 lli->lli_fid = *fid;
1072                 ll_lli_init(lli);
1073
1074                 LASSERT(lsm);
1075                 /* master object FID */
1076                 lli->lli_pfid = body->mbo_fid1;
1077                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1078                        lli, PFID(fid), PFID(&lli->lli_pfid));
1079                 unlock_new_inode(inode);
1080         }
1081
1082         return inode;
1083 }
1084
1085 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1086 {
1087         struct lmv_stripe_md *lsm = md->lmv;
1088         struct lu_fid *fid;
1089         int i;
1090
1091         LASSERT(lsm);
1092         /*
 1093          * XXX sigh, this lsm_root initialization should be in the
 1094          * LMV layer, but it needs ll_iget, so we put it
 1095          * here for now.
1096          */
1097         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1098                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1099                 LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
 1100                 /* Unfortunately ll_iget will call ll_update_inode,
 1101                  * where the initialization of a slave inode is slightly
 1102                  * different, so it resets lsm_md to NULL to avoid
 1103                  * initializing lsm for the slave inode.
 1104                  */
 1105                 /* For a migrating inode, the master stripe and master object
 1106                  * are the same, so we only need to assign this inode
 1107                  */
1108                 if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
1109                         lsm->lsm_md_oinfo[i].lmo_root = inode;
1110                 else
1111                         lsm->lsm_md_oinfo[i].lmo_root =
1112                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1113                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1114                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1115
1116                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1117                         return rc;
1118                 }
1119         }
1120
1121         return 0;
1122 }
1123
 1124 static inline int lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
 1125                             const struct lmv_stripe_md *lsm_md2)
1126 {
1127         return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
1128                lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
1129                lsm_md1->lsm_md_master_mdt_index ==
1130                         lsm_md2->lsm_md_master_mdt_index &&
1131                lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
1132                lsm_md1->lsm_md_layout_version ==
1133                         lsm_md2->lsm_md_layout_version &&
1134                !strcmp(lsm_md1->lsm_md_pool_name,
1135                        lsm_md2->lsm_md_pool_name);
1136 }
1137
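/*
 * Update the striped-directory layout (lsm_md) of @inode from the reply in
 * @md: instantiate slave stripe inodes on first use, detect a completed
 * migration, and return -EIO if the server-supplied layout disagrees with
 * the cached one.
 */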
1138 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1139 {
1140         struct ll_inode_info *lli = ll_i2info(inode);
1141         struct lmv_stripe_md *lsm = md->lmv;
1142         int rc;
1143
1144         LASSERT(S_ISDIR(inode->i_mode));
1145         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1146                PFID(ll_inode2fid(inode)));
1147
1148         /* no striped information from request. */
1149         if (!lsm) {
1150                 if (!lli->lli_lsm_md) {
1151                         return 0;
1152                 } else if (lli->lli_lsm_md->lsm_md_hash_type &
1153                            LMV_HASH_FLAG_MIGRATION) {
1154                         /*
 1155                          * migration is done, the temporary MIGRATE layout has
1156                          * been removed
1157                          */
1158                         CDEBUG(D_INODE, DFID" finish migration.\n",
1159                                PFID(ll_inode2fid(inode)));
1160                         lmv_free_memmd(lli->lli_lsm_md);
1161                         lli->lli_lsm_md = NULL;
1162                         return 0;
1163                 } else {
1164                         /*
1165                          * The lustre_md from req does not include stripeEA,
1166                          * see ll_md_setattr
1167                          */
1168                         return 0;
1169                 }
1170         }
1171
1172         /* set the directory layout */
1173         if (!lli->lli_lsm_md) {
1174                 rc = ll_init_lsm_md(inode, md);
1175                 if (rc)
1176                         return rc;
1177
1178                 lli->lli_lsm_md = lsm;
1179                 /*
1180                  * set lsm_md to NULL, so the following free lustre_md
1181                  * will not free this lsm
1182                  */
1183                 md->lmv = NULL;
1184                 CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
1185                        lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
1186                 return 0;
1187         }
1188
1189         /* Compare the old and new stripe information */
1190         if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
1191                 struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
1192                 int idx;
1193
1194                 CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
1195                        ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
1196                        inode, lsm, old_lsm,
1197                        lsm->lsm_md_magic, old_lsm->lsm_md_magic,
1198                        lsm->lsm_md_stripe_count,
1199                        old_lsm->lsm_md_stripe_count,
1200                        lsm->lsm_md_master_mdt_index,
1201                        old_lsm->lsm_md_master_mdt_index,
1202                        lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
1203                        lsm->lsm_md_layout_version,
1204                        old_lsm->lsm_md_layout_version,
1205                        lsm->lsm_md_pool_name,
1206                        old_lsm->lsm_md_pool_name);
1207
1208                 for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
1209                         CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
1210                                ll_get_fsname(inode->i_sb, NULL, 0), idx,
1211                                PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
1212                 }
1213
1214                 for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
1215                         CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
1216                                ll_get_fsname(inode->i_sb, NULL, 0), idx,
1217                                PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
1218                 }
1219
1220                 return -EIO;
1221         }
1222
1223         return 0;
1224 }
1225
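/*
 * Per-inode cleanup on eviction: close any remaining MDS open handles, drop
 * the xattr cache, ACLs and directory layout, and finish the cl_object.
 */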
1226 void ll_clear_inode(struct inode *inode)
1227 {
1228         struct ll_inode_info *lli = ll_i2info(inode);
1229         struct ll_sb_info *sbi = ll_i2sbi(inode);
1230
1231         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1232                PFID(ll_inode2fid(inode)), inode);
1233
1234         if (S_ISDIR(inode->i_mode)) {
1235                 /* these should have been cleared in ll_file_release */
1236                 LASSERT(!lli->lli_opendir_key);
1237                 LASSERT(!lli->lli_sai);
1238                 LASSERT(lli->lli_opendir_pid == 0);
1239         }
1240
1241         spin_lock(&lli->lli_lock);
1242         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1243         spin_unlock(&lli->lli_lock);
1244         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1245
1246         LASSERT(!lli->lli_open_fd_write_count);
1247         LASSERT(!lli->lli_open_fd_read_count);
1248         LASSERT(!lli->lli_open_fd_exec_count);
1249
1250         if (lli->lli_mds_write_och)
1251                 ll_md_real_close(inode, FMODE_WRITE);
1252         if (lli->lli_mds_exec_och)
1253                 ll_md_real_close(inode, FMODE_EXEC);
1254         if (lli->lli_mds_read_och)
1255                 ll_md_real_close(inode, FMODE_READ);
1256
1257         if (S_ISLNK(inode->i_mode)) {
1258                 kfree(lli->lli_symlink_name);
1259                 lli->lli_symlink_name = NULL;
1260         }
1261
1262         ll_xattr_cache_destroy(inode);
1263
1264 #ifdef CONFIG_FS_POSIX_ACL
1265         if (lli->lli_posix_acl) {
1266                 posix_acl_release(lli->lli_posix_acl);
1267                 lli->lli_posix_acl = NULL;
1268         }
1269 #endif
1270         lli->lli_inode_magic = LLI_INODE_DEAD;
1271
1272         if (S_ISDIR(inode->i_mode))
1273                 ll_dir_clear_lsm_md(inode);
1274         if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1275                 LASSERT(list_empty(&lli->lli_agl_list));
1276
1277         /*
1278          * XXX This has to be done before lsm is freed below, because
1279          * cl_object still uses inode lsm.
1280          */
1281         cl_inode_fini(inode);
1282         lli->lli_has_smd = false;
1283 }
1284
1285 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
1286
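/*
 * Send a setattr RPC to the MDS and apply the returned attributes to the
 * local inode; the I/O epoch handle is saved for a later done_writing.
 */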
1287 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1288                          struct md_open_data **mod)
1289 {
1290         struct lustre_md md;
1291         struct inode *inode = d_inode(dentry);
1292         struct ll_sb_info *sbi = ll_i2sbi(inode);
1293         struct ptlrpc_request *request = NULL;
1294         int rc, ia_valid;
1295
1296         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1297                                      LUSTRE_OPC_ANY, NULL);
1298         if (IS_ERR(op_data))
1299                 return PTR_ERR(op_data);
1300
1301         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1302                         &request, mod);
1303         if (rc) {
1304                 ptlrpc_req_finished(request);
1305                 if (rc == -ENOENT) {
1306                         clear_nlink(inode);
1307                         /* Unlinked special device node? Or just a race?
1308                          * Pretend we did everything.
1309                          */
1310                         if (!S_ISREG(inode->i_mode) &&
1311                             !S_ISDIR(inode->i_mode)) {
1312                                 ia_valid = op_data->op_attr.ia_valid;
1313                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1314                                 rc = simple_setattr(dentry, &op_data->op_attr);
1315                                 op_data->op_attr.ia_valid = ia_valid;
1316                         }
1317                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1318                         CERROR("md_setattr fails: rc = %d\n", rc);
1319                 }
1320                 return rc;
1321         }
1322
1323         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1324                               sbi->ll_md_exp, &md);
1325         if (rc) {
1326                 ptlrpc_req_finished(request);
1327                 return rc;
1328         }
1329
1330         ia_valid = op_data->op_attr.ia_valid;
 1331         /* inode size will be set in cl_setattr_ost; we can't do it now since
 1332          * the dirty cache is not cleared yet.
1333          */
1334         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1335         rc = simple_setattr(dentry, &op_data->op_attr);
1336         op_data->op_attr.ia_valid = ia_valid;
1337
1338         /* Extract epoch data if obtained. */
1339         op_data->op_handle = md.body->mbo_handle;
1340         op_data->op_ioepoch = md.body->mbo_ioepoch;
1341
1342         rc = ll_update_inode(inode, &md);
1343         ptlrpc_req_finished(request);
1344
1345         return rc;
1346 }
1347
1348 /* Close IO epoch and send Size-on-MDS attribute update. */
1349 static int ll_setattr_done_writing(struct inode *inode,
1350                                    struct md_op_data *op_data,
1351                                    struct md_open_data *mod)
1352 {
1353         struct ll_inode_info *lli = ll_i2info(inode);
1354         int rc = 0;
1355
1356         if (!S_ISREG(inode->i_mode))
1357                 return 0;
1358
1359         CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1360                op_data->op_ioepoch, PFID(&lli->lli_fid));
1361
1362         op_data->op_flags = MF_EPOCH_CLOSE;
1363         ll_done_writing_attr(inode, op_data);
1364         ll_pack_inode2opdata(inode, op_data, NULL);
1365
1366         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1367         if (rc == -EAGAIN)
1368                 /* MDS has instructed us to obtain Size-on-MDS attribute
 1369                  * from OSTs and send the setattr back to the MDS.
1370                  */
1371                 rc = ll_som_update(inode, op_data);
1372         else if (rc) {
1373                 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
1374                        ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
1375                        PFID(ll_inode2fid(inode)), rc);
1376         }
1377         return rc;
1378 }
1379
1380 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1381  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1382  * keep these values until such a time that objects are allocated for it.
1383  * We do the MDS operations first, as it is checking permissions for us.
 1384  * We don't do the MDS RPC if there is nothing that we want to store there;
 1385  * otherwise there is no harm in updating mtime/atime on the MDS if we are
 1386  * going to do an RPC anyway.
1387  *
1388  * If we are doing a truncate, we will send the mtime and ctime updates
1389  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1390  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1391  * at the same time.
1392  *
 1393  * In the case of an HSM import, we only set attributes on the MDS.
1394  */
1395 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1396 {
1397         struct inode *inode = d_inode(dentry);
1398         struct ll_inode_info *lli = ll_i2info(inode);
1399         struct md_op_data *op_data = NULL;
1400         struct md_open_data *mod = NULL;
1401         bool file_is_released = false;
1402         int rc = 0, rc1 = 0;
1403
1404         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
1405                ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
1406                i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
1407
1408         if (attr->ia_valid & ATTR_SIZE) {
1409                 /* Check new size against VFS/VM file size limit and rlimit */
1410                 rc = inode_newsize_ok(inode, attr->ia_size);
1411                 if (rc)
1412                         return rc;
1413
1414                 /* The maximum Lustre file size is variable, based on the
1415                  * OST maximum object size and number of stripes.  This
1416                  * needs another check in addition to the VFS check above.
1417                  */
1418                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1419                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
1420                                PFID(&lli->lli_fid), attr->ia_size,
1421                                ll_file_maxbytes(inode));
1422                         return -EFBIG;
1423                 }
1424
1425                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1426         }
1427
1428         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1429         if (attr->ia_valid & TIMES_SET_FLAGS) {
1430                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1431                     !capable(CFS_CAP_FOWNER))
1432                         return -EPERM;
1433         }
1434
1435         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1436         if (attr->ia_valid & ATTR_CTIME) {
1437                 attr->ia_ctime = CURRENT_TIME;
1438                 attr->ia_valid |= ATTR_CTIME_SET;
1439         }
1440         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1441             (attr->ia_valid & ATTR_ATIME)) {
1442                 attr->ia_atime = CURRENT_TIME;
1443                 attr->ia_valid |= ATTR_ATIME_SET;
1444         }
1445         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1446             (attr->ia_valid & ATTR_MTIME)) {
1447                 attr->ia_mtime = CURRENT_TIME;
1448                 attr->ia_valid |= ATTR_MTIME_SET;
1449         }
1450
1451         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1452                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
1453                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1454                        (s64)ktime_get_real_seconds());
1455
1456         /* We always do an MDS RPC, even if we're only changing the size;
1457          * only the MDS knows whether truncate() should fail with -ETXTBUSY
1458          */
1459
1460         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
1461         if (!op_data)
1462                 return -ENOMEM;
1463
1464         if (!S_ISDIR(inode->i_mode))
1465                 inode_unlock(inode);
1466
1467         /* A truncate on a released file must fail with -ENODATA, so the
1468          * size must not be set on the MDS for a released file, but other
1469          * attributes must still be set.
1470          */
1471         if (S_ISREG(inode->i_mode)) {
1472                 struct lov_stripe_md *lsm;
1473                 __u32 gen;
1474
1475                 ll_layout_refresh(inode, &gen);
1476                 lsm = ccc_inode_lsm_get(inode);
1477                 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
1478                         file_is_released = true;
1479                 ccc_inode_lsm_put(inode, lsm);
1480
1481                 if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1482                         if (file_is_released) {
1483                                 rc = ll_layout_restore(inode, 0, attr->ia_size);
1484                                 if (rc < 0)
1485                                         goto out;
1486
1487                                 file_is_released = false;
1488                                 ll_layout_refresh(inode, &gen);
1489                         }
1490
1491                         /*
1492                          * If we are changing the file size, the file
1493                          * content is modified; flag it.
1494                          */
1495                         attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1496                         spin_lock(&lli->lli_lock);
1497                         lli->lli_flags |= LLIF_DATA_MODIFIED;
1498                         spin_unlock(&lli->lli_lock);
1499                         op_data->op_bias |= MDS_DATA_MODIFIED;
1500                 }
1501         }
1502
1503         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1504
1505         /* Open epoch for truncate. */
1506         if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
1507             (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1508                 op_data->op_flags = MF_EPOCH_OPEN;
1509
1510         rc = ll_md_setattr(dentry, op_data, &mod);
1511         if (rc)
1512                 goto out;
1513
1514         /* The RPC to the MDT has been sent; clear the data modification flag */
1515         if (op_data->op_bias & MDS_DATA_MODIFIED) {
1516                 spin_lock(&lli->lli_lock);
1517                 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1518                 spin_unlock(&lli->lli_lock);
1519         }
1520
1521         ll_ioepoch_open(lli, op_data->op_ioepoch);
1522         if (!S_ISREG(inode->i_mode) || file_is_released) {
1523                 rc = 0;
1524                 goto out;
1525         }
1526
1527         if (attr->ia_valid & (ATTR_SIZE |
1528                               ATTR_ATIME | ATTR_ATIME_SET |
1529                               ATTR_MTIME | ATTR_MTIME_SET)) {
1530                 /* For truncate and utimes sending attributes to OSTs, setting
1531                  * mtime/atime to the past will be performed under PW [0:EOF]
1532                  * extent lock (new_size:EOF for truncate).  It may seem
1533                  * excessive to send mtime/atime updates to OSTs when not
1534                  * setting times to the past, but it is necessary due to possible
1535                  * time de-synchronization between the MDT inode and OST objects.
1536                  */
1537                 if (attr->ia_valid & ATTR_SIZE)
1538                         down_write(&lli->lli_trunc_sem);
1539                 rc = cl_setattr_ost(inode, attr);
1540                 if (attr->ia_valid & ATTR_SIZE)
1541                         up_write(&lli->lli_trunc_sem);
1542         }
1543 out:
1544         if (op_data->op_ioepoch) {
1545                 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1546                 if (!rc)
1547                         rc = rc1;
1548         }
1549         ll_finish_md_op_data(op_data);
1550
1551         if (!S_ISDIR(inode->i_mode)) {
1552                 inode_lock(inode);
1553                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1554                         inode_dio_wait(inode);
1555         }
1556
1557         ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1558                         LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1559
1560         return rc;
1561 }
1562
1563 int ll_setattr(struct dentry *de, struct iattr *attr)
1564 {
1565         int mode = d_inode(de)->i_mode;
1566
1567         if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
1568                               (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
1569                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1570
1571         if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
1572                                (ATTR_SIZE | ATTR_MODE)) &&
1573             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1574              (((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
1575               !(attr->ia_mode & S_ISGID))))
1576                 attr->ia_valid |= ATTR_FORCE;
1577
1578         if ((attr->ia_valid & ATTR_MODE) &&
1579             (mode & S_ISUID) &&
1580             !(attr->ia_mode & S_ISUID) &&
1581             !(attr->ia_valid & ATTR_KILL_SUID))
1582                 attr->ia_valid |= ATTR_KILL_SUID;
1583
1584         if ((attr->ia_valid & ATTR_MODE) &&
1585             ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
1586             !(attr->ia_mode & S_ISGID) &&
1587             !(attr->ia_valid & ATTR_KILL_SGID))
1588                 attr->ia_valid |= ATTR_KILL_SGID;
1589
1590         return ll_setattr_raw(de, attr, false);
1591 }
1592
1593 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1594                        __u64 max_age, __u32 flags)
1595 {
1596         struct ll_sb_info *sbi = ll_s2sbi(sb);
1597         struct obd_statfs obd_osfs;
1598         int rc;
1599
1600         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1601         if (rc) {
1602                 CERROR("md_statfs fails: rc = %d\n", rc);
1603                 return rc;
1604         }
1605
1606         osfs->os_type = sb->s_magic;
1607
1608         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
1609                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
1610                osfs->os_files);
1611
1612         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1613                 flags |= OBD_STATFS_NODELAY;
1614
1615         rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1616         if (rc) {
1617                 CERROR("obd_statfs fails: rc = %d\n", rc);
1618                 return rc;
1619         }
1620
1621         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
1622                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1623                obd_osfs.os_files);
1624
1625         osfs->os_bsize = obd_osfs.os_bsize;
1626         osfs->os_blocks = obd_osfs.os_blocks;
1627         osfs->os_bfree = obd_osfs.os_bfree;
1628         osfs->os_bavail = obd_osfs.os_bavail;
1629
1630         /* If we don't have as many objects free on the OST as inodes
1631          * on the MDS, we reduce the total number of inodes to
1632          * compensate, so that the "inodes in use" number is correct.
1633          */
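        /*
         * A small worked example with made-up numbers: if the MDS reports
         * os_files = 1000 and os_ffree = 900 (100 inodes in use) while the
         * OSTs report os_ffree = 500, the result is os_files = 100 + 500 = 600
         * and os_ffree = 500, so "inodes in use" still reads as 100.
         */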
1634         if (obd_osfs.os_ffree < osfs->os_ffree) {
1635                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1636                         obd_osfs.os_ffree;
1637                 osfs->os_ffree = obd_osfs.os_ffree;
1638         }
1639
1640         return rc;
1641 }
1642
1643 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1644 {
1645         struct super_block *sb = de->d_sb;
1646         struct obd_statfs osfs;
1647         int rc;
1648
1649         CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
1650         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1651
1652         /* Some amount of caching on the client is allowed */
1653         rc = ll_statfs_internal(sb, &osfs,
1654                                 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1655                                 0);
1656         if (rc)
1657                 return rc;
1658
1659         statfs_unpack(sfs, &osfs);
1660
1661         /* We need to downshift for all 32-bit kernels, because we can't
1662          * tell if the kernel is being called via sys_statfs64() or not.
1663          * Stop before overflowing f_bsize - in which case it is better
1664          * to just risk EOVERFLOW if caller is using old sys_statfs().
1665          */
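        /*
         * A worked example with hypothetical sizes: with f_bsize = 4096 and
         * os_blocks = 2^33 (a 32TB filesystem), two iterations of the loop
         * below give f_bsize = 16384 and os_blocks = 2^31, which fits in a
         * 32-bit unsigned long.
         */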
1666         if (sizeof(long) < 8) {
1667                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1668                         sfs->f_bsize <<= 1;
1669
1670                         osfs.os_blocks >>= 1;
1671                         osfs.os_bfree >>= 1;
1672                         osfs.os_bavail >>= 1;
1673                 }
1674         }
1675
1676         sfs->f_blocks = osfs.os_blocks;
1677         sfs->f_bfree = osfs.os_bfree;
1678         sfs->f_bavail = osfs.os_bavail;
1679         sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
1680         return 0;
1681 }
1682
1683 void ll_inode_size_lock(struct inode *inode)
1684 {
1685         struct ll_inode_info *lli;
1686
1687         LASSERT(!S_ISDIR(inode->i_mode));
1688
1689         lli = ll_i2info(inode);
1690         mutex_lock(&lli->lli_size_mutex);
1691 }
1692
1693 void ll_inode_size_unlock(struct inode *inode)
1694 {
1695         struct ll_inode_info *lli;
1696
1697         lli = ll_i2info(inode);
1698         mutex_unlock(&lli->lli_size_mutex);
1699 }
1700
1701 int ll_update_inode(struct inode *inode, struct lustre_md *md)
1702 {
1703         struct ll_inode_info *lli = ll_i2info(inode);
1704         struct mdt_body *body = md->body;
1705         struct lov_stripe_md *lsm = md->lsm;
1706         struct ll_sb_info *sbi = ll_i2sbi(inode);
1707
1708         LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
1709         if (lsm) {
1710                 if (!lli->lli_has_smd &&
1711                     !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1712                         cl_file_inode_init(inode, md);
1713
1714                 lli->lli_maxbytes = lsm->lsm_maxbytes;
1715                 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1716                         lli->lli_maxbytes = MAX_LFS_FILESIZE;
1717         }
1718
1719         if (S_ISDIR(inode->i_mode)) {
1720                 int rc;
1721
1722                 rc = ll_update_lsm_md(inode, md);
1723                 if (rc)
1724                         return rc;
1725         }
1726
1727 #ifdef CONFIG_FS_POSIX_ACL
1728         if (body->mbo_valid & OBD_MD_FLACL) {
1729                 spin_lock(&lli->lli_lock);
1730                 if (lli->lli_posix_acl)
1731                         posix_acl_release(lli->lli_posix_acl);
1732                 lli->lli_posix_acl = md->posix_acl;
1733                 spin_unlock(&lli->lli_lock);
1734         }
1735 #endif
1736         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
1737                                         sbi->ll_flags & LL_SBI_32BIT_API);
1738         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
1739
1740         if (body->mbo_valid & OBD_MD_FLATIME) {
1741                 if (body->mbo_atime > LTIME_S(inode->i_atime))
1742                         LTIME_S(inode->i_atime) = body->mbo_atime;
1743                 lli->lli_atime = body->mbo_atime;
1744         }
1745         if (body->mbo_valid & OBD_MD_FLMTIME) {
1746                 if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
1747                         CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
1748                                inode->i_ino, LTIME_S(inode->i_mtime),
1749                                body->mbo_mtime);
1750                         LTIME_S(inode->i_mtime) = body->mbo_mtime;
1751                 }
1752                 lli->lli_mtime = body->mbo_mtime;
1753         }
1754         if (body->mbo_valid & OBD_MD_FLCTIME) {
1755                 if (body->mbo_ctime > LTIME_S(inode->i_ctime))
1756                         LTIME_S(inode->i_ctime) = body->mbo_ctime;
1757                 lli->lli_ctime = body->mbo_ctime;
1758         }
1759         if (body->mbo_valid & OBD_MD_FLMODE)
1760                 inode->i_mode = (inode->i_mode & S_IFMT) |
1761                                 (body->mbo_mode & ~S_IFMT);
1762         if (body->mbo_valid & OBD_MD_FLTYPE)
1763                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1764                                 (body->mbo_mode & S_IFMT);
1765         LASSERT(inode->i_mode != 0);
1766         if (S_ISREG(inode->i_mode))
1767                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
1768                                        LL_MAX_BLKSIZE_BITS);
1769         else
1770                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1771         if (body->mbo_valid & OBD_MD_FLUID)
1772                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
1773         if (body->mbo_valid & OBD_MD_FLGID)
1774                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
1775         if (body->mbo_valid & OBD_MD_FLFLAGS)
1776                 inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
1777         if (body->mbo_valid & OBD_MD_FLNLINK)
1778                 set_nlink(inode, body->mbo_nlink);
1779         if (body->mbo_valid & OBD_MD_FLRDEV)
1780                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
1781
1782         if (body->mbo_valid & OBD_MD_FLID) {
1783                 /* FID shouldn't be changed! */
1784                 if (fid_is_sane(&lli->lli_fid)) {
1785                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
1786                                  "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
1787                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
1788                                  PFID(ll_inode2fid(inode)), inode);
1789                 } else {
1790                         lli->lli_fid = body->mbo_fid1;
1791                 }
1792         }
1793
1794         LASSERT(fid_seq(&lli->lli_fid) != 0);
1795
1796         if (body->mbo_valid & OBD_MD_FLSIZE) {
1797                 if (exp_connect_som(ll_i2mdexp(inode)) &&
1798                     S_ISREG(inode->i_mode)) {
1799                         struct lustre_handle lockh;
1800                         enum ldlm_mode mode;
1801
1802                         /* As it is possible a blocking ast has been processed
1803                          * by this time, we need to check that there is an UPDATE
1804                          * lock on the client and set LLIF_MDS_SIZE_LOCK while
1805                          * holding it.
1806                          */
1807                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1808                                                &lockh, LDLM_FL_CBPENDING,
1809                                                LCK_CR | LCK_CW |
1810                                                LCK_PR | LCK_PW);
1811                         if (mode) {
1812                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1813                                                       LLIF_EPOCH_PENDING |
1814                                                       LLIF_SOM_DIRTY)) {
1815                                         CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
1816                                                sbi->ll_md_exp->exp_obd->obd_name,
1817                                                PFID(ll_inode2fid(inode)),
1818                                                lli->lli_flags);
1819                                 } else {
1820                                         /* Use old size assignment to avoid
1821                                          * deadlock bz14138 & bz14326
1822                                          */
1823                                         i_size_write(inode, body->mbo_size);
1824                                         spin_lock(&lli->lli_lock);
1825                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1826                                         spin_unlock(&lli->lli_lock);
1827                                 }
1828                                 ldlm_lock_decref(&lockh, mode);
1829                         }
1830                 } else {
1831                         /* Use old size assignment to avoid
1832                          * deadlock bz14138 & bz14326
1833                          */
1834                         i_size_write(inode, body->mbo_size);
1835
1836                         CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1837                                inode->i_ino, (unsigned long long)body->mbo_size);
1838                 }
1839
1840                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
1841                         inode->i_blocks = body->mbo_blocks;
1842         }
1843
1844         if (body->mbo_valid & OBD_MD_TSTATE) {
1845                 if (body->mbo_t_state & MS_RESTORE)
1846                         lli->lli_flags |= LLIF_FILE_RESTORING;
1847         }
1848
1849         return 0;
1850 }
1851
1852 int ll_read_inode2(struct inode *inode, void *opaque)
1853 {
1854         struct lustre_md *md = opaque;
1855         struct ll_inode_info *lli = ll_i2info(inode);
1856         int rc;
1857
1858         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1859                PFID(&lli->lli_fid), inode);
1860
1861         LASSERT(!lli->lli_has_smd);
1862
1863         /* Core attributes from the MDS first.  This is a new inode, and
1864          * the VFS doesn't zero times in the core inode so we have to do
1865          * it ourselves.  They will be overwritten by either MDS or OST
1866          * attributes - we just need to make sure they aren't newer.
1867          */
1868         LTIME_S(inode->i_mtime) = 0;
1869         LTIME_S(inode->i_atime) = 0;
1870         LTIME_S(inode->i_ctime) = 0;
1871         inode->i_rdev = 0;
1872         rc = ll_update_inode(inode, md);
1873         if (rc)
1874                 return rc;
1875
1876         /* OIDEBUG(inode); */
1877
1878         if (S_ISREG(inode->i_mode)) {
1879                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1880
1881                 inode->i_op = &ll_file_inode_operations;
1882                 inode->i_fop = sbi->ll_fop;
1883                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
1884         } else if (S_ISDIR(inode->i_mode)) {
1885                 inode->i_op = &ll_dir_inode_operations;
1886                 inode->i_fop = &ll_dir_operations;
1887         } else if (S_ISLNK(inode->i_mode)) {
1888                 inode->i_op = &ll_fast_symlink_inode_operations;
1889         } else {
1890                 inode->i_op = &ll_special_inode_operations;
1891
1892                 init_special_inode(inode, inode->i_mode,
1893                                    inode->i_rdev);
1894         }
1895
1896         return 0;
1897 }
1898
1899 void ll_delete_inode(struct inode *inode)
1900 {
1901         struct ll_inode_info *lli = ll_i2info(inode);
1902
1903         if (S_ISREG(inode->i_mode) && lli->lli_clob)
1904                 /* discard all dirty pages before truncating them, required by
1905                  * osc_extent implementation at LU-1030.
1906                  */
1907                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1908                                    CL_FSYNC_LOCAL, 1);
1909
1910         truncate_inode_pages_final(&inode->i_data);
1911
1912         LASSERTF(!inode->i_data.nrpages,
1913                  "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1914                  PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
1915
1916         ll_clear_inode(inode);
1917         clear_inode(inode);
1918 }
1919
1920 int ll_iocontrol(struct inode *inode, struct file *file,
1921                  unsigned int cmd, unsigned long arg)
1922 {
1923         struct ll_sb_info *sbi = ll_i2sbi(inode);
1924         struct ptlrpc_request *req = NULL;
1925         int rc, flags = 0;
1926
1927         switch (cmd) {
1928         case FSFILT_IOC_GETFLAGS: {
1929                 struct mdt_body *body;
1930                 struct md_op_data *op_data;
1931
1932                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1933                                              0, 0, LUSTRE_OPC_ANY,
1934                                              NULL);
1935                 if (IS_ERR(op_data))
1936                         return PTR_ERR(op_data);
1937
1938                 op_data->op_valid = OBD_MD_FLFLAGS;
1939                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1940                 ll_finish_md_op_data(op_data);
1941                 if (rc) {
1942                         CERROR("%s: failure inode "DFID": rc = %d\n",
1943                                sbi->ll_md_exp->exp_obd->obd_name,
1944                                PFID(ll_inode2fid(inode)), rc);
1945                         return -abs(rc);
1946                 }
1947
1948                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1949
1950                 flags = body->mbo_flags;
1951
1952                 ptlrpc_req_finished(req);
1953
1954                 return put_user(flags, (int __user *)arg);
1955         }
1956         case FSFILT_IOC_SETFLAGS: {
1957                 struct lov_stripe_md *lsm;
1958                 struct obd_info oinfo = { };
1959                 struct md_op_data *op_data;
1960
1961                 if (get_user(flags, (int __user *)arg))
1962                         return -EFAULT;
1963
1964                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1965                                              LUSTRE_OPC_ANY, NULL);
1966                 if (IS_ERR(op_data))
1967                         return PTR_ERR(op_data);
1968
1969                 op_data->op_attr_flags = flags;
1970                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1971                 rc = md_setattr(sbi->ll_md_exp, op_data,
1972                                 NULL, 0, NULL, 0, &req, NULL);
1973                 ll_finish_md_op_data(op_data);
1974                 ptlrpc_req_finished(req);
1975                 if (rc)
1976                         return rc;
1977
1978                 inode->i_flags = ll_ext_to_inode_flags(flags);
1979
1980                 lsm = ccc_inode_lsm_get(inode);
1981                 if (!lsm_has_objects(lsm)) {
1982                         ccc_inode_lsm_put(inode, lsm);
1983                         return 0;
1984                 }
1985
1986                 oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
1987                 if (!oinfo.oi_oa) {
1988                         ccc_inode_lsm_put(inode, lsm);
1989                         return -ENOMEM;
1990                 }
1991                 oinfo.oi_md = lsm;
1992                 oinfo.oi_oa->o_oi = lsm->lsm_oi;
1993                 oinfo.oi_oa->o_flags = flags;
1994                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1995                                        OBD_MD_FLGROUP;
1996                 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
1997                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1998                 kmem_cache_free(obdo_cachep, oinfo.oi_oa);
1999                 ccc_inode_lsm_put(inode, lsm);
2000
2001                 if (rc && rc != -EPERM && rc != -EACCES)
2002                         CERROR("osc_setattr_async fails: rc = %d\n", rc);
2003
2004                 return rc;
2005         }
2006         default:
2007                 return -ENOSYS;
2008         }
2009
2010         return 0;
2011 }
2012
2013 int ll_flush_ctx(struct inode *inode)
2014 {
2015         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2016
2017         CDEBUG(D_SEC, "flush context for user %d\n",
2018                from_kuid(&init_user_ns, current_uid()));
2019
2020         obd_set_info_async(NULL, sbi->ll_md_exp,
2021                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2022                            0, NULL, NULL);
2023         obd_set_info_async(NULL, sbi->ll_dt_exp,
2024                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2025                            0, NULL, NULL);
2026         return 0;
2027 }
2028
2029 /* umount -f client means force down, don't save state */
2030 void ll_umount_begin(struct super_block *sb)
2031 {
2032         struct ll_sb_info *sbi = ll_s2sbi(sb);
2033         struct obd_device *obd;
2034         struct obd_ioctl_data *ioc_data;
2035
2036         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2037                sb->s_count, atomic_read(&sb->s_active));
2038
2039         obd = class_exp2obd(sbi->ll_md_exp);
2040         if (!obd) {
2041                 CERROR("Invalid MDC connection handle %#llx\n",
2042                        sbi->ll_md_exp->exp_handle.h_cookie);
2043                 return;
2044         }
2045         obd->obd_force = 1;
2046
2047         obd = class_exp2obd(sbi->ll_dt_exp);
2048         if (!obd) {
2049                 CERROR("Invalid LOV connection handle %#llx\n",
2050                        sbi->ll_dt_exp->exp_handle.h_cookie);
2051                 return;
2052         }
2053         obd->obd_force = 1;
2054
2055         ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
2056         if (ioc_data) {
2057                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2058                               sizeof(*ioc_data), ioc_data, NULL);
2059
2060                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2061                               sizeof(*ioc_data), ioc_data, NULL);
2062
2063                 kfree(ioc_data);
2064         }
2065
2066         /* Really, we'd like to wait until there are no requests outstanding,
2067          * and then continue.  For now, we just invalidate the requests,
2068          * schedule() and sleep one second if needed, and hope.
2069          */
2070         schedule();
2071 }
2072
2073 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2074 {
2075         struct ll_sb_info *sbi = ll_s2sbi(sb);
2076         char *profilenm = get_profile_name(sb);
2077         int err;
2078         __u32 read_only;
2079
2080         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2081                 read_only = *flags & MS_RDONLY;
2082                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2083                                          sizeof(KEY_READ_ONLY),
2084                                          KEY_READ_ONLY, sizeof(read_only),
2085                                          &read_only, NULL);
2086                 if (err) {
2087                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2088                                       profilenm, read_only ?
2089                                       "read-only" : "read-write", err);
2090                         return err;
2091                 }
2092
2093                 if (read_only)
2094                         sb->s_flags |= MS_RDONLY;
2095                 else
2096                         sb->s_flags &= ~MS_RDONLY;
2097
2098                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2099                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2100                                       read_only ?  "read-only" : "read-write");
2101         }
2102         return 0;
2103 }
2104
2105 /**
2106  * Clean up the open handle that is cached on the MDT side.
2107  *
2108  * In the open case, the client-side open handling thread may hit an error
2109  * after the MDT has granted the open. In that case, the client should
2110  * send a close RPC to the MDT as cleanup; otherwise, the open handle on
2111  * the MDT is leaked there until the client unmounts or is evicted.
2112  *
2113  * Furthermore, if someone unlinks the file, the open handle holds a
2114  * reference on the file/object and will block subsequent threads that
2115  * want to locate the object via its FID.
2116  *
2117  * \param[in] sb        super block for this file-system
2118  * \param[in] open_req  pointer to the original open request
2119  */
2120 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2121 {
2122         struct mdt_body                 *body;
2123         struct md_op_data               *op_data;
2124         struct ptlrpc_request           *close_req = NULL;
2125         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2126
2127         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2128         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2129         if (!op_data)
2130                 return;
2131
2132         op_data->op_fid1 = body->mbo_fid1;
2133         op_data->op_ioepoch = body->mbo_ioepoch;
2134         op_data->op_handle = body->mbo_handle;
2135         op_data->op_mod_time = get_seconds();
2136         md_close(exp, op_data, NULL, &close_req);
2137         ptlrpc_req_finished(close_req);
2138         ll_finish_md_op_data(op_data);
2139 }
2140
2141 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2142                   struct super_block *sb, struct lookup_intent *it)
2143 {
2144         struct ll_sb_info *sbi = NULL;
2145         struct lustre_md md = { NULL };
2146         int rc;
2147
2148         LASSERT(*inode || sb);
2149         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2150         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2151                               sbi->ll_md_exp, &md);
2152         if (rc)
2153                 goto cleanup;
2154
2155         if (*inode) {
2156                 rc = ll_update_inode(*inode, &md);
2157                 if (rc)
2158                         goto out;
2159         } else {
2160                 LASSERT(sb);
2161
2162                 /*
2163                  * At this point the server returns the same fid the client
2164                  * generated for the create, so using ->fid1 is okay here.
2165                  */
2166                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2167                         CERROR("%s: Fid is insane " DFID "\n",
2168                                ll_get_fsname(sb, NULL, 0),
2169                                PFID(&md.body->mbo_fid1));
2170                         rc = -EINVAL;
2171                         goto out;
2172                 }
2173
2174                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2175                                              sbi->ll_flags & LL_SBI_32BIT_API),
2176                                  &md);
2177                 if (IS_ERR(*inode)) {
2178 #ifdef CONFIG_FS_POSIX_ACL
2179                         if (md.posix_acl) {
2180                                 posix_acl_release(md.posix_acl);
2181                                 md.posix_acl = NULL;
2182                         }
2183 #endif
2184                         rc = -ENOMEM;
2185                         CERROR("new_inode -fatal: rc %d\n", rc);
2186                         goto out;
2187                 }
2188         }
2189
2190         /* Handle a piggybacked layout lock.
2191          * A layout lock can be piggybacked on getattr and open requests.
2192          * The lsm can be applied to the inode only if it comes with a layout
2193          * lock, otherwise the correct layout may be overwritten, for example:
2194          * 1. proc1: the mdt returns an lsm but does not grant the layout lock
2195          * 2. the layout is changed by another client
2196          * 3. proc2: refreshes the layout and the layout lock is granted
2197          * 4. proc1: applies the now-stale layout from step 1
2198          */
2199         if (it && it->it_lock_mode != 0) {
2200                 struct lustre_handle lockh;
2201                 struct ldlm_lock *lock;
2202
2203                 lockh.cookie = it->it_lock_handle;
2204                 lock = ldlm_handle2lock(&lockh);
2205                 LASSERT(lock);
2206                 if (ldlm_has_layout(lock)) {
2207                         struct cl_object_conf conf;
2208
2209                         memset(&conf, 0, sizeof(conf));
2210                         conf.coc_opc = OBJECT_CONF_SET;
2211                         conf.coc_inode = *inode;
2212                         conf.coc_lock = lock;
2213                         conf.u.coc_md = &md;
2214                         (void)ll_layout_conf(*inode, &conf);
2215                 }
2216                 LDLM_LOCK_PUT(lock);
2217         }
2218
2219 out:
2220         if (md.lsm)
2221                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2222         md_free_lustre_md(sbi->ll_md_exp, &md);
2223
2224 cleanup:
2225         if (rc != 0 && it && it->it_op & IT_OPEN)
2226                 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
2227
2228         return rc;
2229 }
2230
2231 int ll_obd_statfs(struct inode *inode, void __user *arg)
2232 {
2233         struct ll_sb_info *sbi = NULL;
2234         struct obd_export *exp;
2235         char *buf = NULL;
2236         struct obd_ioctl_data *data = NULL;
2237         __u32 type;
2238         int len = 0, rc;
2239
2240         if (!inode) {
2241                 rc = -EINVAL;
2242                 goto out_statfs;
2243         }
2244
2245         sbi = ll_i2sbi(inode);
2246         if (!sbi) {
2247                 rc = -EINVAL;
2248                 goto out_statfs;
2249         }
2250
2251         rc = obd_ioctl_getdata(&buf, &len, arg);
2252         if (rc)
2253                 goto out_statfs;
2254
2255         data = (void *)buf;
2256         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2257             !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2258                 rc = -EINVAL;
2259                 goto out_statfs;
2260         }
2261
2262         if (data->ioc_inllen1 != sizeof(__u32) ||
2263             data->ioc_inllen2 != sizeof(__u32) ||
2264             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2265             data->ioc_plen2 != sizeof(struct obd_uuid)) {
2266                 rc = -EINVAL;
2267                 goto out_statfs;
2268         }
2269
2270         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2271         if (type & LL_STATFS_LMV) {
2272                 exp = sbi->ll_md_exp;
2273         } else if (type & LL_STATFS_LOV) {
2274                 exp = sbi->ll_dt_exp;
2275         } else {
2276                 rc = -ENODEV;
2277                 goto out_statfs;
2278         }
2279
2280         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2281         if (rc)
2282                 goto out_statfs;
2283 out_statfs:
2284         if (buf)
2285                 obd_ioctl_freedata(buf, len);
2286         return rc;
2287 }
2288
2289 int ll_process_config(struct lustre_cfg *lcfg)
2290 {
2291         char *ptr;
2292         void *sb;
2293         struct lprocfs_static_vars lvars;
2294         unsigned long x;
2295         int rc = 0;
2296
2297         lprocfs_llite_init_vars(&lvars);
2298
2299         /* The instance name contains the sb: lustre-client-aacfe000 */
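        /*
         * With that example name, strrchr() leaves ptr pointing at "aacfe000"
         * and kstrtoul(ptr, 16, &x) recovers the superblock pointer value
         * that was embedded in the instance name at setup time.
         */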
2300         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2301         if (!ptr || !*(++ptr))
2302                 return -EINVAL;
2303         rc = kstrtoul(ptr, 16, &x);
2304         if (rc != 0)
2305                 return -EINVAL;
2306         sb = (void *)x;
2307         /* This better be a real Lustre superblock! */
2308         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2309
2310         /* Note we have not called client_common_fill_super yet, so
2311          * proc fns must be able to handle that!
2312          */
2313         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2314                                       lcfg, sb);
2315         if (rc > 0)
2316                 rc = 0;
2317         return rc;
2318 }
2319
2320 /* This function prepares the md_op_data hint for passing it down to the MD stack. */
2321 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2322                                       struct inode *i1, struct inode *i2,
2323                                       const char *name, size_t namelen,
2324                                       u32 mode, __u32 opc, void *data)
2325 {
2326         if (!name) {
2327                 /* Do not reuse namelen for something else. */
2328                 if (namelen)
2329                         return ERR_PTR(-EINVAL);
2330         } else {
2331                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2332                         return ERR_PTR(-ENAMETOOLONG);
2333
2334                 if (!lu_name_is_valid_2(name, namelen))
2335                         return ERR_PTR(-EINVAL);
2336         }
2337
2338         if (!op_data)
2339                 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2340
2341         if (!op_data)
2342                 return ERR_PTR(-ENOMEM);
2343
2344         ll_i2gids(op_data->op_suppgids, i1, i2);
2345         op_data->op_fid1 = *ll_inode2fid(i1);
2346         op_data->op_default_stripe_offset = -1;
2347         if (S_ISDIR(i1->i_mode)) {
2348                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2349                 op_data->op_default_stripe_offset =
2350                         ll_i2info(i1)->lli_def_stripe_offset;
2351         }
2352
2353         if (i2) {
2354                 op_data->op_fid2 = *ll_inode2fid(i2);
2355                 if (S_ISDIR(i2->i_mode))
2356                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2357         } else {
2358                 fid_zero(&op_data->op_fid2);
2359         }
2360
2361         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2362                 op_data->op_cli_flags |= CLI_HASH64;
2363
2364         if (ll_need_32bit_api(ll_i2sbi(i1)))
2365                 op_data->op_cli_flags |= CLI_API32;
2366
2367         op_data->op_name = name;
2368         op_data->op_namelen = namelen;
2369         op_data->op_mode = mode;
2370         op_data->op_mod_time = ktime_get_real_seconds();
2371         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2372         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2373         op_data->op_cap = cfs_curproc_cap_pack();
2374         op_data->op_bias = 0;
2375         op_data->op_cli_flags = 0;
2376         if ((opc == LUSTRE_OPC_CREATE) && name &&
2377             filename_is_volatile(name, namelen, &op_data->op_mds))
2378                 op_data->op_bias |= MDS_CREATE_VOLATILE;
2379         else
2380                 op_data->op_mds = 0;
2381         op_data->op_data = data;
2382
2383         /* When called by ll_setattr_raw, file is i1. */
2384         if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
2385                 op_data->op_bias |= MDS_DATA_MODIFIED;
2386
2387         return op_data;
2388 }
2389
2390 void ll_finish_md_op_data(struct md_op_data *op_data)
2391 {
2392         kfree(op_data);
2393 }
2394
2395 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2396 {
2397         struct ll_sb_info *sbi;
2398
2399         LASSERT(seq && dentry);
2400         sbi = ll_s2sbi(dentry->d_sb);
2401
2402         if (sbi->ll_flags & LL_SBI_NOLCK)
2403                 seq_puts(seq, ",nolock");
2404
2405         if (sbi->ll_flags & LL_SBI_FLOCK)
2406                 seq_puts(seq, ",flock");
2407
2408         if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2409                 seq_puts(seq, ",localflock");
2410
2411         if (sbi->ll_flags & LL_SBI_USER_XATTR)
2412                 seq_puts(seq, ",user_xattr");
2413
2414         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2415                 seq_puts(seq, ",lazystatfs");
2416
2417         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2418                 seq_puts(seq, ",user_fid2path");
2419
2420         return 0;
2421 }
2422
2423 /**
2424  * Get the obd name for the given cmd and copy it out to user space.
2425  */
2426 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2427 {
2428         struct ll_sb_info *sbi = ll_i2sbi(inode);
2429         struct obd_device *obd;
2430
2431         if (cmd == OBD_IOC_GETDTNAME)
2432                 obd = class_exp2obd(sbi->ll_dt_exp);
2433         else if (cmd == OBD_IOC_GETMDNAME)
2434                 obd = class_exp2obd(sbi->ll_md_exp);
2435         else
2436                 return -EINVAL;
2437
2438         if (!obd)
2439                 return -ENOENT;
2440
2441         if (copy_to_user((void __user *)arg, obd->obd_name,
2442                          strlen(obd->obd_name) + 1))
2443                 return -EFAULT;
2444
2445         return 0;
2446 }
2447
2448 /**
2449  * Get the lustre file system name from \a sb. If \a buf is provided
2450  * (non-NULL), the fsname will be returned in this buffer; otherwise, a
2451  * static buffer will be used to store the fsname and returned to the caller.
2452  */
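/*
 * For example, a client profile name of "lustre-client" yields the fsname
 * "lustre"; a profile without the "-client" suffix is copied unchanged,
 * truncated to buflen - 1 characters if necessary.
 */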
2453 char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2454 {
2455         static char fsname_static[MTI_NAME_MAXLEN];
2456         struct lustre_sb_info *lsi = s2lsi(sb);
2457         char *ptr;
2458         int len;
2459
2460         if (!buf) {
2461                 /* This means the caller wants to use the static buffer
2462                  * and doesn't care about the race.  Usually this is used
2463                  * in the error reporting path.
2464                  */
2465                 buf = fsname_static;
2466                 buflen = sizeof(fsname_static);
2467         }
2468
2469         len = strlen(lsi->lsi_lmd->lmd_profile);
2470         ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2471         if (ptr && (strcmp(ptr, "-client") == 0))
2472                 len -= 7;
2473
2474         if (unlikely(len >= buflen))
2475                 len = buflen - 1;
2476         strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2477         buf[len] = '\0';
2478
2479         return buf;
2480 }
2481
2482 void ll_dirty_page_discard_warn(struct page *page, int ioret)
2483 {
2484         char *buf, *path = NULL;
2485         struct dentry *dentry = NULL;
2486         struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
2487
2488         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
2489         buf = (char *)__get_free_page(GFP_ATOMIC);
2490         if (buf) {
2491                 dentry = d_find_alias(page->mapping->host);
2492                 if (dentry)
2493                         path = dentry_path_raw(dentry, buf, PAGE_SIZE);
2494         }
2495
2496         CDEBUG(D_WARNING,
2497                "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2498                ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2499                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2500                PFID(&obj->vob_header.coh_lu.loh_fid),
2501                (path && !IS_ERR(path)) ? path : "", ioret);
2502
2503         if (dentry)
2504                 dput(dentry);
2505
2506         if (buf)
2507                 free_page((unsigned long)buf);
2508 }
2509
2510 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
2511                         struct lov_user_md **kbuf)
2512 {
2513         struct lov_user_md lum;
2514         ssize_t lum_size;
2515
2516         if (copy_from_user(&lum, md, sizeof(lum))) {
2517                 lum_size = -EFAULT;
2518                 goto no_kbuf;
2519         }
2520
2521         lum_size = ll_lov_user_md_size(&lum);
2522         if (lum_size < 0)
2523                 goto no_kbuf;
2524
2525         *kbuf = kzalloc(lum_size, GFP_NOFS);
2526         if (!*kbuf) {
2527                 lum_size = -ENOMEM;
2528                 goto no_kbuf;
2529         }
2530
2531         if (copy_from_user(*kbuf, md, lum_size) != 0) {
2532                 kfree(*kbuf);
2533                 *kbuf = NULL;
2534                 lum_size = -EFAULT;
2535         }
2536 no_kbuf:
2537         return lum_size;
2538 }
2539
2540 /*
2541  * Compute the llite root squash state after a change of the root squash
2542  * configuration setting or the addition/removal of an lnet nid
2543  */
2544 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
2545 {
2546         struct root_squash_info *squash = &sbi->ll_squash;
2547         lnet_process_id_t id;
2548         bool matched;
2549         int i;
2550
2551         /* Update norootsquash flag */
2552         down_write(&squash->rsi_sem);
2553         if (list_empty(&squash->rsi_nosquash_nids)) {
2554                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2555         } else {
2556                 /*
2557                  * Do not apply root squash if one of our NIDs is in
2558                  * the nosquash_nids list
2559                  */
2560                 matched = false;
2561                 i = 0;
2562
2563                 while (LNetGetId(i++, &id) != -ENOENT) {
2564                         if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
2565                                 continue;
2566                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
2567                                 matched = true;
2568                                 break;
2569                         }
2570                 }
2571                 if (matched)
2572                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
2573                 else
2574                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2575         }
2576         up_write(&squash->rsi_sem);
2577 }
2578
2579 /**
2580  * Parse linkea content to extract information about a given hardlink
2581  *
2582  * \param[in]   ldata           - Initialized linkea data
2583  * \param[in]   linkno          - Link identifier
2584  * \param[out]  parent_fid      - The entry's parent FID
2585  * \param[out]  ln              - Entry name destination buffer
2586  *
2587  * \retval 0 on success
2588  * \retval Appropriate negative error code on failure
2589  */
2590 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
2591                             struct lu_fid *parent_fid, struct lu_name *ln)
2592 {
2593         unsigned int idx;
2594         int rc;
2595
2596         rc = linkea_init(ldata);
2597         if (rc < 0)
2598                 return rc;
2599
2600         if (linkno >= ldata->ld_leh->leh_reccount)
2601                 /* beyond last link */
2602                 return -ENODATA;
2603
2604         linkea_first_entry(ldata);
2605         for (idx = 0; ldata->ld_lee; idx++) {
2606                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
2607                                     parent_fid);
2608                 if (idx == linkno)
2609                         break;
2610
2611                 linkea_next_entry(ldata);
2612         }
2613
2614         if (idx < linkno)
2615                 return -ENODATA;
2616
2617         return 0;
2618 }
2619
2620 /**
2621  * Get parent FID and name of an identified link. Operation is performed for
2622  * a given link number, letting the caller iterate over linkno to list one or
2623  * all links of an entry.
2624  *
2625  * \param[in]     file  - File descriptor against which to perform the operation
2626  * \param[in,out] arg   - User-filled structure containing the linkno to operate
2627  *                        on and the available size. It is eventually filled with
2628  *                        the requested information or left untouched on error
2629  *
2630  * \retval - 0 on success
2631  * \retval - Appropriate negative error code on failure
2632  */
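/*
 * A minimal user-space sketch of the expected call pattern (this assumes the
 * LL_IOC_GETPARENT ioctl number and the struct getparent layout exported by
 * lustre_ioctl.h; names outside this file are illustrative only):
 *
 *      size_t name_size = NAME_MAX + 1;
 *      struct getparent *gp = malloc(sizeof(*gp) + name_size);
 *
 *      gp->gp_linkno = 0;              (first hardlink of the file)
 *      gp->gp_name_size = name_size;   (room for the entry name)
 *      if (ioctl(fd, LL_IOC_GETPARENT, gp) == 0)
 *              printf("parent entry name: %s\n", gp->gp_name);
 */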
2633 int ll_getparent(struct file *file, struct getparent __user *arg)
2634 {
2635         struct inode *inode = file_inode(file);
2636         struct linkea_data *ldata;
2637         struct lu_fid parent_fid;
2638         struct lu_buf buf = {
2639                 .lb_buf = NULL,
2640                 .lb_len = 0
2641         };
2642         struct lu_name ln;
2643         u32 name_size;
2644         u32 linkno;
2645         int rc;
2646
2647         if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
2648             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
2649                 return -EPERM;
2650
2651         if (get_user(name_size, &arg->gp_name_size))
2652                 return -EFAULT;
2653
2654         if (get_user(linkno, &arg->gp_linkno))
2655                 return -EFAULT;
2656
2657         if (name_size > PATH_MAX)
2658                 return -EINVAL;
2659
2660         ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
2661         if (!ldata)
2662                 return -ENOMEM;
2663
2664         rc = linkea_data_new(ldata, &buf);
2665         if (rc < 0)
2666                 goto ldata_free;
2667
2668         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
2669                            buf.lb_len, OBD_MD_FLXATTR);
2670         if (rc < 0)
2671                 goto lb_free;
2672
2673         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
2674         if (rc < 0)
2675                 goto lb_free;
2676
2677         if (ln.ln_namelen >= name_size) {
2678                 rc = -EOVERFLOW;
2679                 goto lb_free;
2680         }
2681
2682         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
2683                 rc = -EFAULT;
2684                 goto lb_free;
2685         }
2686
2687         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
2688                 rc = -EFAULT;
2689                 goto lb_free;
2690         }
2691
2692         if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
2693                 rc = -EFAULT;
2694                 goto lb_free;
2695         }
2696
2697 lb_free:
2698         lu_buf_free(&buf);
2699 ldata_free:
2700         kfree(ldata);
2701         return rc;
2702 }