1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/llite/dir.c
37  *
38  * Directory code for lustre client.
39  */
40
41 #include <linux/fs.h>
42 #include <linux/pagemap.h>
43 #include <linux/mm.h>
44 #include <linux/uaccess.h>
45 #include <linux/buffer_head.h>   /* for wait_on_buffer */
46 #include <linux/pagevec.h>
47 #include <linux/prefetch.h>
48
49 #define DEBUG_SUBSYSTEM S_LLITE
50
51 #include "../include/obd_support.h"
52 #include "../include/obd_class.h"
53 #include "../include/lustre_lib.h"
54 #include "../include/lustre/lustre_idl.h"
55 #include "../include/lustre_lite.h"
56 #include "../include/lustre_dlm.h"
57 #include "../include/lustre_fid.h"
58 #include "llite_internal.h"
59
60 /*
61  * (new) readdir implementation overview.
62  *
63  * The original lustre readdir implementation cached an exact copy of raw
64  * directory pages on the client. These pages were indexed in the client
65  * page cache by logical offset in the directory file. This design, while
66  * very simple and intuitive, had some inherent problems:
67  *
68  *     . it implies that the byte offset of a directory entry serves as a
69  *     telldir(3)/seekdir(3) cookie, but that offset is not stable: in
70  *     ext3/htree, directory entries may move due to splits, and, more
71  *     importantly,
72  *
73  *     . it is incompatible with the design of split directories for cmd3,
74  *     that assumes that names are distributed across nodes based on their
75  *     hash, and so readdir should be done in hash order.
76  *
77  * New readdir implementation does readdir in hash order, and uses hash of a
78  * file name as a telldir/seekdir cookie. This led to a number of complications:
79  *
80  *     . hash is not unique, so it cannot be used to index cached directory
81  *     pages on the client (note that it requires a whole pageful of
82  *     hash-collided entries to cause two pages to have identical hashes);
83  *
84  *     . hash is not unique, so it cannot, strictly speaking, be used as an
85  *     entry cookie. ext3/htree has the same problem, and the lustre
86  *     implementation mimics its solution: seekdir(hash) positions the
87  *     directory at the first entry with the given hash.
88  *
89  * Client side.
90  *
91  * 0. caching
92  *
93  * The client caches directory pages using the hash of the first entry as an
94  * index. As noted above, the hash is not unique, so this solution doesn't work
95  * as is: special processing is needed for "page hash chains" (i.e., sequences
96  * of pages filled with entries all having the same hash value).
97  *
98  * First, such chains have to be detected. To this end, the server returns to
99  * the client the hash of the first entry on the page following the one it
100  * returned. When the client detects that this hash equals the hash of the
101  * first entry on the returned page, a page hash collision has to be handled.
102  * Pages in the hash chain, except the first one, are termed "overflow pages".
103  *
104  * The solution to the index uniqueness problem is to not cache overflow
105  * pages. Instead, when a page hash collision is detected, all overflow pages
106  * from the emerging chain are immediately requested from the server and placed
107  * in a special data structure (struct ll_dir_chain), which ll_readdir() uses
108  * to process entries from overflow pages. When the readdir invocation
109  * finishes, the overflow pages are discarded. If the page hash collision chain
110  * wasn't completely processed, the next call to readdir will again detect the
111  * collision, again read the overflow pages in, process the next portion of
112  * entries and again discard the pages. This is not as wasteful as it looks,
113  * because, given a reasonable hash, page hash collisions are extremely rare.
114  *
115  * 1. directory positioning
116  *
117  * When seekdir(hash) is called, original
118  *
119  *
120  *
121  *
122  *
123  *
124  *
125  *
126  * Server.
127  *
128  * identification of and access to overflow pages
129  *
130  * page format
131  *
132  * Pages in the MDS_READPAGE RPC are packed in units of LU_PAGE_SIZE, and each
133  * page contains a header, lu_dirpage, which describes the start/end hash and
134  * whether this page is empty (contains no dir entries) or its hash collides
135  * with the next page. After the client receives the reply, several such pages
136  * are integrated into one dir page of PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE is
137  * greater than LU_PAGE_SIZE), and the lu_dirpage of this integrated page is
138  * adjusted accordingly. See lmv_adjust_dirpages().
139  *
140  */
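/*
 * Illustrative sketch (not compiled code): how entries in a cached directory
 * page are walked in hash order.  This is a simplified rendition of the loop
 * in ll_dir_read() below; lu_dirent_start(), lu_dirent_next() and the lde_*
 * fields are the real iterators and fields used there, while consume() merely
 * stands in for dir_emit():
 *
 *	struct lu_dirpage *dp = page_address(page);
 *	struct lu_dirent *ent;
 *
 *	for (ent = lu_dirent_start(dp); ent != NULL;
 *	     ent = lu_dirent_next(ent)) {
 *		__u64 hash = le64_to_cpu(ent->lde_hash);
 *		int namelen = le16_to_cpu(ent->lde_namelen);
 *
 *		if (namelen == 0)
 *			continue;
 *		consume(ent->lde_name, namelen, hash);
 *	}
 *
 * A zero namelen marks a dummy record; the hash passed to consume() is the
 * value a later telldir()/seekdir() pair hands back to reposition the stream.
 */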
141
142 /* returns the page unlocked, but with a reference */
143 static int ll_dir_filler(void *_hash, struct page *page0)
144 {
145         struct inode *inode = page0->mapping->host;
146         int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
147         struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
148         struct ptlrpc_request *request;
149         struct mdt_body *body;
150         struct md_op_data *op_data;
151         __u64 hash = *((__u64 *)_hash);
152         struct page **page_pool;
153         struct page *page;
154         struct lu_dirpage *dp;
155         int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
156         int nrdpgs = 0; /* number of pages actually read */
157         int npages;
158         int i;
159         int rc;
160
161         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
162                inode->i_ino, inode->i_generation, inode, hash);
163
164         LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
165
166         page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
167         if (page_pool) {
168                 page_pool[0] = page0;
169         } else {
170                 page_pool = &page0;
171                 max_pages = 1;
172         }
173         for (npages = 1; npages < max_pages; npages++) {
174                 page = page_cache_alloc_cold(inode->i_mapping);
175                 if (!page)
176                         break;
177                 page_pool[npages] = page;
178         }
179
180         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
181                                      LUSTRE_OPC_ANY, NULL);
182         op_data->op_npages = npages;
183         op_data->op_offset = hash;
184         rc = md_readpage(exp, op_data, page_pool, &request);
185         ll_finish_md_op_data(op_data);
186         if (rc < 0) {
187                 /* page0 is special: it was added into the page cache early */
188                 delete_from_page_cache(page0);
189         } else if (rc == 0) {
190                 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
191                 /* Checked by mdc_readpage() */
192                 LASSERT(body != NULL);
193
194                 if (body->valid & OBD_MD_FLSIZE)
195                         cl_isize_write(inode, body->size);
196
197                 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
198                          >> PAGE_CACHE_SHIFT;
199                 SetPageUptodate(page0);
200         }
201         unlock_page(page0);
202         ptlrpc_req_finished(request);
203
204         CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
205
206         for (i = 1; i < npages; i++) {
207                 unsigned long offset;
208                 int ret;
209
210                 page = page_pool[i];
211
212                 if (rc < 0 || i >= nrdpgs) {
213                         page_cache_release(page);
214                         continue;
215                 }
216
217                 SetPageUptodate(page);
218
219                 dp = kmap(page);
220                 hash = le64_to_cpu(dp->ldp_hash_start);
221                 kunmap(page);
222
223                 offset = hash_x_index(hash, hash64);
224
225                 prefetchw(&page->flags);
226                 ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
227                                             GFP_NOFS);
228                 if (ret == 0) {
229                         unlock_page(page);
230                 } else {
231                         CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
232                                offset, ret);
233                 }
234                 page_cache_release(page);
235         }
236
237         if (page_pool != &page0)
238                 kfree(page_pool);
239         return rc;
240 }
241
242 static void ll_check_page(struct inode *dir, struct page *page)
243 {
244         /* XXX: check page format later */
245         SetPageChecked(page);
246 }
247
248 void ll_release_page(struct page *page, int remove)
249 {
250         kunmap(page);
251         if (remove) {
252                 lock_page(page);
253                 if (likely(page->mapping != NULL))
254                         truncate_complete_page(page->mapping, page);
255                 unlock_page(page);
256         }
257         page_cache_release(page);
258 }
259
260 /*
261  * Find, kmap and return page that contains given hash.
262  */
263 static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
264                                        __u64 *start, __u64 *end)
265 {
266         int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
267         struct address_space *mapping = dir->i_mapping;
268         /*
269          * Complement of hash is used as an index so that
270          * radix_tree_gang_lookup() can be used to find a page with starting
271          * hash _smaller_ than one we are looking for.
272          */
273         unsigned long offset = hash_x_index(*hash, hash64);
274         struct page *page;
275         int found;
276
277         spin_lock_irq(&mapping->tree_lock);
278         found = radix_tree_gang_lookup(&mapping->page_tree,
279                                        (void **)&page, offset, 1);
280         if (found > 0 && !radix_tree_exceptional_entry(page)) {
281                 struct lu_dirpage *dp;
282
283                 page_cache_get(page);
284                 spin_unlock_irq(&mapping->tree_lock);
285                 /*
286                  * In contrast to find_lock_page() we are sure that directory
287                  * page cannot be truncated (while DLM lock is held) and,
288                  * hence, can avoid restart.
289                  *
290                  * In fact, page cannot be locked here at all, because
291                  * ll_dir_filler() does synchronous io.
292                  */
293                 wait_on_page_locked(page);
294                 if (PageUptodate(page)) {
295                         dp = kmap(page);
296                         if (BITS_PER_LONG == 32 && hash64) {
297                                 *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
298                                 *end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
299                                 *hash  = *hash >> 32;
300                         } else {
301                                 *start = le64_to_cpu(dp->ldp_hash_start);
302                                 *end   = le64_to_cpu(dp->ldp_hash_end);
303                         }
304                         LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
305                                  *start, *end, *hash);
306                         CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
307                                offset, *start, *end, *hash);
308                         if (*hash > *end) {
309                                 ll_release_page(page, 0);
310                                 page = NULL;
311                         } else if (*end != *start && *hash == *end) {
312                                 /*
313                                  * upon hash collision, remove this page,
314                                  * otherwise put page reference, and
315                                  * ll_get_dir_page() will issue RPC to fetch
316                                  * the page we want.
317                                  */
318                                 ll_release_page(page,
319                                     le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
320                                 page = NULL;
321                         }
322                 } else {
323                         page_cache_release(page);
324                         page = ERR_PTR(-EIO);
325                 }
326
327         } else {
328                 spin_unlock_irq(&mapping->tree_lock);
329                 page = NULL;
330         }
331         return page;
332 }
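/*
 * Illustrative sketch (not compiled code) of the "complement of hash" index
 * mapping used above.  The real helper is hash_x_index(), used throughout
 * this file; the simplified version below only shows the idea that a larger
 * hash maps to a smaller page-cache index, so that radix_tree_gang_lookup()
 * starting at that index returns the cached page whose starting hash is the
 * largest one not exceeding the hash we are looking for:
 *
 *	static unsigned long hash_to_index_sketch(__u64 hash, int hash64)
 *	{
 *		if (BITS_PER_LONG == 32 && hash64)
 *			hash >>= 32;
 *		return ~0UL - hash;
 *	}
 */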
333
334 struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
335                              struct ll_dir_chain *chain)
336 {
337         ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
338         struct address_space *mapping = dir->i_mapping;
339         struct lustre_handle lockh;
340         struct lu_dirpage *dp;
341         struct page *page;
342         ldlm_mode_t mode;
343         int rc;
344         __u64 start = 0;
345         __u64 end = 0;
346         __u64 lhash = hash;
347         struct ll_inode_info *lli = ll_i2info(dir);
348         int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
349
350         mode = LCK_PR;
351         rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
352                            ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
353         if (!rc) {
354                 struct ldlm_enqueue_info einfo = {
355                         .ei_type = LDLM_IBITS,
356                         .ei_mode = mode,
357                         .ei_cb_bl = ll_md_blocking_ast,
358                         .ei_cb_cp = ldlm_completion_ast,
359                 };
360                 struct lookup_intent it = { .it_op = IT_READDIR };
361                 struct ptlrpc_request *request;
362                 struct md_op_data *op_data;
363
364                 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
365                                              LUSTRE_OPC_ANY, NULL);
366                 if (IS_ERR(op_data))
367                         return (void *)op_data;
368
369                 rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
370                                 op_data, &lockh, NULL, 0, NULL, 0);
371
372                 ll_finish_md_op_data(op_data);
373
374                 request = (struct ptlrpc_request *)it.d.lustre.it_data;
375                 if (request)
376                         ptlrpc_req_finished(request);
377                 if (rc < 0) {
378                         CERROR("lock enqueue: "DFID" at %llu: rc %d\n",
379                                 PFID(ll_inode2fid(dir)), hash, rc);
380                         return ERR_PTR(rc);
381                 }
382
383                 CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
384                        dir, dir->i_ino, dir->i_generation);
385                 md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
386                                  &it.d.lustre.it_lock_handle, dir, NULL);
387         } else {
388                 /* for cross-ref object, l_ast_data of the lock may not be set,
389                  * we reset it here */
390                 md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
391                                  dir, NULL);
392         }
393         ldlm_lock_dump_handle(D_OTHER, &lockh);
394
395         mutex_lock(&lli->lli_readdir_mutex);
396         page = ll_dir_page_locate(dir, &lhash, &start, &end);
397         if (IS_ERR(page)) {
398                 CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
399                        PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
400                 goto out_unlock;
401         } else if (page != NULL) {
402                 /*
403                  * XXX nikita: not entirely correct handling of a corner case:
404                  * suppose hash chain of entries with hash value HASH crosses
405                  * border between pages P0 and P1. First both P0 and P1 are
406                  * cached, seekdir() is called for some entry from the P0 part
407                  * of the chain. Later P0 goes out of cache. telldir(HASH)
408                  * happens and finds P1, as it starts with matching hash
409                  * value. Remaining entries from P0 part of the chain are
410                  * skipped. (Is that really a bug?)
411                  *
412                  * Possible solutions: 0. don't cache P1 in such case, handle
413                  * it as an "overflow" page. 1. invalidate all pages at
414                  * once. 2. use HASH|1 as an index for P1.
415                  */
416                 goto hash_collision;
417         }
418
419         page = read_cache_page(mapping, hash_x_index(hash, hash64),
420                                ll_dir_filler, &lhash);
421         if (IS_ERR(page)) {
422                 CERROR("read cache page: "DFID" at %llu: rc %ld\n",
423                        PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
424                 goto out_unlock;
425         }
426
427         wait_on_page_locked(page);
428         (void)kmap(page);
429         if (!PageUptodate(page)) {
430                 CERROR("page not updated: "DFID" at %llu: rc %d\n",
431                        PFID(ll_inode2fid(dir)), hash, -EIO);
432                 goto fail;
433         }
434         if (!PageChecked(page))
435                 ll_check_page(dir, page);
436         if (PageError(page)) {
437                 CERROR("page error: "DFID" at %llu: rc %d\n",
438                        PFID(ll_inode2fid(dir)), hash, -EIO);
439                 goto fail;
440         }
441 hash_collision:
442         dp = page_address(page);
443         if (BITS_PER_LONG == 32 && hash64) {
444                 start = le64_to_cpu(dp->ldp_hash_start) >> 32;
445                 end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
446                 lhash = hash >> 32;
447         } else {
448                 start = le64_to_cpu(dp->ldp_hash_start);
449                 end   = le64_to_cpu(dp->ldp_hash_end);
450                 lhash = hash;
451         }
452         if (end == start) {
453                 LASSERT(start == lhash);
454                 CWARN("Page-wide hash collision: %llu\n", end);
455                 if (BITS_PER_LONG == 32 && hash64)
456                         CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
457                               le64_to_cpu(dp->ldp_hash_start),
458                               le64_to_cpu(dp->ldp_hash_end), hash);
459                 /*
460                  * Fetch whole overflow chain...
461                  *
462                  * XXX not yet.
463                  */
464                 goto fail;
465         }
466 out_unlock:
467         mutex_unlock(&lli->lli_readdir_mutex);
468         ldlm_lock_decref(&lockh, mode);
469         return page;
470
471 fail:
472         ll_release_page(page, 1);
473         page = ERR_PTR(-EIO);
474         goto out_unlock;
475 }
476
477 int ll_dir_read(struct inode *inode, struct dir_context *ctx)
478 {
479         struct ll_inode_info *info       = ll_i2info(inode);
480         struct ll_sb_info    *sbi       = ll_i2sbi(inode);
481         __u64              pos          = ctx->pos;
482         int                api32      = ll_need_32bit_api(sbi);
483         int                hash64     = sbi->ll_flags & LL_SBI_64BIT_HASH;
484         struct page       *page;
485         struct ll_dir_chain   chain;
486         int                done = 0;
487         int                rc = 0;
488
489         ll_dir_chain_init(&chain);
490
491         page = ll_get_dir_page(inode, pos, &chain);
492
493         while (rc == 0 && !done) {
494                 struct lu_dirpage *dp;
495                 struct lu_dirent  *ent;
496
497                 if (!IS_ERR(page)) {
498                         /*
499                          * If page is empty (end of directory is reached),
500                          * use this value.
501                          */
502                         __u64 hash = MDS_DIR_END_OFF;
503                         __u64 next;
504
505                         dp = page_address(page);
506                         for (ent = lu_dirent_start(dp); ent != NULL && !done;
507                              ent = lu_dirent_next(ent)) {
508                                 __u16     type;
509                                 int         namelen;
510                                 struct lu_fid  fid;
511                                 __u64     lhash;
512                                 __u64     ino;
513
514                                 /*
515                                  * XXX: implement correct swabbing here.
516                                  */
517
518                                 hash = le64_to_cpu(ent->lde_hash);
519                                 if (hash < pos)
520                                         /*
521                                          * Skip until we find target hash
522                                          * value.
523                                          */
524                                         continue;
525
526                                 namelen = le16_to_cpu(ent->lde_namelen);
527                                 if (namelen == 0)
528                                         /*
529                                          * Skip dummy record.
530                                          */
531                                         continue;
532
533                                 if (api32 && hash64)
534                                         lhash = hash >> 32;
535                                 else
536                                         lhash = hash;
537                                 fid_le_to_cpu(&fid, &ent->lde_fid);
538                                 ino = cl_fid_build_ino(&fid, api32);
539                                 type = ll_dirent_type_get(ent);
540                                 ctx->pos = lhash;
541                                 /* For 'll_nfs_get_name_filldir()', it will try
542                                  * to access the 'ent' through its 'lde_name',
543                                  * so the parameter 'name' for 'ctx->actor()'
544                                  * must be part of the 'ent'.
545                                  */
546                                 done = !dir_emit(ctx, ent->lde_name,
547                                                  namelen, ino, type);
548                         }
549                         next = le64_to_cpu(dp->ldp_hash_end);
550                         if (!done) {
551                                 pos = next;
552                                 if (pos == MDS_DIR_END_OFF) {
553                                         /*
554                                          * End of directory reached.
555                                          */
556                                         done = 1;
557                                         ll_release_page(page, 0);
558                                 } else if (1 /* chain is exhausted */) {
559                                         /*
560                                          * Normal case: continue to the next
561                                          * page.
562                                          */
563                                         ll_release_page(page,
564                                             le32_to_cpu(dp->ldp_flags) &
565                                                         LDF_COLLIDE);
566                                         next = pos;
567                                         page = ll_get_dir_page(inode, pos,
568                                                                &chain);
569                                 } else {
570                                         /*
571                                          * go into overflow page.
572                                          */
573                                         LASSERT(le32_to_cpu(dp->ldp_flags) &
574                                                 LDF_COLLIDE);
575                                         ll_release_page(page, 1);
576                                 }
577                         } else {
578                                 pos = hash;
579                                 ll_release_page(page, 0);
580                         }
581                 } else {
582                         rc = PTR_ERR(page);
583                         CERROR("error reading dir "DFID" at %lu: rc %d\n",
584                                PFID(&info->lli_fid), (unsigned long)pos, rc);
585                 }
586         }
587
588         ctx->pos = pos;
589         ll_dir_chain_fini(&chain);
590         return rc;
591 }
592
593 static int ll_readdir(struct file *filp, struct dir_context *ctx)
594 {
595         struct inode            *inode  = file_inode(filp);
596         struct ll_file_data     *lfd    = LUSTRE_FPRIVATE(filp);
597         struct ll_sb_info       *sbi    = ll_i2sbi(inode);
598         int                     hash64  = sbi->ll_flags & LL_SBI_64BIT_HASH;
599         int                     api32   = ll_need_32bit_api(sbi);
600         int                     rc;
601
602         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
603                inode->i_ino, inode->i_generation,
604                inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
605
606         if (lfd->lfd_pos == MDS_DIR_END_OFF) {
607                 /*
608                  * end-of-file.
609                  */
610                 rc = 0;
611                 goto out;
612         }
613
614         ctx->pos = lfd->lfd_pos;
615         rc = ll_dir_read(inode, ctx);
616         lfd->lfd_pos = ctx->pos;
617         if (ctx->pos == MDS_DIR_END_OFF) {
618                 if (api32)
619                         ctx->pos = LL_DIR_END_OFF_32BIT;
620                 else
621                         ctx->pos = LL_DIR_END_OFF;
622         } else {
623                 if (api32 && hash64)
624                         ctx->pos >>= 32;
625         }
626         filp->f_version = inode->i_version;
627
628 out:
629         if (!rc)
630                 ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
631
632         return rc;
633 }
634
635 static int ll_send_mgc_param(struct obd_export *mgc, char *string)
636 {
637         struct mgs_send_param *msp;
638         int rc = 0;
639
640         msp = kzalloc(sizeof(*msp), GFP_NOFS);
641         if (!msp)
642                 return -ENOMEM;
643
644         strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
645         rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
646                                 sizeof(struct mgs_send_param), msp, NULL);
647         if (rc)
648                 CERROR("Failed to set parameter: %d\n", rc);
649         kfree(msp);
650
651         return rc;
652 }
653
654 static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
655                                char *filename)
656 {
657         struct ptlrpc_request *request = NULL;
658         struct md_op_data *op_data;
659         struct ll_sb_info *sbi = ll_i2sbi(dir);
660         int mode;
661         int err;
662
663         mode = (0755 & ~current_umask()) | S_IFDIR;
664         op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
665                                      strlen(filename), mode, LUSTRE_OPC_MKDIR,
666                                      lump);
667         if (IS_ERR(op_data)) {
668                 err = PTR_ERR(op_data);
669                 goto err_exit;
670         }
671
672         op_data->op_cli_flags |= CLI_SET_MEA;
673         err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
674                         from_kuid(&init_user_ns, current_fsuid()),
675                         from_kgid(&init_user_ns, current_fsgid()),
676                         cfs_curproc_cap_pack(), 0, &request);
677         ll_finish_md_op_data(op_data);
678         if (err)
679                 goto err_exit;
680 err_exit:
681         ptlrpc_req_finished(request);
682         return err;
683 }
684
685 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
686                      int set_default)
687 {
688         struct ll_sb_info *sbi = ll_i2sbi(inode);
689         struct md_op_data *op_data;
690         struct ptlrpc_request *req = NULL;
691         int rc = 0;
692         struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
693         struct obd_device *mgc = lsi->lsi_mgc;
694         int lum_size;
695
696         if (lump != NULL) {
697                 /*
698                  * This is coming from userspace, so should be in
699                  * local endian.  But the MDS would like it in little
700                  * endian, so we swab it before we send it.
701                  */
702                 switch (lump->lmm_magic) {
703                 case LOV_USER_MAGIC_V1: {
704                         if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
705                                 lustre_swab_lov_user_md_v1(lump);
706                         lum_size = sizeof(struct lov_user_md_v1);
707                         break;
708                 }
709                 case LOV_USER_MAGIC_V3: {
710                         if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
711                                 lustre_swab_lov_user_md_v3(
712                                         (struct lov_user_md_v3 *)lump);
713                         lum_size = sizeof(struct lov_user_md_v3);
714                         break;
715                 }
716                 default: {
717                         CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
718                                lump->lmm_magic, LOV_USER_MAGIC_V1,
719                                LOV_USER_MAGIC_V3);
720                         return -EINVAL;
721                 }
722                 }
723         } else {
724                 lum_size = sizeof(struct lov_user_md_v1);
725         }
726
727         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
728                                      LUSTRE_OPC_ANY, NULL);
729         if (IS_ERR(op_data))
730                 return PTR_ERR(op_data);
731
732         if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
733                 op_data->op_cli_flags |= CLI_SET_MEA;
734
735         /* swabbing is done in lov_setstripe() on server side */
736         rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
737                         NULL, 0, &req, NULL);
738         ll_finish_md_op_data(op_data);
739         ptlrpc_req_finished(req);
740         if (rc) {
741                 if (rc != -EPERM && rc != -EACCES)
742                         CERROR("mdc_setattr fails: rc = %d\n", rc);
743         }
744
745         /* In the following we use the fact that LOV_USER_MAGIC_V1 and
746          * LOV_USER_MAGIC_V3 have the same initial fields so we do not
747          * need to make the distinction between the 2 versions */
748         if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
749                 char *param = NULL;
750                 char *buf;
751
752                 param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
753                 if (!param)
754                         return -ENOMEM;
755
756                 buf = param;
757                 /* Get fsname and assume devname to be -MDT0000. */
758                 ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
759                 strcat(buf, "-MDT0000.lov");
760                 buf += strlen(buf);
761
762                 /* Set root stripesize */
763                 sprintf(buf, ".stripesize=%u",
764                         lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
765                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
766                 if (rc)
767                         goto end;
768
769                 /* Set root stripecount */
770                 sprintf(buf, ".stripecount=%hd",
771                         lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
772                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
773                 if (rc)
774                         goto end;
775
776                 /* Set root stripeoffset */
777                 sprintf(buf, ".stripeoffset=%hd",
778                         lump ? le16_to_cpu(lump->lmm_stripe_offset) :
779                         (typeof(lump->lmm_stripe_offset))(-1));
780                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
781
782 end:
783                 kfree(param);
784         }
785         return rc;
786 }
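/*
 * For reference, the MGS parameter strings built above look like the
 * following (the file system name "lustre" and the striping values are
 * illustrative, not taken from any particular setup):
 *
 *	lustre-MDT0000.lov.stripesize=1048576
 *	lustre-MDT0000.lov.stripecount=2
 *	lustre-MDT0000.lov.stripeoffset=-1
 *
 * When lump is NULL the settings are reset to stripesize=0, stripecount=0
 * and stripeoffset=-1.
 */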
787
788 int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
789                      int *lmm_size, struct ptlrpc_request **request)
790 {
791         struct ll_sb_info *sbi = ll_i2sbi(inode);
792         struct mdt_body   *body;
793         struct lov_mds_md *lmm = NULL;
794         struct ptlrpc_request *req = NULL;
795         int rc, lmmsize;
796         struct md_op_data *op_data;
797
798         rc = ll_get_default_mdsize(sbi, &lmmsize);
799         if (rc)
800                 return rc;
801
802         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
803                                      0, lmmsize, LUSTRE_OPC_ANY,
804                                      NULL);
805         if (IS_ERR(op_data))
806                 return PTR_ERR(op_data);
807
808         op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
809         rc = md_getattr(sbi->ll_md_exp, op_data, &req);
810         ll_finish_md_op_data(op_data);
811         if (rc < 0) {
812                 CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
813                        inode->i_ino,
814                        inode->i_generation, rc);
815                 goto out;
816         }
817
818         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
819         LASSERT(body != NULL);
820
821         lmmsize = body->eadatasize;
822
823         if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
824             lmmsize == 0) {
825                 rc = -ENODATA;
826                 goto out;
827         }
828
829         lmm = req_capsule_server_sized_get(&req->rq_pill,
830                                            &RMF_MDT_MD, lmmsize);
831         LASSERT(lmm != NULL);
832
833         /*
834          * This is coming from the MDS, so is probably in
835          * little endian.  We convert it to host endian before
836          * passing it to userspace.
837          */
838         /* We don't swab objects for directories */
839         switch (le32_to_cpu(lmm->lmm_magic)) {
840         case LOV_MAGIC_V1:
841                 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
842                         lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
843                 break;
844         case LOV_MAGIC_V3:
845                 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
846                         lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
847                 break;
848         default:
849                 CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
850                 rc = -EPROTO;
851         }
852 out:
853         *lmmp = lmm;
854         *lmm_size = lmmsize;
855         *request = req;
856         return rc;
857 }
858
859 /*
860  *  Get MDT index for the inode.
861  */
862 int ll_get_mdt_idx(struct inode *inode)
863 {
864         struct ll_sb_info *sbi = ll_i2sbi(inode);
865         struct md_op_data *op_data;
866         int rc, mdtidx;
867
868         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
869                                      0, LUSTRE_OPC_ANY, NULL);
870         if (IS_ERR(op_data))
871                 return PTR_ERR(op_data);
872
873         op_data->op_flags |= MF_GET_MDT_IDX;
874         rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
875         mdtidx = op_data->op_mds;
876         ll_finish_md_op_data(op_data);
877         if (rc < 0) {
878                 CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
879                 return rc;
880         }
881         return mdtidx;
882 }
883
884 /**
885  * Generic handler to do any pre-copy work.
886  *
887  * It sends a first hsm_progress (with extent length == 0) to the coordinator
888  * as a first notification that the real work has started.
889  *
890  * Moreover, for an ARCHIVE request, it will sample the file data version and
891  * store it in \a copy.
892  *
893  * \return 0 on success.
894  */
895 static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
896 {
897         struct ll_sb_info               *sbi = ll_s2sbi(sb);
898         struct hsm_progress_kernel       hpk;
899         int                              rc;
900
901         /* Forge a hsm_progress based on data from copy. */
902         hpk.hpk_fid = copy->hc_hai.hai_fid;
903         hpk.hpk_cookie = copy->hc_hai.hai_cookie;
904         hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
905         hpk.hpk_extent.length = 0;
906         hpk.hpk_flags = 0;
907         hpk.hpk_errval = 0;
908         hpk.hpk_data_version = 0;
909
910
911         /* For an archive request, we need to read the current file version. */
912         if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
913                 struct inode    *inode;
914                 __u64            data_version = 0;
915
916                 /* Get inode for this fid */
917                 inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
918                 if (IS_ERR(inode)) {
919                         hpk.hpk_flags |= HP_FLAG_RETRY;
920                         /* hpk_errval is >= 0 */
921                         hpk.hpk_errval = -PTR_ERR(inode);
922                         rc = PTR_ERR(inode);
923                         goto progress;
924                 }
925
926                 /* Read current file data version */
927                 rc = ll_data_version(inode, &data_version, 1);
928                 iput(inode);
929                 if (rc != 0) {
930                         CDEBUG(D_HSM, "Could not read file data version of "
931                                       DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
932                                       PFID(&copy->hc_hai.hai_fid), rc,
933                                       copy->hc_hai.hai_cookie);
934                         hpk.hpk_flags |= HP_FLAG_RETRY;
935                         /* hpk_errval must be >= 0 */
936                         hpk.hpk_errval = -rc;
937                         goto progress;
938                 }
939
940                 /* Store it in the hsm_copy for later copytool use.
941                  * Always modified even if no lsm. */
942                 copy->hc_data_version = data_version;
943         }
944
945 progress:
946         rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
947                            &hpk, NULL);
948
949         return rc;
950 }
951
952 /**
953  * Generic handler to do any post-copy work.
954  *
955  * It will send the last hsm_progress update to the coordinator to inform it
956  * that the copy is finished and whether or not it was successful.
957  *
958  * Moreover,
959  * - for an ARCHIVE request, it will sample the file data version and compare
960  *   it with the version saved in ll_ioc_copy_start(). If they do not match,
961  *   the copy will be considered as failed.
962  * - for a RESTORE request, it will sample the file data version and send it
963  *   to the coordinator, which is useful if the file was imported as 'released'.
964  *
965  * \return 0 on success.
966  */
967 static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
968 {
969         struct ll_sb_info               *sbi = ll_s2sbi(sb);
970         struct hsm_progress_kernel       hpk;
971         int                              rc;
972
973         /* If you modify the logic here, also check llapi_hsm_copy_end(). */
974         /* Take care: copy->hc_hai.hai_action, len, gid and data are not
975          * initialized if copy_end was called with copy == NULL.
976          */
977
978         /* Forge a hsm_progress based on data from copy. */
979         hpk.hpk_fid = copy->hc_hai.hai_fid;
980         hpk.hpk_cookie = copy->hc_hai.hai_cookie;
981         hpk.hpk_extent = copy->hc_hai.hai_extent;
982         hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
983         hpk.hpk_errval = copy->hc_errval;
984         hpk.hpk_data_version = 0;
985
986         /* For an archive request, check that the file data was not changed.
987          *
988          * For a restore request, send the file data version; this is useful
989          * when the file was created using hsm_import.
990          */
991         if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
992              (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
993             (copy->hc_errval == 0)) {
994                 struct inode    *inode;
995                 __u64            data_version = 0;
996
997                 /* Get lsm for this fid */
998                 inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
999                 if (IS_ERR(inode)) {
1000                         hpk.hpk_flags |= HP_FLAG_RETRY;
1001                         /* hpk_errval must be >= 0 */
1002                         hpk.hpk_errval = -PTR_ERR(inode);
1003                         rc = PTR_ERR(inode);
1004                         goto progress;
1005                 }
1006
1007                 rc = ll_data_version(inode, &data_version,
1008                                      copy->hc_hai.hai_action == HSMA_ARCHIVE);
1009                 iput(inode);
1010                 if (rc) {
1011                         CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
1012                         if (hpk.hpk_errval == 0)
1013                                 hpk.hpk_errval = -rc;
1014                         goto progress;
1015                 }
1016
1017                 /* Store it in the hsm_copy for later copytool use.
1018                  * Always modified even if no lsm. */
1019                 hpk.hpk_data_version = data_version;
1020
1021                 /* File could have been stripped during archiving, so we need
1022                  * to check anyway. */
1023                 if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
1024                     (copy->hc_data_version != data_version)) {
1025                         CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
1026                                DFID", start:%#llx current:%#llx\n",
1027                                PFID(&copy->hc_hai.hai_fid),
1028                                copy->hc_data_version, data_version);
1029                         /* File was changed, send error to cdt. Do not ask for
1030                          * retry because if a file is modified frequently,
1031                          * the cdt will loop on retried archive requests.
1032                          * The policy engine will ask for a new archive later
1033                          * when the file will not be modified for some tunable
1034                          * time */
1035                         /* we do not notify caller */
1036                         hpk.hpk_flags &= ~HP_FLAG_RETRY;
1037                         /* hpk_errval must be >= 0 */
1038                         hpk.hpk_errval = EBUSY;
1039                 }
1040
1041         }
1042
1043 progress:
1044         rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
1045                            &hpk, NULL);
1046
1047         return rc;
1048 }
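/*
 * Putting the two handlers above together, the copytool-side sequence looks
 * roughly like this (a simplified sketch; on the user-space side this
 * corresponds to llapi_hsm_copy_end(), mentioned above, and presumably its
 * llapi_hsm_copy_start() counterpart):
 *
 *	1. copy_start: a first hsm_progress with extent length == 0 tells the
 *	   coordinator that work has begun; for HSMA_ARCHIVE the current file
 *	   data version is sampled and stored in the hsm_copy.
 *	2. the copytool transfers the file data.
 *	3. copy_end: a final hsm_progress with HP_FLAG_COMPLETED is sent; for
 *	   HSMA_ARCHIVE the data version is sampled again, and a mismatch turns
 *	   the request into an EBUSY failure that is not retried.
 */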
1049
1050
1051 static int copy_and_ioctl(int cmd, struct obd_export *exp,
1052                           const void __user *data, size_t size)
1053 {
1054         void *copy;
1055         int rc;
1056
1057         copy = kzalloc(size, GFP_NOFS);
1058         if (!copy)
1059                 return -ENOMEM;
1060
1061         if (copy_from_user(copy, data, size)) {
1062                 rc = -EFAULT;
1063                 goto out;
1064         }
1065
1066         rc = obd_iocontrol(cmd, exp, size, copy, NULL);
1067 out:
1068         kfree(copy);
1069
1070         return rc;
1071 }
1072
1073 static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
1074 {
1075         int cmd = qctl->qc_cmd;
1076         int type = qctl->qc_type;
1077         int id = qctl->qc_id;
1078         int valid = qctl->qc_valid;
1079         int rc = 0;
1080
1081         switch (cmd) {
1082         case LUSTRE_Q_INVALIDATE:
1083         case LUSTRE_Q_FINVALIDATE:
1084         case Q_QUOTAON:
1085         case Q_QUOTAOFF:
1086         case Q_SETQUOTA:
1087         case Q_SETINFO:
1088                 if (!capable(CFS_CAP_SYS_ADMIN) ||
1089                     sbi->ll_flags & LL_SBI_RMT_CLIENT)
1090                         return -EPERM;
1091                 break;
1092         case Q_GETQUOTA:
1093                 if (((type == USRQUOTA &&
1094                       !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
1095                      (type == GRPQUOTA &&
1096                       !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
1097                     (!capable(CFS_CAP_SYS_ADMIN) ||
1098                      sbi->ll_flags & LL_SBI_RMT_CLIENT))
1099                         return -EPERM;
1100                 break;
1101         case Q_GETINFO:
1102                 break;
1103         default:
1104                 CERROR("unsupported quotactl op: %#x\n", cmd);
1105                 return -ENOTTY;
1106         }
1107
1108         if (valid != QC_GENERAL) {
1109                 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
1110                         return -EOPNOTSUPP;
1111
1112                 if (cmd == Q_GETINFO)
1113                         qctl->qc_cmd = Q_GETOINFO;
1114                 else if (cmd == Q_GETQUOTA)
1115                         qctl->qc_cmd = Q_GETOQUOTA;
1116                 else
1117                         return -EINVAL;
1118
1119                 switch (valid) {
1120                 case QC_MDTIDX:
1121                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1122                                            sizeof(*qctl), qctl, NULL);
1123                         break;
1124                 case QC_OSTIDX:
1125                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
1126                                            sizeof(*qctl), qctl, NULL);
1127                         break;
1128                 case QC_UUID:
1129                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1130                                            sizeof(*qctl), qctl, NULL);
1131                         if (rc == -EAGAIN)
1132                                 rc = obd_iocontrol(OBD_IOC_QUOTACTL,
1133                                                    sbi->ll_dt_exp,
1134                                                    sizeof(*qctl), qctl, NULL);
1135                         break;
1136                 default:
1137                         rc = -EINVAL;
1138                         break;
1139                 }
1140
1141                 if (rc)
1142                         return rc;
1143
1144                 qctl->qc_cmd = cmd;
1145         } else {
1146                 struct obd_quotactl *oqctl;
1147
1148                 oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
1149                 if (!oqctl)
1150                         return -ENOMEM;
1151
1152                 QCTL_COPY(oqctl, qctl);
1153                 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
1154                 if (rc) {
1155                         if (rc != -EALREADY && cmd == Q_QUOTAON) {
1156                                 oqctl->qc_cmd = Q_QUOTAOFF;
1157                                 obd_quotactl(sbi->ll_md_exp, oqctl);
1158                         }
1159                         kfree(oqctl);
1160                         return rc;
1161                 }
1162                 /* If QIF_SPACE is not set, client should collect the
1163                  * space usage from OSSs by itself */
1164                 if (cmd == Q_GETQUOTA &&
1165                     !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
1166                     !oqctl->qc_dqblk.dqb_curspace) {
1167                         struct obd_quotactl *oqctl_tmp;
1168
1169                         oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
1170                         if (!oqctl_tmp) {
1171                                 rc = -ENOMEM;
1172                                 goto out;
1173                         }
1174
1175                         oqctl_tmp->qc_cmd = Q_GETOQUOTA;
1176                         oqctl_tmp->qc_id = oqctl->qc_id;
1177                         oqctl_tmp->qc_type = oqctl->qc_type;
1178
1179                         /* collect space usage from OSTs */
1180                         oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1181                         rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
1182                         if (!rc || rc == -EREMOTEIO) {
1183                                 oqctl->qc_dqblk.dqb_curspace =
1184                                         oqctl_tmp->qc_dqblk.dqb_curspace;
1185                                 oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
1186                         }
1187
1188                         /* collect space & inode usage from MDTs */
1189                         oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1190                         oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
1191                         rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
1192                         if (!rc || rc == -EREMOTEIO) {
1193                                 oqctl->qc_dqblk.dqb_curspace +=
1194                                         oqctl_tmp->qc_dqblk.dqb_curspace;
1195                                 oqctl->qc_dqblk.dqb_curinodes =
1196                                         oqctl_tmp->qc_dqblk.dqb_curinodes;
1197                                 oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
1198                         } else {
1199                                 oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
1200                         }
1201
1202                         kfree(oqctl_tmp);
1203                 }
1204 out:
1205                 QCTL_COPY(qctl, oqctl);
1206                 kfree(oqctl);
1207         }
1208
1209         return rc;
1210 }
1211
1212 /* This function tries to get a single name component
1213  * to send to the server. No actual path traversal is involved,
1214  * so we limit the length to NAME_MAX. */
1215 static char *ll_getname(const char __user *filename)
1216 {
1217         int ret = 0, len;
1218         char *tmp;
1219
1220         tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
1221         if (!tmp)
1222                 return ERR_PTR(-ENOMEM);
1223
1224         len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
1225         if (len < 0)
1226                 ret = len;
1227         else if (len == 0)
1228                 ret = -ENOENT;
1229         else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
1230                 ret = -ENAMETOOLONG;
1231
1232         if (ret) {
1233                 kfree(tmp);
1234                 tmp =  ERR_PTR(ret);
1235         }
1236         return tmp;
1237 }
1238
1239 #define ll_putname(filename) kfree(filename)
1240
1241 static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1242 {
1243         struct inode *inode = file_inode(file);
1244         struct ll_sb_info *sbi = ll_i2sbi(inode);
1245         struct obd_ioctl_data *data;
1246         int rc = 0;
1247
1248         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
1249                inode->i_ino, inode->i_generation, inode, cmd);
1250
1251         /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
1252         if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
1253                 return -ENOTTY;
1254
1255         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
1256         switch (cmd) {
1257         case FSFILT_IOC_GETFLAGS:
1258         case FSFILT_IOC_SETFLAGS:
1259                 return ll_iocontrol(inode, file, cmd, arg);
1260         case FSFILT_IOC_GETVERSION_OLD:
1261         case FSFILT_IOC_GETVERSION:
1262                 return put_user(inode->i_generation, (int *)arg);
1263         /* We need to special case any other ioctls we want to handle,
1264          * to send them to the MDS/OST as appropriate and to properly
1265          * network encode the arg field.
1266         case FSFILT_IOC_SETVERSION_OLD:
1267         case FSFILT_IOC_SETVERSION:
1268         */
1269         case LL_IOC_GET_MDTIDX: {
1270                 int mdtidx;
1271
1272                 mdtidx = ll_get_mdt_idx(inode);
1273                 if (mdtidx < 0)
1274                         return mdtidx;
1275
1276                 if (put_user((int)mdtidx, (int *)arg))
1277                         return -EFAULT;
1278
1279                 return 0;
1280         }
1281         case IOC_MDC_LOOKUP: {
1282                 struct ptlrpc_request *request = NULL;
1283                 int namelen, len = 0;
1284                 char *buf = NULL;
1285                 char *filename;
1286                 struct md_op_data *op_data;
1287
1288                 rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
1289                 if (rc)
1290                         return rc;
1291                 data = (void *)buf;
1292
1293                 filename = data->ioc_inlbuf1;
1294                 namelen = strlen(filename);
1295
1296                 if (namelen < 1) {
1297                         CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
1298                         rc = -EINVAL;
1299                         goto out_free;
1300                 }
1301
1302                 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
1303                                              0, LUSTRE_OPC_ANY, NULL);
1304                 if (IS_ERR(op_data)) {
1305                         rc = PTR_ERR(op_data);
1306                         goto out_free;
1307                 }
1308
1309                 op_data->op_valid = OBD_MD_FLID;
1310                 rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
1311                 ll_finish_md_op_data(op_data);
1312                 if (rc < 0) {
1313                         CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
1314                         goto out_free;
1315                 }
1316                 ptlrpc_req_finished(request);
1317 out_free:
1318                 obd_ioctl_freedata(buf, len);
1319                 return rc;
1320         }
1321         case LL_IOC_LMV_SETSTRIPE: {
1322                 struct lmv_user_md  *lum;
1323                 char            *buf = NULL;
1324                 char            *filename;
1325                 int              namelen = 0;
1326                 int              lumlen = 0;
1327                 int              len;
1328                 int              rc;
1329
1330                 rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
1331                 if (rc)
1332                         return rc;
1333
1334                 data = (void *)buf;
1335                 if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
1336                     data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
1337                         rc = -EINVAL;
1338                         goto lmv_out_free;
1339                 }
1340
1341                 filename = data->ioc_inlbuf1;
1342                 namelen = data->ioc_inllen1;
1343
1344                 if (namelen < 1) {
1345                         CDEBUG(D_INFO, "LL_IOC_LMV_SETSTRIPE missing filename\n");
1346                         rc = -EINVAL;
1347                         goto lmv_out_free;
1348                 }
1349                 lum = (struct lmv_user_md *)data->ioc_inlbuf2;
1350                 lumlen = data->ioc_inllen2;
1351
1352                 if (lum->lum_magic != LMV_USER_MAGIC ||
1353                     lumlen != sizeof(*lum)) {
1354                         CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
1355                                filename, lum->lum_magic, lumlen, -EINVAL);
1356                         rc = -EINVAL;
1357                         goto lmv_out_free;
1358                 }
1359
1360                 /*
1361                  * ll_dir_setdirstripe() creates the striped directory:
1362                  * mdc_create() -> mdt_reint_create() (with dirstripe).
1363                  */
1364                 rc = ll_dir_setdirstripe(inode, lum, filename);
1365 lmv_out_free:
1366                 obd_ioctl_freedata(buf, len);
1367                 return rc;
1368
1369         }
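             /* Set the stripe layout for files created in this directory.
              * The user buffer is read as the smaller v1 header first and
              * re-read as v3 only if the magic asks for it; set_default is
              * raised when the target is the filesystem root. */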
1370         case LL_IOC_LOV_SETSTRIPE: {
1371                 struct lov_user_md_v3 lumv3;
1372                 struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
1373                 struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
1374                 struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
1375
1376                 int set_default = 0;
1377
1378                 LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
1379                 LASSERT(sizeof(lumv3.lmm_objects[0]) ==
1380                         sizeof(lumv3p->lmm_objects[0]));
1381                 /* first try with v1 which is smaller than v3 */
1382                 if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
1383                         return -EFAULT;
1384
1385                 if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
1386                         if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
1387                                 return -EFAULT;
1388                 }
1389
1390                 if (is_root_inode(inode))
1391                         set_default = 1;
1392
1393                 /* in v1 and v3 cases lumv1 points to data */
1394                 rc = ll_dir_setstripe(inode, lumv1, set_default);
1395
1396                 return rc;
1397         }
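             /* Report directory striping.  Only LMV_MAGIC_V1 requests are
              * accepted; a single-stripe lmv_user_md is synthesized from the
              * local MDT index and this directory's FID and copied back to
              * userspace. */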
1398         case LL_IOC_LMV_GETSTRIPE: {
1399                 struct lmv_user_md *lump = (struct lmv_user_md *)arg;
1400                 struct lmv_user_md lum;
1401                 struct lmv_user_md *tmp;
1402                 int lum_size;
1403                 int rc = 0;
1404                 int mdtindex;
1405
1406                 if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
1407                         return -EFAULT;
1408
1409                 if (lum.lum_magic != LMV_MAGIC_V1)
1410                         return -EINVAL;
1411
1412                 lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
1413                 tmp = kzalloc(lum_size, GFP_NOFS);
1414                 if (!tmp) {
1415                         rc = -ENOMEM;
1416                         goto free_lmv;
1417                 }
1418
1419                 *tmp = lum;
1420                 tmp->lum_type = LMV_STRIPE_TYPE;
1421                 tmp->lum_stripe_count = 1;
1422                 mdtindex = ll_get_mdt_idx(inode);
1423                 if (mdtindex < 0) {
1424                         rc = mdtindex;
1425                         goto free_lmv;
1426                 }
1427
1428                 tmp->lum_stripe_offset = mdtindex;
1429                 tmp->lum_objects[0].lum_mds = mdtindex;
1430                 memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
1431                        sizeof(struct lu_fid));
1432                 if (copy_to_user((void *)arg, tmp, lum_size)) {
1433                         rc = -EFAULT;
1434                         goto free_lmv;
1435                 }
1436 free_lmv:
1437                 kfree(tmp);
1438                 return rc;
1439         }
1440         case LL_IOC_LOV_SWAP_LAYOUTS:
1441                 return -EPERM;
1442         case LL_IOC_OBD_STATFS:
1443                 return ll_obd_statfs(inode, (void *)arg);
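             /* The four "get stripe/info" ioctls share one implementation:
              * the by-name variants fetch the LOV EA of the named child,
              * the others fetch this directory's own stripe EA.  The EA is
              * copied to userspace, and the *INFO variants additionally
              * fill an lstat_t from the mdt_body in the reply. */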
1444         case LL_IOC_LOV_GETSTRIPE:
1445         case LL_IOC_MDC_GETINFO:
1446         case IOC_MDC_GETFILEINFO:
1447         case IOC_MDC_GETFILESTRIPE: {
1448                 struct ptlrpc_request *request = NULL;
1449                 struct lov_user_md *lump;
1450                 struct lov_mds_md *lmm = NULL;
1451                 struct mdt_body *body;
1452                 char *filename = NULL;
1453                 int lmmsize;
1454
1455                 if (cmd == IOC_MDC_GETFILEINFO ||
1456                     cmd == IOC_MDC_GETFILESTRIPE) {
1457                         filename = ll_getname((const char *)arg);
1458                         if (IS_ERR(filename))
1459                                 return PTR_ERR(filename);
1460
1461                         rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
1462                                                       &lmmsize, &request);
1463                 } else {
1464                         rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
1465                 }
1466
1467                 if (request) {
1468                         body = req_capsule_server_get(&request->rq_pill,
1469                                                       &RMF_MDT_BODY);
1470                         LASSERT(body != NULL);
1471                 } else {
1472                         goto out_req;
1473                 }
1474
1475                 if (rc < 0) {
1476                         if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
1477                                                cmd == LL_IOC_MDC_GETINFO)) {
1478                                 rc = 0;
1479                                 goto skip_lmm;
1480                         } else
1481                                 goto out_req;
1482                 }
1483
1484                 if (cmd == IOC_MDC_GETFILESTRIPE ||
1485                     cmd == LL_IOC_LOV_GETSTRIPE) {
1486                         lump = (struct lov_user_md *)arg;
1487                 } else {
1488                         struct lov_user_mds_data *lmdp;
1489
1490                         lmdp = (struct lov_user_mds_data *)arg;
1491                         lump = &lmdp->lmd_lmm;
1492                 }
1493                 if (copy_to_user(lump, lmm, lmmsize)) {
1494                         if (copy_to_user(lump, lmm, sizeof(*lump))) {
1495                                 rc = -EFAULT;
1496                                 goto out_req;
1497                         }
1498                         rc = -EOVERFLOW;
1499                 }
1500 skip_lmm:
1501                 if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
1502                         struct lov_user_mds_data *lmdp;
1503                         lstat_t st = { 0 };
1504
1505                         st.st_dev     = inode->i_sb->s_dev;
1506                         st.st_mode    = body->mode;
1507                         st.st_nlink   = body->nlink;
1508                         st.st_uid     = body->uid;
1509                         st.st_gid     = body->gid;
1510                         st.st_rdev    = body->rdev;
1511                         st.st_size    = body->size;
1512                         st.st_blksize = PAGE_CACHE_SIZE;
1513                         st.st_blocks  = body->blocks;
1514                         st.st_atime   = body->atime;
1515                         st.st_mtime   = body->mtime;
1516                         st.st_ctime   = body->ctime;
1517                         st.st_ino     = inode->i_ino;
1518
1519                         lmdp = (struct lov_user_mds_data *)arg;
1520                         if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
1521                                 rc = -EFAULT;
1522                                 goto out_req;
1523                         }
1524                 }
1525
1526 out_req:
1527                 ptlrpc_req_finished(request);
1528                 if (filename)
1529                         ll_putname(filename);
1530                 return rc;
1531         }
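             /* Userspace passes a packed LOV EA: byte-swap it if needed,
              * unpack it into a lov_stripe_md and glimpse the OST objects
              * so the returned lstat_t carries current size/blocks. */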
1532         case IOC_LOV_GETINFO: {
1533                 struct lov_user_mds_data *lumd;
1534                 struct lov_stripe_md *lsm;
1535                 struct lov_user_md *lum;
1536                 struct lov_mds_md *lmm;
1537                 int lmmsize;
1538                 lstat_t st;
1539
1540                 lumd = (struct lov_user_mds_data *)arg;
1541                 lum = &lumd->lmd_lmm;
1542
1543                 rc = ll_get_max_mdsize(sbi, &lmmsize);
1544                 if (rc)
1545                         return rc;
1546
1547                 lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
1548                 if (lmm == NULL)
1549                         return -ENOMEM;
1550                 if (copy_from_user(lmm, lum, lmmsize)) {
1551                         rc = -EFAULT;
1552                         goto free_lmm;
1553                 }
1554
1555                 switch (lmm->lmm_magic) {
1556                 case LOV_USER_MAGIC_V1:
1557                         if (LOV_USER_MAGIC_V1 == cpu_to_le32(LOV_USER_MAGIC_V1))
1558                                 break;
1559                         /* swab objects first so that stripes num will be sane */
1560                         lustre_swab_lov_user_md_objects(
1561                                 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
1562                                 ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
1563                         lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
1564                         break;
1565                 case LOV_USER_MAGIC_V3:
1566                         if (LOV_USER_MAGIC_V3 == cpu_to_le32(LOV_USER_MAGIC_V3))
1567                                 break;
1568                         /* swab objects first so that stripes num will be sane */
1569                         lustre_swab_lov_user_md_objects(
1570                                 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
1571                                 ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
1572                         lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
1573                         break;
1574                 default:
1575                         rc = -EINVAL;
1576                         goto free_lmm;
1577                 }
1578
1579                 rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
1580                 if (rc < 0)
1581                         goto free_lmm;
1584
1585                 /* Perform glimpse_size operation. */
1586                 memset(&st, 0, sizeof(st));
1587
1588                 rc = ll_glimpse_ioctl(sbi, lsm, &st);
1589                 if (rc)
1590                         goto free_lsm;
1591
1592                 if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
1593                         rc = -EFAULT;
1594                         goto free_lsm;
1595                 }
1596
1597 free_lsm:
1598                 obd_free_memmd(sbi->ll_dt_exp, &lsm);
1599 free_lmm:
1600                 kvfree(lmm);
1601                 return rc;
1602         }
1603         case OBD_IOC_LLOG_CATINFO: {
1604                 return -EOPNOTSUPP;
1605         }
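             /* Kick off quotacheck on the MDT and then the OSTs (admin only,
              * never for remote clients); an MDT failure is returned in
              * preference to the OST result. */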
1606         case OBD_IOC_QUOTACHECK: {
1607                 struct obd_quotactl *oqctl;
1608                 int error = 0;
1609
1610                 if (!capable(CFS_CAP_SYS_ADMIN) ||
1611                     sbi->ll_flags & LL_SBI_RMT_CLIENT)
1612                         return -EPERM;
1613
1614                 oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
1615                 if (!oqctl)
1616                         return -ENOMEM;
1617                 oqctl->qc_type = arg;
1618                 rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
1619                 if (rc < 0) {
1620                         CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
1621                         error = rc;
1622                 }
1623
1624                 rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
1625                 if (rc < 0)
1626                         CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
1627
1628                 kfree(oqctl);
1629                 return error ?: rc;
1630         }
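             /* Poll quotacheck status on the MDT and then the OSTs; on
              * failure the if_quotacheck result is copied back to userspace
              * before returning. */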
1631         case OBD_IOC_POLL_QUOTACHECK: {
1632                 struct if_quotacheck *check;
1633
1634                 if (!capable(CFS_CAP_SYS_ADMIN) ||
1635                     sbi->ll_flags & LL_SBI_RMT_CLIENT)
1636                         return -EPERM;
1637
1638                 check = kzalloc(sizeof(*check), GFP_NOFS);
1639                 if (!check)
1640                         return -ENOMEM;
1641
1642                 rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
1643                                    NULL);
1644                 if (rc) {
1645                         CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
1646                         if (copy_to_user((void *)arg, check,
1647                                              sizeof(*check)))
1648                                 CDEBUG(D_QUOTA, "copy_to_user failed\n");
1649                         goto out_poll;
1650                 }
1651
1652                 rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
1653                                    NULL);
1654                 if (rc) {
1655                         CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
1656                         if (copy_to_user((void *)arg, check,
1657                                              sizeof(*check)))
1658                                 CDEBUG(D_QUOTA, "copy_to_user failed\n");
1659                         goto out_poll;
1660                 }
1661 out_poll:
1662                 kfree(check);
1663                 return rc;
1664         }
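             /* Generic quota control: copy the if_quotactl request in,
              * dispatch it through quotactl_ioctl() and copy the result
              * back on success. */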
1665         case LL_IOC_QUOTACTL: {
1666                 struct if_quotactl *qctl;
1667
1668                 qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
1669                 if (!qctl)
1670                         return -ENOMEM;
1671
1672                 if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) {
1673                         rc = -EFAULT;
1674                         goto out_quotactl;
1675                 }
1676
1677                 rc = quotactl_ioctl(sbi, qctl);
1678
1679                 if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
1680                         rc = -EFAULT;
1681
1682 out_quotactl:
1683                 kfree(qctl);
1684                 return rc;
1685         }
1686         case OBD_IOC_GETDTNAME:
1687         case OBD_IOC_GETMDNAME:
1688                 return ll_get_obd_name(inode, cmd, arg);
1689         case LL_IOC_FLUSHCTX:
1690                 return ll_flush_ctx(inode);
1691 #ifdef CONFIG_FS_POSIX_ACL
1692         case LL_IOC_RMTACL: {
1693                 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
1694                         struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1695
1696                         LASSERT(fd != NULL);
1697                         rc = rct_add(&sbi->ll_rct, current_pid(), arg);
1698                         if (!rc)
1699                                 fd->fd_flags |= LL_FILE_RMTACL;
1700                         return rc;
1701                 }
1702                 return 0;
1703         }
1704 #endif
1705         case LL_IOC_GETOBDCOUNT: {
1706                 int count, vallen;
1707                 struct obd_export *exp;
1708
1709                 if (copy_from_user(&count, (int *)arg, sizeof(int)))
1710                         return -EFAULT;
1711
1712                 /* get ost count when count is zero, get mdt count otherwise */
1713                 exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
1714                 vallen = sizeof(count);
1715                 rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
1716                                   KEY_TGT_COUNT, &vallen, &count, NULL);
1717                 if (rc) {
1718                         CERROR("get target count failed: %d\n", rc);
1719                         return rc;
1720                 }
1721
1722                 if (copy_to_user((int *)arg, &count, sizeof(int)))
1723                         return -EFAULT;
1724
1725                 return 0;
1726         }
1727         case LL_IOC_PATH2FID:
1728                 if (copy_to_user((void *)arg, ll_inode2fid(inode),
1729                                      sizeof(struct lu_fid)))
1730                         return -EFAULT;
1731                 return 0;
1732         case LL_IOC_GET_CONNECT_FLAGS: {
1733                 return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
1734         }
1735         case OBD_IOC_CHANGELOG_SEND:
1736         case OBD_IOC_CHANGELOG_CLEAR:
1737                 rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
1738                                     sizeof(struct ioc_changelog));
1739                 return rc;
1740         case OBD_IOC_FID2PATH:
1741                 return ll_fid2path(inode, (void *)arg);
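             /* HSM request: copy the fixed header first to compute the full
              * request size, then copy the whole request.  HUA_RELEASE is
              * applied locally file by file via ll_hsm_release(); any other
              * action is forwarded to the MDC in a single ioctl. */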
1742         case LL_IOC_HSM_REQUEST: {
1743                 struct hsm_user_request *hur;
1744                 ssize_t                  totalsize;
1745
1746                 hur = memdup_user((void *)arg, sizeof(*hur));
1747                 if (IS_ERR(hur))
1748                         return PTR_ERR(hur);
1749
1750                 /* Compute the whole struct size */
1751                 totalsize = hur_len(hur);
1752                 kfree(hur);
1753                 if (totalsize < 0)
1754                         return -E2BIG;
1755
1756                 /* Final size will be more than double totalsize */
1757                 if (totalsize >= MDS_MAXREQSIZE / 3)
1758                         return -E2BIG;
1759
1760                 hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
1761                 if (hur == NULL)
1762                         return -ENOMEM;
1763
1764                 /* Copy the whole struct */
1765                 if (copy_from_user(hur, (void *)arg, totalsize)) {
1766                         kvfree(hur);
1767                         return -EFAULT;
1768                 }
1769
1770                 if (hur->hur_request.hr_action == HUA_RELEASE) {
1771                         const struct lu_fid *fid;
1772                         struct inode *f;
1773                         int i;
1774
1775                         for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
1776                                 fid = &hur->hur_user_item[i].hui_fid;
1777                                 f = search_inode_for_lustre(inode->i_sb, fid);
1778                                 if (IS_ERR(f)) {
1779                                         rc = PTR_ERR(f);
1780                                         break;
1781                                 }
1782
1783                                 rc = ll_hsm_release(f);
1784                                 iput(f);
1785                                 if (rc != 0)
1786                                         break;
1787                         }
1788                 } else {
1789                         rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
1790                                            hur, NULL);
1791                 }
1792
1793                 kvfree(hur);
1794
1795                 return rc;
1796         }
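             /* Copytool progress report: widen the userspace hsm_progress
              * into a kernel hsm_progress_kernel (data version zero) and
              * forward it to the MDT. */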
1797         case LL_IOC_HSM_PROGRESS: {
1798                 struct hsm_progress_kernel      hpk;
1799                 struct hsm_progress             hp;
1800
1801                 if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
1802                         return -EFAULT;
1803
1804                 hpk.hpk_fid = hp.hp_fid;
1805                 hpk.hpk_cookie = hp.hp_cookie;
1806                 hpk.hpk_extent = hp.hp_extent;
1807                 hpk.hpk_flags = hp.hp_flags;
1808                 hpk.hpk_errval = hp.hp_errval;
1809                 hpk.hpk_data_version = 0;
1810
1811                 /* The file may not exist in Lustre any more; all
1812                  * progress is reported against the Lustre root. */
1813                 rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
1814                                    NULL);
1815                 return rc;
1816         }
1817         case LL_IOC_HSM_CT_START:
1818                 rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
1819                                     sizeof(struct lustre_kernelcomm));
1820                 return rc;
1821
1822         case LL_IOC_HSM_COPY_START: {
1823                 struct hsm_copy *copy;
1824                 int              rc;
1825
1826                 copy = memdup_user((char *)arg, sizeof(*copy));
1827                 if (IS_ERR(copy))
1828                         return PTR_ERR(copy);
1829
1830                 rc = ll_ioc_copy_start(inode->i_sb, copy);
1831                 if (copy_to_user((char *)arg, copy, sizeof(*copy)))
1832                         rc = -EFAULT;
1833
1834                 kfree(copy);
1835                 return rc;
1836         }
1837         case LL_IOC_HSM_COPY_END: {
1838                 struct hsm_copy *copy;
1839                 int              rc;
1840
1841                 copy = memdup_user((char *)arg, sizeof(*copy));
1842                 if (IS_ERR(copy))
1843                         return PTR_ERR(copy);
1844
1845                 rc = ll_ioc_copy_end(inode->i_sb, copy);
1846                 if (copy_to_user((char *)arg, copy, sizeof(*copy)))
1847                         rc = -EFAULT;
1848
1849                 kfree(copy);
1850                 return rc;
1851         }
1852         default:
1853                 return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
1854         }
1855 }
1856
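     /*
      * Directory offsets are name-hash cookies rather than byte offsets.
      * SEEK_END maps to the hash-space end-of-directory marker, which
      * depends on whether the caller uses the 32-bit API.  The position
      * actually sent to the MDS is kept in fd->lfd_pos; with a 32-bit API
      * on a 64-bit-hash filesystem the user-visible offset is shifted into
      * the high 32 bits of that cookie.
      */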
1857 static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
1858 {
1859         struct inode *inode = file->f_mapping->host;
1860         struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1861         struct ll_sb_info *sbi = ll_i2sbi(inode);
1862         int api32 = ll_need_32bit_api(sbi);
1863         loff_t ret = -EINVAL;
1864
1865         mutex_lock(&inode->i_mutex);
1866         switch (origin) {
1867         case SEEK_SET:
1868                 break;
1869         case SEEK_CUR:
1870                 offset += file->f_pos;
1871                 break;
1872         case SEEK_END:
1873                 if (offset > 0)
1874                         goto out;
1875                 if (api32)
1876                         offset += LL_DIR_END_OFF_32BIT;
1877                 else
1878                         offset += LL_DIR_END_OFF;
1879                 break;
1880         default:
1881                 goto out;
1882         }
1883
1884         if (offset >= 0 &&
1885             ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
1886              (!api32 && offset <= LL_DIR_END_OFF))) {
1887                 if (offset != file->f_pos) {
1888                         if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
1889                             (!api32 && offset == LL_DIR_END_OFF))
1890                                 fd->lfd_pos = MDS_DIR_END_OFF;
1891                         else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
1892                                 fd->lfd_pos = offset << 32;
1893                         else
1894                                 fd->lfd_pos = offset;
1895                         file->f_pos = offset;
1896                         file->f_version = 0;
1897                 }
1898                 ret = offset;
1899         }
1901
1902 out:
1903         mutex_unlock(&inode->i_mutex);
1904         return ret;
1905 }
1906
1907 static int ll_dir_open(struct inode *inode, struct file *file)
1908 {
1909         return ll_file_open(inode, file);
1910 }
1911
1912 static int ll_dir_release(struct inode *inode, struct file *file)
1913 {
1914         return ll_file_release(inode, file);
1915 }
1916
1917 const struct file_operations ll_dir_operations = {
1918         .llseek   = ll_dir_seek,
1919         .open     = ll_dir_open,
1920         .release  = ll_dir_release,
1921         .read     = generic_read_dir,
1922         .iterate  = ll_readdir,
1923         .unlocked_ioctl   = ll_dir_ioctl,
1924         .fsync    = ll_fsync,
1925 };