f2fs: fix extent cache memory leak
1 /*
2  * fs/f2fs/super.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/fs.h>
14 #include <linux/statfs.h>
15 #include <linux/buffer_head.h>
16 #include <linux/backing-dev.h>
17 #include <linux/kthread.h>
18 #include <linux/parser.h>
19 #include <linux/mount.h>
20 #include <linux/seq_file.h>
21 #include <linux/proc_fs.h>
22 #include <linux/random.h>
23 #include <linux/exportfs.h>
24 #include <linux/blkdev.h>
25 #include <linux/f2fs_fs.h>
26 #include <linux/sysfs.h>
27
28 #include "f2fs.h"
29 #include "node.h"
30 #include "segment.h"
31 #include "xattr.h"
32 #include "gc.h"
33 #include "trace.h"
34
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/f2fs.h>
37
38 static struct proc_dir_entry *f2fs_proc_root;
39 static struct kmem_cache *f2fs_inode_cachep;
40 static struct kset *f2fs_kset;
41
42 enum {
43         Opt_gc_background,
44         Opt_disable_roll_forward,
45         Opt_norecovery,
46         Opt_discard,
47         Opt_noheap,
48         Opt_user_xattr,
49         Opt_nouser_xattr,
50         Opt_acl,
51         Opt_noacl,
52         Opt_active_logs,
53         Opt_disable_ext_identify,
54         Opt_inline_xattr,
55         Opt_inline_data,
56         Opt_inline_dentry,
57         Opt_flush_merge,
58         Opt_nobarrier,
59         Opt_fastboot,
60         Opt_extent_cache,
61         Opt_err,
62 };
63
64 static match_table_t f2fs_tokens = {
65         {Opt_gc_background, "background_gc=%s"},
66         {Opt_disable_roll_forward, "disable_roll_forward"},
67         {Opt_norecovery, "norecovery"},
68         {Opt_discard, "discard"},
69         {Opt_noheap, "no_heap"},
70         {Opt_user_xattr, "user_xattr"},
71         {Opt_nouser_xattr, "nouser_xattr"},
72         {Opt_acl, "acl"},
73         {Opt_noacl, "noacl"},
74         {Opt_active_logs, "active_logs=%u"},
75         {Opt_disable_ext_identify, "disable_ext_identify"},
76         {Opt_inline_xattr, "inline_xattr"},
77         {Opt_inline_data, "inline_data"},
78         {Opt_inline_dentry, "inline_dentry"},
79         {Opt_flush_merge, "flush_merge"},
80         {Opt_nobarrier, "nobarrier"},
81         {Opt_fastboot, "fastboot"},
82         {Opt_extent_cache, "extent_cache"},
83         {Opt_err, NULL},
84 };
85
86 /* Sysfs support for f2fs */
87 enum {
88         GC_THREAD,      /* struct f2fs_gc_thread */
89         SM_INFO,        /* struct f2fs_sm_info */
90         NM_INFO,        /* struct f2fs_nm_info */
91         F2FS_SBI,       /* struct f2fs_sb_info */
92 };
93
94 struct f2fs_attr {
95         struct attribute attr;
96         ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
97         ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
98                          const char *, size_t);
99         int struct_type;
100         int offset;
101 };
102
103 static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
104 {
105         if (struct_type == GC_THREAD)
106                 return (unsigned char *)sbi->gc_thread;
107         else if (struct_type == SM_INFO)
108                 return (unsigned char *)SM_I(sbi);
109         else if (struct_type == NM_INFO)
110                 return (unsigned char *)NM_I(sbi);
111         else if (struct_type == F2FS_SBI)
112                 return (unsigned char *)sbi;
113         return NULL;
114 }
115
116 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
117                         struct f2fs_sb_info *sbi, char *buf)
118 {
119         unsigned char *ptr = NULL;
120         unsigned int *ui;
121
122         ptr = __struct_ptr(sbi, a->struct_type);
123         if (!ptr)
124                 return -EINVAL;
125
126         ui = (unsigned int *)(ptr + a->offset);
127
128         return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
129 }
130
131 static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
132                         struct f2fs_sb_info *sbi,
133                         const char *buf, size_t count)
134 {
135         unsigned char *ptr;
136         unsigned long t;
137         unsigned int *ui;
138         ssize_t ret;
139
140         ptr = __struct_ptr(sbi, a->struct_type);
141         if (!ptr)
142                 return -EINVAL;
143
144         ui = (unsigned int *)(ptr + a->offset);
145
146         ret = kstrtoul(skip_spaces(buf), 0, &t);
147         if (ret < 0)
148                 return ret;
149         *ui = t;
150         return count;
151 }
152
153 static ssize_t f2fs_attr_show(struct kobject *kobj,
154                                 struct attribute *attr, char *buf)
155 {
156         struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
157                                                                 s_kobj);
158         struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
159
160         return a->show ? a->show(a, sbi, buf) : 0;
161 }
162
163 static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
164                                                 const char *buf, size_t len)
165 {
166         struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
167                                                                         s_kobj);
168         struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
169
170         return a->store ? a->store(a, sbi, buf, len) : 0;
171 }
172
173 static void f2fs_sb_release(struct kobject *kobj)
174 {
175         struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
176                                                                 s_kobj);
177         complete(&sbi->s_kobj_unregister);
178 }
179
180 #define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
181 static struct f2fs_attr f2fs_attr_##_name = {                   \
182         .attr = {.name = __stringify(_name), .mode = _mode },   \
183         .show   = _show,                                        \
184         .store  = _store,                                       \
185         .struct_type = _struct_type,                            \
186         .offset = _offset                                       \
187 }
188
189 #define F2FS_RW_ATTR(struct_type, struct_name, name, elname)    \
190         F2FS_ATTR_OFFSET(struct_type, name, 0644,               \
191                 f2fs_sbi_show, f2fs_sbi_store,                  \
192                 offsetof(struct struct_name, elname))
193
194 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
195 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
196 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
197 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
198 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
199 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
200 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
201 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
202 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
203 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
204 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
205 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
206 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
207
208 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
209 static struct attribute *f2fs_attrs[] = {
210         ATTR_LIST(gc_min_sleep_time),
211         ATTR_LIST(gc_max_sleep_time),
212         ATTR_LIST(gc_no_gc_sleep_time),
213         ATTR_LIST(gc_idle),
214         ATTR_LIST(reclaim_segments),
215         ATTR_LIST(max_small_discards),
216         ATTR_LIST(batched_trim_sections),
217         ATTR_LIST(ipu_policy),
218         ATTR_LIST(min_ipu_util),
219         ATTR_LIST(min_fsync_blocks),
220         ATTR_LIST(max_victim_search),
221         ATTR_LIST(dir_level),
222         ATTR_LIST(ram_thresh),
223         NULL,
224 };
225
226 static const struct sysfs_ops f2fs_attr_ops = {
227         .show   = f2fs_attr_show,
228         .store  = f2fs_attr_store,
229 };
230
231 static struct kobj_type f2fs_ktype = {
232         .default_attrs  = f2fs_attrs,
233         .sysfs_ops      = &f2fs_attr_ops,
234         .release        = f2fs_sb_release,
235 };
236
237 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
238 {
239         struct va_format vaf;
240         va_list args;
241
242         va_start(args, fmt);
243         vaf.fmt = fmt;
244         vaf.va = &args;
245         printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
246         va_end(args);
247 }
248
249 static void init_once(void *foo)
250 {
251         struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
252
253         inode_init_once(&fi->vfs_inode);
254 }
255
256 static int parse_options(struct super_block *sb, char *options)
257 {
258         struct f2fs_sb_info *sbi = F2FS_SB(sb);
259         substring_t args[MAX_OPT_ARGS];
260         char *p, *name;
261         int arg = 0;
262
263         if (!options)
264                 return 0;
265
266         while ((p = strsep(&options, ",")) != NULL) {
267                 int token;
268                 if (!*p)
269                         continue;
270                 /*
271                  * Initialize args struct so we know whether arg was
272                  * found; some options take optional arguments.
273                  */
274                 args[0].to = args[0].from = NULL;
275                 token = match_token(p, f2fs_tokens, args);
276
277                 switch (token) {
278                 case Opt_gc_background:
279                         name = match_strdup(&args[0]);
280
281                         if (!name)
282                                 return -ENOMEM;
283                         if (strlen(name) == 2 && !strncmp(name, "on", 2))
284                                 set_opt(sbi, BG_GC);
285                         else if (strlen(name) == 3 && !strncmp(name, "off", 3))
286                                 clear_opt(sbi, BG_GC);
287                         else {
288                                 kfree(name);
289                                 return -EINVAL;
290                         }
291                         kfree(name);
292                         break;
293                 case Opt_disable_roll_forward:
294                         set_opt(sbi, DISABLE_ROLL_FORWARD);
295                         break;
296                 case Opt_norecovery:
297                         /* this option mounts f2fs with ro */
298                         set_opt(sbi, DISABLE_ROLL_FORWARD);
299                         if (!f2fs_readonly(sb))
300                                 return -EINVAL;
301                         break;
302                 case Opt_discard:
303                         set_opt(sbi, DISCARD);
304                         break;
305                 case Opt_noheap:
306                         set_opt(sbi, NOHEAP);
307                         break;
308 #ifdef CONFIG_F2FS_FS_XATTR
309                 case Opt_user_xattr:
310                         set_opt(sbi, XATTR_USER);
311                         break;
312                 case Opt_nouser_xattr:
313                         clear_opt(sbi, XATTR_USER);
314                         break;
315                 case Opt_inline_xattr:
316                         set_opt(sbi, INLINE_XATTR);
317                         break;
318 #else
319                 case Opt_user_xattr:
320                         f2fs_msg(sb, KERN_INFO,
321                                 "user_xattr options not supported");
322                         break;
323                 case Opt_nouser_xattr:
324                         f2fs_msg(sb, KERN_INFO,
325                                 "nouser_xattr options not supported");
326                         break;
327                 case Opt_inline_xattr:
328                         f2fs_msg(sb, KERN_INFO,
329                                 "inline_xattr options not supported");
330                         break;
331 #endif
332 #ifdef CONFIG_F2FS_FS_POSIX_ACL
333                 case Opt_acl:
334                         set_opt(sbi, POSIX_ACL);
335                         break;
336                 case Opt_noacl:
337                         clear_opt(sbi, POSIX_ACL);
338                         break;
339 #else
340                 case Opt_acl:
341                         f2fs_msg(sb, KERN_INFO, "acl options not supported");
342                         break;
343                 case Opt_noacl:
344                         f2fs_msg(sb, KERN_INFO, "noacl options not supported");
345                         break;
346 #endif
347                 case Opt_active_logs:
348                         if (args->from && match_int(args, &arg))
349                                 return -EINVAL;
350                         if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
351                                 return -EINVAL;
352                         sbi->active_logs = arg;
353                         break;
354                 case Opt_disable_ext_identify:
355                         set_opt(sbi, DISABLE_EXT_IDENTIFY);
356                         break;
357                 case Opt_inline_data:
358                         set_opt(sbi, INLINE_DATA);
359                         break;
360                 case Opt_inline_dentry:
361                         set_opt(sbi, INLINE_DENTRY);
362                         break;
363                 case Opt_flush_merge:
364                         set_opt(sbi, FLUSH_MERGE);
365                         break;
366                 case Opt_nobarrier:
367                         set_opt(sbi, NOBARRIER);
368                         break;
369                 case Opt_fastboot:
370                         set_opt(sbi, FASTBOOT);
371                         break;
372                 case Opt_extent_cache:
373                         set_opt(sbi, EXTENT_CACHE);
374                         break;
375                 default:
376                         f2fs_msg(sb, KERN_ERR,
377                                 "Unrecognized mount option \"%s\" or missing value",
378                                 p);
379                         return -EINVAL;
380                 }
381         }
382         return 0;
383 }
384
385 static struct inode *f2fs_alloc_inode(struct super_block *sb)
386 {
387         struct f2fs_inode_info *fi;
388
389         fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
390         if (!fi)
391                 return NULL;
392
393         init_once((void *) fi);
394
395         /* Initialize f2fs-specific inode info */
396         fi->vfs_inode.i_version = 1;
397         atomic_set(&fi->dirty_pages, 0);
398         fi->i_current_depth = 1;
399         fi->i_advise = 0;
400         rwlock_init(&fi->ext_lock);
401         init_rwsem(&fi->i_sem);
402         INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
403         INIT_LIST_HEAD(&fi->inmem_pages);
404         mutex_init(&fi->inmem_lock);
405
406         set_inode_flag(fi, FI_NEW_INODE);
407
408         if (test_opt(F2FS_SB(sb), INLINE_XATTR))
409                 set_inode_flag(fi, FI_INLINE_XATTR);
410
411         /* Will be used by directory only */
412         fi->i_dir_level = F2FS_SB(sb)->dir_level;
413
414         return &fi->vfs_inode;
415 }
416
417 static int f2fs_drop_inode(struct inode *inode)
418 {
419         /*
420          * This is to avoid a deadlock condition like below.
421          * writeback_single_inode(inode)
422          *  - f2fs_write_data_page
423          *    - f2fs_gc -> iput -> evict
424          *       - inode_wait_for_writeback(inode)
425          */
426         if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
427                 return 0;
428         return generic_drop_inode(inode);
429 }
430
431 /*
432  * f2fs_dirty_inode() is called from __mark_inode_dirty()
433  *
434  * We should call set_dirty_inode to write the dirty inode through write_inode.
435  */
436 static void f2fs_dirty_inode(struct inode *inode, int flags)
437 {
438         set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
439 }
440
441 static void f2fs_i_callback(struct rcu_head *head)
442 {
443         struct inode *inode = container_of(head, struct inode, i_rcu);
444         kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
445 }
446
447 static void f2fs_destroy_inode(struct inode *inode)
448 {
449         call_rcu(&inode->i_rcu, f2fs_i_callback);
450 }
451
452 static void f2fs_put_super(struct super_block *sb)
453 {
454         struct f2fs_sb_info *sbi = F2FS_SB(sb);
455
456         if (sbi->s_proc) {
457                 remove_proc_entry("segment_info", sbi->s_proc);
458                 remove_proc_entry(sb->s_id, f2fs_proc_root);
459         }
460         kobject_del(&sbi->s_kobj);
461
462         f2fs_destroy_stats(sbi);
463         stop_gc_thread(sbi);
464
465         /*
466          * We don't need to do checkpoint when superblock is clean.
467          * But if the previous checkpoint was not done by umount, we need to do
468          * a clean checkpoint again.
469          */
470         if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
471                         !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
472                 struct cp_control cpc = {
473                         .reason = CP_UMOUNT,
474                 };
475                 write_checkpoint(sbi, &cpc);
476         }
477
478         /*
479          * Normally the superblock is clean, so we need to release this here.
480          * In addition, an EIO will skip the checkpoint, so we need this as well.
481          */
482         release_dirty_inode(sbi);
483         release_discard_addrs(sbi);
484
485         iput(sbi->node_inode);
486         iput(sbi->meta_inode);
487
488         /* destroy f2fs internal modules */
489         destroy_node_manager(sbi);
490         destroy_segment_manager(sbi);
491
492         kfree(sbi->ckpt);
493         kobject_put(&sbi->s_kobj);
494         wait_for_completion(&sbi->s_kobj_unregister);
495
496         sb->s_fs_info = NULL;
497         brelse(sbi->raw_super_buf);
498         kfree(sbi);
499 }
500
501 int f2fs_sync_fs(struct super_block *sb, int sync)
502 {
503         struct f2fs_sb_info *sbi = F2FS_SB(sb);
504
505         trace_f2fs_sync_fs(sb, sync);
506
507         if (sync) {
508                 struct cp_control cpc;
509
510                 cpc.reason = __get_cp_reason(sbi);
511
512                 mutex_lock(&sbi->gc_mutex);
513                 write_checkpoint(sbi, &cpc);
514                 mutex_unlock(&sbi->gc_mutex);
515         } else {
516                 f2fs_balance_fs(sbi);
517         }
518         f2fs_trace_ios(NULL, NULL, 1);
519
520         return 0;
521 }
522
523 static int f2fs_freeze(struct super_block *sb)
524 {
525         int err;
526
527         if (f2fs_readonly(sb))
528                 return 0;
529
530         err = f2fs_sync_fs(sb, 1);
531         return err;
532 }
533
534 static int f2fs_unfreeze(struct super_block *sb)
535 {
536         return 0;
537 }
538
539 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
540 {
541         struct super_block *sb = dentry->d_sb;
542         struct f2fs_sb_info *sbi = F2FS_SB(sb);
543         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
544         block_t total_count, user_block_count, start_count, ovp_count;
545
546         total_count = le64_to_cpu(sbi->raw_super->block_count);
547         user_block_count = sbi->user_block_count;
548         start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
549         ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
550         buf->f_type = F2FS_SUPER_MAGIC;
551         buf->f_bsize = sbi->blocksize;
552
553         buf->f_blocks = total_count - start_count;
554         buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
555         buf->f_bavail = user_block_count - valid_user_blocks(sbi);
556
557         buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
558         buf->f_ffree = buf->f_files - valid_inode_count(sbi);
559
560         buf->f_namelen = F2FS_NAME_LEN;
561         buf->f_fsid.val[0] = (u32)id;
562         buf->f_fsid.val[1] = (u32)(id >> 32);
563
564         return 0;
565 }
566
567 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
568 {
569         struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
570
571         if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC))
572                 seq_printf(seq, ",background_gc=%s", "on");
573         else
574                 seq_printf(seq, ",background_gc=%s", "off");
575         if (test_opt(sbi, DISABLE_ROLL_FORWARD))
576                 seq_puts(seq, ",disable_roll_forward");
577         if (test_opt(sbi, DISCARD))
578                 seq_puts(seq, ",discard");
579         if (test_opt(sbi, NOHEAP))
580                 seq_puts(seq, ",no_heap_alloc");
581 #ifdef CONFIG_F2FS_FS_XATTR
582         if (test_opt(sbi, XATTR_USER))
583                 seq_puts(seq, ",user_xattr");
584         else
585                 seq_puts(seq, ",nouser_xattr");
586         if (test_opt(sbi, INLINE_XATTR))
587                 seq_puts(seq, ",inline_xattr");
588 #endif
589 #ifdef CONFIG_F2FS_FS_POSIX_ACL
590         if (test_opt(sbi, POSIX_ACL))
591                 seq_puts(seq, ",acl");
592         else
593                 seq_puts(seq, ",noacl");
594 #endif
595         if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
596                 seq_puts(seq, ",disable_ext_identify");
597         if (test_opt(sbi, INLINE_DATA))
598                 seq_puts(seq, ",inline_data");
599         if (test_opt(sbi, INLINE_DENTRY))
600                 seq_puts(seq, ",inline_dentry");
601         if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
602                 seq_puts(seq, ",flush_merge");
603         if (test_opt(sbi, NOBARRIER))
604                 seq_puts(seq, ",nobarrier");
605         if (test_opt(sbi, FASTBOOT))
606                 seq_puts(seq, ",fastboot");
607         if (test_opt(sbi, EXTENT_CACHE))
608                 seq_puts(seq, ",extent_cache");
609         seq_printf(seq, ",active_logs=%u", sbi->active_logs);
610
611         return 0;
612 }
613
614 static int segment_info_seq_show(struct seq_file *seq, void *offset)
615 {
616         struct super_block *sb = seq->private;
617         struct f2fs_sb_info *sbi = F2FS_SB(sb);
618         unsigned int total_segs =
619                         le32_to_cpu(sbi->raw_super->segment_count_main);
620         int i;
621
622         seq_puts(seq, "format: segment_type|valid_blocks\n"
623                 "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
624
625         for (i = 0; i < total_segs; i++) {
626                 struct seg_entry *se = get_seg_entry(sbi, i);
627
628                 if ((i % 10) == 0)
629                         seq_printf(seq, "%-5d", i);
630                 seq_printf(seq, "%d|%-3u", se->type,
631                                         get_valid_blocks(sbi, i, 1));
632                 if ((i % 10) == 9 || i == (total_segs - 1))
633                         seq_putc(seq, '\n');
634                 else
635                         seq_putc(seq, ' ');
636         }
637
638         return 0;
639 }
640
641 static int segment_info_open_fs(struct inode *inode, struct file *file)
642 {
643         return single_open(file, segment_info_seq_show, PDE_DATA(inode));
644 }
645
646 static const struct file_operations f2fs_seq_segment_info_fops = {
647         .owner = THIS_MODULE,
648         .open = segment_info_open_fs,
649         .read = seq_read,
650         .llseek = seq_lseek,
651         .release = single_release,
652 };
653
654 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
655 {
656         struct f2fs_sb_info *sbi = F2FS_SB(sb);
657         struct f2fs_mount_info org_mount_opt;
658         int err, active_logs;
659         bool need_restart_gc = false;
660         bool need_stop_gc = false;
661
662         sync_filesystem(sb);
663
664         /*
665          * Save the old mount options in case we
666          * need to restore them.
667          */
668         org_mount_opt = sbi->mount_opt;
669         active_logs = sbi->active_logs;
670
671         sbi->mount_opt.opt = 0;
672         sbi->active_logs = NR_CURSEG_TYPE;
673
674         /* parse mount options */
675         err = parse_options(sb, data);
676         if (err)
677                 goto restore_opts;
678
679         /*
680          * Both the previous and the new state of the filesystem are RO,
681          * so skip checking the GC and FLUSH_MERGE conditions.
682          */
683         if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
684                 goto skip;
685
686         /*
687          * We stop the GC thread if the FS is mounted as RO
688          * or if background_gc=off is passed as a mount
689          * option. Also sync the filesystem.
690          */
691         if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
692                 if (sbi->gc_thread) {
693                         stop_gc_thread(sbi);
694                         f2fs_sync_fs(sb, 1);
695                         need_restart_gc = true;
696                 }
697         } else if (!sbi->gc_thread) {
698                 err = start_gc_thread(sbi);
699                 if (err)
700                         goto restore_opts;
701                 need_stop_gc = true;
702         }
703
704         /*
705          * We stop the issue_flush thread if the FS is mounted as RO
706          * or if flush_merge is not passed in the mount options.
707          */
708         if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
709                 destroy_flush_cmd_control(sbi);
710         } else if (!SM_I(sbi)->cmd_control_info) {
711                 err = create_flush_cmd_control(sbi);
712                 if (err)
713                         goto restore_gc;
714         }
715 skip:
716         /* Update the POSIXACL Flag */
717          sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
718                 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
719         return 0;
720 restore_gc:
721         if (need_restart_gc) {
722                 if (start_gc_thread(sbi))
723                         f2fs_msg(sbi->sb, KERN_WARNING,
724                                 "background gc thread has stopped");
725         } else if (need_stop_gc) {
726                 stop_gc_thread(sbi);
727         }
728 restore_opts:
729         sbi->mount_opt = org_mount_opt;
730         sbi->active_logs = active_logs;
731         return err;
732 }
733
734 static struct super_operations f2fs_sops = {
735         .alloc_inode    = f2fs_alloc_inode,
736         .drop_inode     = f2fs_drop_inode,
737         .destroy_inode  = f2fs_destroy_inode,
738         .write_inode    = f2fs_write_inode,
739         .dirty_inode    = f2fs_dirty_inode,
740         .show_options   = f2fs_show_options,
741         .evict_inode    = f2fs_evict_inode,
742         .put_super      = f2fs_put_super,
743         .sync_fs        = f2fs_sync_fs,
744         .freeze_fs      = f2fs_freeze,
745         .unfreeze_fs    = f2fs_unfreeze,
746         .statfs         = f2fs_statfs,
747         .remount_fs     = f2fs_remount,
748 };
749
750 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
751                 u64 ino, u32 generation)
752 {
753         struct f2fs_sb_info *sbi = F2FS_SB(sb);
754         struct inode *inode;
755
756         if (check_nid_range(sbi, ino))
757                 return ERR_PTR(-ESTALE);
758
759         /*
760          * f2fs_iget isn't quite right if the inode is currently unallocated!
761          * However f2fs_iget currently does appropriate checks to handle stale
762          * inodes so everything is OK.
763          */
764         inode = f2fs_iget(sb, ino);
765         if (IS_ERR(inode))
766                 return ERR_CAST(inode);
767         if (unlikely(generation && inode->i_generation != generation)) {
768                 /* we didn't find the right inode.. */
769                 iput(inode);
770                 return ERR_PTR(-ESTALE);
771         }
772         return inode;
773 }
774
775 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
776                 int fh_len, int fh_type)
777 {
778         return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
779                                     f2fs_nfs_get_inode);
780 }
781
782 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
783                 int fh_len, int fh_type)
784 {
785         return generic_fh_to_parent(sb, fid, fh_len, fh_type,
786                                     f2fs_nfs_get_inode);
787 }
788
789 static const struct export_operations f2fs_export_ops = {
790         .fh_to_dentry = f2fs_fh_to_dentry,
791         .fh_to_parent = f2fs_fh_to_parent,
792         .get_parent = f2fs_get_parent,
793 };
794
795 static loff_t max_file_size(unsigned bits)
796 {
797         loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
798         loff_t leaf_count = ADDRS_PER_BLOCK;
799
800         /* two direct node blocks */
801         result += (leaf_count * 2);
802
803         /* two indirect node blocks */
804         leaf_count *= NIDS_PER_BLOCK;
805         result += (leaf_count * 2);
806
807         /* one double indirect node block */
808         leaf_count *= NIDS_PER_BLOCK;
809         result += leaf_count;
810
811         result <<= bits;
812         return result;
813 }
814
815 static int sanity_check_raw_super(struct super_block *sb,
816                         struct f2fs_super_block *raw_super)
817 {
818         unsigned int blocksize;
819
820         if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
821                 f2fs_msg(sb, KERN_INFO,
822                         "Magic Mismatch, valid(0x%x) - read(0x%x)",
823                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
824                 return 1;
825         }
826
827         /* Currently, support only 4KB page cache size */
828         if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
829                 f2fs_msg(sb, KERN_INFO,
830                         "Invalid page_cache_size (%lu), supports only 4KB\n",
831                         PAGE_CACHE_SIZE);
832                 return 1;
833         }
834
835         /* Currently, support only 4KB block size */
836         blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
837         if (blocksize != F2FS_BLKSIZE) {
838                 f2fs_msg(sb, KERN_INFO,
839                         "Invalid blocksize (%u), supports only 4KB\n",
840                         blocksize);
841                 return 1;
842         }
843
844         /* Currently, support 512/1024/2048/4096 bytes sector size */
845         if (le32_to_cpu(raw_super->log_sectorsize) >
846                                 F2FS_MAX_LOG_SECTOR_SIZE ||
847                 le32_to_cpu(raw_super->log_sectorsize) <
848                                 F2FS_MIN_LOG_SECTOR_SIZE) {
849                 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
850                         le32_to_cpu(raw_super->log_sectorsize));
851                 return 1;
852         }
853         if (le32_to_cpu(raw_super->log_sectors_per_block) +
854                 le32_to_cpu(raw_super->log_sectorsize) !=
855                         F2FS_MAX_LOG_SECTOR_SIZE) {
856                 f2fs_msg(sb, KERN_INFO,
857                         "Invalid log sectors per block(%u) log sectorsize(%u)",
858                         le32_to_cpu(raw_super->log_sectors_per_block),
859                         le32_to_cpu(raw_super->log_sectorsize));
860                 return 1;
861         }
862         return 0;
863 }
864
865 static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
866 {
867         unsigned int total, fsmeta;
868         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
869         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
870
871         total = le32_to_cpu(raw_super->segment_count);
872         fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
873         fsmeta += le32_to_cpu(raw_super->segment_count_sit);
874         fsmeta += le32_to_cpu(raw_super->segment_count_nat);
875         fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
876         fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
877
878         if (unlikely(fsmeta >= total))
879                 return 1;
880
881         if (unlikely(f2fs_cp_error(sbi))) {
882                 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
883                 return 1;
884         }
885         return 0;
886 }
887
888 static void init_sb_info(struct f2fs_sb_info *sbi)
889 {
890         struct f2fs_super_block *raw_super = sbi->raw_super;
891         int i;
892
893         sbi->log_sectors_per_block =
894                 le32_to_cpu(raw_super->log_sectors_per_block);
895         sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
896         sbi->blocksize = 1 << sbi->log_blocksize;
897         sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
898         sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
899         sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
900         sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
901         sbi->total_sections = le32_to_cpu(raw_super->section_count);
902         sbi->total_node_count =
903                 (le32_to_cpu(raw_super->segment_count_nat) / 2)
904                         * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
905         sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
906         sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
907         sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
908         sbi->cur_victim_sec = NULL_SECNO;
909         sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
910
911         for (i = 0; i < NR_COUNT_TYPE; i++)
912                 atomic_set(&sbi->nr_pages[i], 0);
913
914         sbi->dir_level = DEF_DIR_LEVEL;
915         clear_sbi_flag(sbi, SBI_NEED_FSCK);
916 }
917
918 /*
919  * Read the f2fs raw superblock.
920  * Because we keep two copies of the superblock, read the first one first;
921  * if it is invalid, fall back to reading the second one.
922  */
923 static int read_raw_super_block(struct super_block *sb,
924                         struct f2fs_super_block **raw_super,
925                         struct buffer_head **raw_super_buf)
926 {
927         int block = 0;
928
929 retry:
930         *raw_super_buf = sb_bread(sb, block);
931         if (!*raw_super_buf) {
932                 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
933                                 block + 1);
934                 if (block == 0) {
935                         block++;
936                         goto retry;
937                 } else {
938                         return -EIO;
939                 }
940         }
941
942         *raw_super = (struct f2fs_super_block *)
943                 ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
944
945         /* sanity checking of raw super */
946         if (sanity_check_raw_super(sb, *raw_super)) {
947                 brelse(*raw_super_buf);
948                 f2fs_msg(sb, KERN_ERR,
949                         "Can't find valid F2FS filesystem in %dth superblock",
950                                                                 block + 1);
951                 if (block == 0) {
952                         block++;
953                         goto retry;
954                 } else {
955                         return -EINVAL;
956                 }
957         }
958
959         return 0;
960 }
961
962 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
963 {
964         struct f2fs_sb_info *sbi;
965         struct f2fs_super_block *raw_super = NULL;
966         struct buffer_head *raw_super_buf;
967         struct inode *root;
968         long err = -EINVAL;
969         bool retry = true;
970         char *options = NULL;
971         int i;
972
973 try_onemore:
974         /* allocate memory for f2fs-specific super block info */
975         sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
976         if (!sbi)
977                 return -ENOMEM;
978
979         /* set a block size */
980         if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
981                 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
982                 goto free_sbi;
983         }
984
985         err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
986         if (err)
987                 goto free_sbi;
988
989         sb->s_fs_info = sbi;
990         /* init some FS parameters */
991         sbi->active_logs = NR_CURSEG_TYPE;
992
993         set_opt(sbi, BG_GC);
994
995 #ifdef CONFIG_F2FS_FS_XATTR
996         set_opt(sbi, XATTR_USER);
997 #endif
998 #ifdef CONFIG_F2FS_FS_POSIX_ACL
999         set_opt(sbi, POSIX_ACL);
1000 #endif
1001         /* parse mount options */
1002         options = kstrdup((const char *)data, GFP_KERNEL);
1003         if (data && !options) {
1004                 err = -ENOMEM;
1005                 goto free_sb_buf;
1006         }
1007
1008         err = parse_options(sb, options);
1009         if (err)
1010                 goto free_options;
1011
1012         sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
1013         sb->s_max_links = F2FS_LINK_MAX;
1014         get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1015
1016         sb->s_op = &f2fs_sops;
1017         sb->s_xattr = f2fs_xattr_handlers;
1018         sb->s_export_op = &f2fs_export_ops;
1019         sb->s_magic = F2FS_SUPER_MAGIC;
1020         sb->s_time_gran = 1;
1021         sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
1022                 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
1023         memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
1024
1025         /* init f2fs-specific super block info */
1026         sbi->sb = sb;
1027         sbi->raw_super = raw_super;
1028         sbi->raw_super_buf = raw_super_buf;
1029         mutex_init(&sbi->gc_mutex);
1030         mutex_init(&sbi->writepages);
1031         mutex_init(&sbi->cp_mutex);
1032         init_rwsem(&sbi->node_write);
1033         clear_sbi_flag(sbi, SBI_POR_DOING);
1034         spin_lock_init(&sbi->stat_lock);
1035
1036         init_rwsem(&sbi->read_io.io_rwsem);
1037         sbi->read_io.sbi = sbi;
1038         sbi->read_io.bio = NULL;
1039         for (i = 0; i < NR_PAGE_TYPE; i++) {
1040                 init_rwsem(&sbi->write_io[i].io_rwsem);
1041                 sbi->write_io[i].sbi = sbi;
1042                 sbi->write_io[i].bio = NULL;
1043         }
1044
1045         init_rwsem(&sbi->cp_rwsem);
1046         init_waitqueue_head(&sbi->cp_wait);
1047         init_sb_info(sbi);
1048
1049         /* get an inode for meta space */
1050         sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
1051         if (IS_ERR(sbi->meta_inode)) {
1052                 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
1053                 err = PTR_ERR(sbi->meta_inode);
1054                 goto free_options;
1055         }
1056
1057         err = get_valid_checkpoint(sbi);
1058         if (err) {
1059                 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
1060                 goto free_meta_inode;
1061         }
1062
1063         /* sanity checking of checkpoint */
1064         err = -EINVAL;
1065         if (sanity_check_ckpt(sbi)) {
1066                 f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
1067                 goto free_cp;
1068         }
1069
1070         sbi->total_valid_node_count =
1071                                 le32_to_cpu(sbi->ckpt->valid_node_count);
1072         sbi->total_valid_inode_count =
1073                                 le32_to_cpu(sbi->ckpt->valid_inode_count);
1074         sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
1075         sbi->total_valid_block_count =
1076                                 le64_to_cpu(sbi->ckpt->valid_block_count);
1077         sbi->last_valid_block_count = sbi->total_valid_block_count;
1078         sbi->alloc_valid_block_count = 0;
1079         INIT_LIST_HEAD(&sbi->dir_inode_list);
1080         spin_lock_init(&sbi->dir_inode_lock);
1081
1082         init_extent_cache_info(sbi);
1083
1084         init_ino_entry_info(sbi);
1085
1086         /* setup f2fs internal modules */
1087         err = build_segment_manager(sbi);
1088         if (err) {
1089                 f2fs_msg(sb, KERN_ERR,
1090                         "Failed to initialize F2FS segment manager");
1091                 goto free_sm;
1092         }
1093         err = build_node_manager(sbi);
1094         if (err) {
1095                 f2fs_msg(sb, KERN_ERR,
1096                         "Failed to initialize F2FS node manager");
1097                 goto free_nm;
1098         }
1099
1100         build_gc_manager(sbi);
1101
1102         /* get an inode for node space */
1103         sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
1104         if (IS_ERR(sbi->node_inode)) {
1105                 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
1106                 err = PTR_ERR(sbi->node_inode);
1107                 goto free_nm;
1108         }
1109
1110         /* if there are any orphan nodes, free them */
1111         recover_orphan_inodes(sbi);
1112
1113         /* read root inode and dentry */
1114         root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
1115         if (IS_ERR(root)) {
1116                 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
1117                 err = PTR_ERR(root);
1118                 goto free_node_inode;
1119         }
1120         if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1121                 iput(root);
1122                 err = -EINVAL;
1123                 goto free_node_inode;
1124         }
1125
1126         sb->s_root = d_make_root(root); /* allocate root dentry */
1127         if (!sb->s_root) {
1128                 err = -ENOMEM;
1129                 goto free_root_inode;
1130         }
1131
1132         err = f2fs_build_stats(sbi);
1133         if (err)
1134                 goto free_root_inode;
1135
1136         if (f2fs_proc_root)
1137                 sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
1138
1139         if (sbi->s_proc)
1140                 proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
1141                                  &f2fs_seq_segment_info_fops, sb);
1142
1143         if (test_opt(sbi, DISCARD)) {
1144                 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1145                 if (!blk_queue_discard(q))
1146                         f2fs_msg(sb, KERN_WARNING,
1147                                         "mounting with \"discard\" option, but "
1148                                         "the device does not support discard");
1149         }
1150
1151         sbi->s_kobj.kset = f2fs_kset;
1152         init_completion(&sbi->s_kobj_unregister);
1153         err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
1154                                                         "%s", sb->s_id);
1155         if (err)
1156                 goto free_proc;
1157
1158         if (!retry)
1159                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1160
1161         /* recover fsynced data */
1162         if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
1163                 /*
1164                  * The mount should fail when the device is read-only and the
1165                  * previous checkpoint was not done by a clean system shutdown.
1166                  */
1167                 if (bdev_read_only(sb->s_bdev) &&
1168                                 !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
1169                         err = -EROFS;
1170                         goto free_kobj;
1171                 }
1172                 err = recover_fsync_data(sbi);
1173                 if (err) {
1174                         f2fs_msg(sb, KERN_ERR,
1175                                 "Cannot recover all fsync data errno=%ld", err);
1176                         goto free_kobj;
1177                 }
1178         }
1179
1180         /*
1181          * If the filesystem is not mounted read-only,
1182          * then start the gc_thread.
1183          */
1184         if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
1185                 /* After POR, we can run the background GC thread. */
1186                 err = start_gc_thread(sbi);
1187                 if (err)
1188                         goto free_kobj;
1189         }
1190         kfree(options);
1191         return 0;
1192
1193 free_kobj:
1194         kobject_del(&sbi->s_kobj);
1195 free_proc:
1196         if (sbi->s_proc) {
1197                 remove_proc_entry("segment_info", sbi->s_proc);
1198                 remove_proc_entry(sb->s_id, f2fs_proc_root);
1199         }
1200         f2fs_destroy_stats(sbi);
1201 free_root_inode:
1202         dput(sb->s_root);
1203         sb->s_root = NULL;
1204 free_node_inode:
1205         iput(sbi->node_inode);
1206 free_nm:
1207         destroy_node_manager(sbi);
1208 free_sm:
1209         destroy_segment_manager(sbi);
1210 free_cp:
1211         kfree(sbi->ckpt);
1212 free_meta_inode:
1213         make_bad_inode(sbi->meta_inode);
1214         iput(sbi->meta_inode);
1215 free_options:
1216         kfree(options);
1217 free_sb_buf:
1218         brelse(raw_super_buf);
1219 free_sbi:
1220         kfree(sbi);
1221
1222         /* give only one more chance */
1223         if (retry) {
1224                 retry = false;
1225                 shrink_dcache_sb(sb);
1226                 goto try_onemore;
1227         }
1228         return err;
1229 }
1230
1231 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
1232                         const char *dev_name, void *data)
1233 {
1234         return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
1235 }
1236
1237 static void kill_f2fs_super(struct super_block *sb)
1238 {
1239         if (sb->s_root)
1240                 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
1241         kill_block_super(sb);
1242 }
1243
1244 static struct file_system_type f2fs_fs_type = {
1245         .owner          = THIS_MODULE,
1246         .name           = "f2fs",
1247         .mount          = f2fs_mount,
1248         .kill_sb        = kill_f2fs_super,
1249         .fs_flags       = FS_REQUIRES_DEV,
1250 };
1251 MODULE_ALIAS_FS("f2fs");
1252
1253 static int __init init_inodecache(void)
1254 {
1255         f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
1256                         sizeof(struct f2fs_inode_info));
1257         if (!f2fs_inode_cachep)
1258                 return -ENOMEM;
1259         return 0;
1260 }
1261
1262 static void destroy_inodecache(void)
1263 {
1264         /*
1265          * Make sure all delayed rcu free inodes are flushed before we
1266          * destroy cache.
1267          */
1268         rcu_barrier();
1269         kmem_cache_destroy(f2fs_inode_cachep);
1270 }
1271
1272 static int __init init_f2fs_fs(void)
1273 {
1274         int err;
1275
1276         f2fs_build_trace_ios();
1277
1278         err = init_inodecache();
1279         if (err)
1280                 goto fail;
1281         err = create_node_manager_caches();
1282         if (err)
1283                 goto free_inodecache;
1284         err = create_segment_manager_caches();
1285         if (err)
1286                 goto free_node_manager_caches;
1287         err = create_checkpoint_caches();
1288         if (err)
1289                 goto free_segment_manager_caches;
1290         err = create_extent_cache();
1291         if (err)
1292                 goto free_checkpoint_caches;
1293         f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
1294         if (!f2fs_kset) {
1295                 err = -ENOMEM;
1296                 goto free_extent_cache;
1297         }
1298         err = register_filesystem(&f2fs_fs_type);
1299         if (err)
1300                 goto free_kset;
1301         f2fs_create_root_stats();
1302         f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
1303         return 0;
1304
1305 free_kset:
1306         kset_unregister(f2fs_kset);
1307 free_extent_cache:
1308         destroy_extent_cache();
1309 free_checkpoint_caches:
1310         destroy_checkpoint_caches();
1311 free_segment_manager_caches:
1312         destroy_segment_manager_caches();
1313 free_node_manager_caches:
1314         destroy_node_manager_caches();
1315 free_inodecache:
1316         destroy_inodecache();
1317 fail:
1318         return err;
1319 }
1320
1321 static void __exit exit_f2fs_fs(void)
1322 {
1323         remove_proc_entry("fs/f2fs", NULL);
1324         f2fs_destroy_root_stats();
1325         unregister_filesystem(&f2fs_fs_type);
1326         destroy_extent_cache();
1327         destroy_checkpoint_caches();
1328         destroy_segment_manager_caches();
1329         destroy_node_manager_caches();
1330         destroy_inodecache();
1331         kset_unregister(f2fs_kset);
1332         f2fs_destroy_trace_ios();
1333 }
1334
1335 module_init(init_f2fs_fs)
1336 module_exit(exit_f2fs_fs)
1337
1338 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
1339 MODULE_DESCRIPTION("Flash Friendly File System");
1340 MODULE_LICENSE("GPL");