/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on ramfs. It
 * extends ramfs with the ability to use swap and to honor resource
 * limits, which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
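
/*
 * Illustrative arithmetic (not from the original source), assuming 4K
 * pages: VM_ACCT(5000) aligns 5000 up to 8192 and shifts by PAGE_SHIFT,
 * so a 5000-byte object is accounted as 2 pages; VM_ACCT(0) accounts 0.
 */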

/* Pretend that each entry is of this size in a directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlinks up to this size are kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};
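
/*
 * A minimal sketch of the handshake implied by the fields above (the
 * writer side lives in shmem_fallocate(), which is outside this
 * excerpt): the fallocating task publishes an on-stack shmem_falloc
 * through inode->i_private under inode->i_lock, and readers such as
 * shmem_writepage() below sample it under the same lock:
 *
 *      spin_lock(&inode->i_lock);
 *      inode->i_private = &shmem_falloc;       (fallocate side)
 *      spin_unlock(&inode->i_lock);
 */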

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp,
                gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
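
/*
 * Note the inversion relative to shmem_acct_size() above: an object
 * created without VM_NORESERVE was already charged in full up front, so
 * shmem_acct_block() is a no-op for it; only VM_NORESERVE objects pay
 * as they go. Illustratively, a MAP_NORESERVE shared mapping charges
 * nothing at setup time, then VM_ACCT(PAGE_SIZE) per page faulted in.
 */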

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * Normally, info->alloced == inode->i_mapping->nrpages + info->swapped,
 * so the number of pages freed behind our back is
 * info->alloced - (inode->i_mapping->nrpages + info->swapped).
 *
 * It must be called with info->lock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks)
                        percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
        }
}
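
/*
 * Worked example with illustrative numbers: if info->alloced is 10
 * while nrpages is 5 and info->swapped is 2, then 3 undirtied hole
 * pages were reclaimed behind our back; shmem_recalc_inode() returns
 * those 3 pages to the superblock's used_blocks and to vm accounting.
 */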

bool shmem_charge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;

        if (shmem_acct_block(info->flags, pages))
                return false;
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
        inode->i_mapping->nrpages += pages;

        if (!sbinfo->max_blocks)
                return true;
        if (percpu_counter_compare(&sbinfo->used_blocks,
                                sbinfo->max_blocks - pages) > 0) {
                inode->i_mapping->nrpages -= pages;
                spin_lock_irqsave(&info->lock, flags);
                info->alloced -= pages;
                shmem_recalc_inode(inode);
                spin_unlock_irqrestore(&info->lock, flags);

                return false;
        }
        percpu_counter_add(&sbinfo->used_blocks, pages);
        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;

        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        void **pslot;
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
        if (!pslot)
                return -ENOENT;
        item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
        if (item != expected)
                return -ENOENT;
        radix_tree_replace_slot(pslot, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking the page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        void *item;

        rcu_read_lock();
        item = radix_tree_lookup(&mapping->page_tree, index);
        rcu_read_unlock();
        return item == swp_to_radix_entry(swap);
}
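
/*
 * A radix tree slot here holds either a struct page pointer or an
 * exceptional entry encoding a swap entry, so, illustratively:
 *
 *      item = radix_tree_lookup(&mapping->page_tree, index);
 *      if (radix_tree_exceptional_entry(item))
 *              swap = radix_to_swp_entry(item);        (swapped out)
 */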

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *      disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *      enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *      only allocate huge pages if the page will be fully within i_size,
 *      also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *      only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER        0
#define SHMEM_HUGE_ALWAYS       1
#define SHMEM_HUGE_WITHIN_SIZE  2
#define SHMEM_HUGE_ADVISE       3
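
/*
 * Example usage (illustrative): choosing a policy at mount time:
 *
 *      mount -t tmpfs -o huge=within_size tmpfs /mnt
 */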

/*
 * Special values.
 * These can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *      disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY         (-1)
#define SHMEM_HUGE_FORCE        (-2)
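
/*
 * Example usage (illustrative): overriding every mount from the sysfs
 * knob named above, e.g. to shut huge pages off in an emergency:
 *
 *      echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */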

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int removed = 0, split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        iput(inode);
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &list) {
                int ret;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split) {
                        iput(inode);
                        continue;
                }

                page = find_lock_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;

                if (!PageTransHuge(page)) {
                        unlock_page(page);
                        put_page(page);
                        goto drop;
                }

                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);

                if (ret) {
                        /* split failed: leave it on the list */
                        iput(inode);
                        continue;
                }

                split++;
drop:
                list_del_init(&info->shrinklist);
                removed++;
                iput(inode);
        }

        spin_lock(&sbinfo->shrinklist_lock);
        list_splice_tail(&list, &sbinfo->shrinklist);
        sbinfo->shrinklist_len -= removed;
        spin_unlock(&sbinfo->shrinklist_lock);

        return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected)
{
        int error, nr = hpage_nr_pages(page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON(expected && PageTransHuge(page));

        page_ref_add(page, nr);
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        if (PageTransHuge(page)) {
                void __rcu **results;
                pgoff_t idx;
                int i;

                error = 0;
                if (radix_tree_gang_lookup_slot(&mapping->page_tree,
                                        &results, &idx, index, 1) &&
                                idx < index + HPAGE_PMD_NR) {
                        error = -EEXIST;
                }

                if (!error) {
                        for (i = 0; i < HPAGE_PMD_NR; i++) {
                                error = radix_tree_insert(&mapping->page_tree,
                                                index + i, page + i);
                                VM_BUG_ON(error);
                        }
                        count_vm_event(THP_FILE_ALLOC);
                }
        } else if (!expected) {
                error = radix_tree_insert(&mapping->page_tree, index, page);
        } else {
                error = shmem_radix_tree_replace(mapping, index, expected,
                                                                 page);
        }

        if (!error) {
                mapping->nrpages += nr;
                if (PageTransHuge(page))
                        __inc_node_page_state(page, NR_SHMEM_THPS);
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
                spin_unlock_irq(&mapping->tree_lock);
        } else {
                page->mapping = NULL;
                spin_unlock_irq(&mapping->tree_lock);
                page_ref_sub(page, nr);
        }
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        VM_BUG_ON_PAGE(PageCompound(page), page);

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        __dec_node_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
        put_page(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        spin_lock_irq(&mapping->tree_lock);
        old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
        spin_unlock_irq(&mapping->tree_lock);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                pgoff_t start, pgoff_t end)
{
        struct radix_tree_iter iter;
        void **slot;
        struct page *page;
        unsigned long swapped = 0;

        rcu_read_lock();

        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                if (iter.index >= end)
                        break;

                page = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(page)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (radix_tree_exceptional_entry(page))
                        swapped++;

                if (need_resched()) {
                        cond_resched_rcu();
                        slot = radix_tree_iter_next(&iter);
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping,
                        linear_page_index(vma, vma->vm_start),
                        linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(pvec.pages, pvec.nr);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
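/*
 * Range semantics, with illustrative values: lstart/lend are inclusive
 * byte offsets, so punching out just the second 4K page of a file is
 * shmem_undo_range(inode, 4096, 8191, false), while truncation to EOF
 * passes lend == (loff_t)-1, which is treated as "no upper bound".
 */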
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        pagevec_init(&pvec, 0);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

                        if (!trylock_page(page))
                                continue;

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                /*
                                 * Partial THP truncate: 'start' falls in the
                                 * middle of a THP, so there is no need to look
                                 * at these pages again on a !pvec.nr restart.
                                 */
                                if (index != round_down(end, HPAGE_PMD_NR))
                                        start++;
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                } else {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock_irq(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
                         struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock_irq(&info->lock);
        }
        generic_fillattr(inode, stat);
        return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;

                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                        if (error)
                                return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
                if (newsize <= oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);
                        if (info->alloced)
                                shmem_truncate_range(inode,
                                                        newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);

                        /*
                         * Part of the huge page can be beyond i_size: subject
                         * to shrink under memory pressure.
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
                                if (list_empty(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
                                }
                                spin_unlock(&sbinfo->shrinklist_lock);
                        }
                }
        }

        setattr_copy(inode, attr);
        if (attr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(inode, inode->i_mode);
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->shrinklist)) {
                        spin_lock(&sbinfo->shrinklist_lock);
                        if (!list_empty(&info->shrinklist)) {
                                list_del_init(&info->shrinklist);
                                sbinfo->shrinklist_len--;
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page **pagep)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
        gfp_t gfp;
        int error = 0;

        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return -EAGAIN; /* tell shmem_unuse we found nothing */

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        gfp = mapping_gfp_mask(mapping);
        if (shmem_should_replace_page(*pagep, gfp)) {
                mutex_unlock(&shmem_swaplist_mutex);
                error = shmem_replace_page(pagep, gfp, info, index);
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
                 * allocation, but the inode might have been freed while we
                 * dropped it: although a racing shmem_evict_inode() cannot
                 * complete without emptying the radix_tree, our page lock
                 * on this swapcache page is not enough to prevent that -
                 * free_swap_and_cache() of our swap entry will only
                 * trylock_page(), removing swap from radix_tree whatever.
                 *
                 * We must not proceed to shmem_add_to_page_cache() if the
                 * inode has been freed, but of course we cannot rely on
                 * inode or mapping or info to check that.  However, we can
                 * safely check if our swap entry is still in use (and here
                 * it can't have got reused for another page): if it's still
                 * in use, then the inode cannot have been freed yet, and we
                 * can safely proceed (if it's no longer in use, that tells
                 * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
        }

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
                                                radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
                delete_from_swap_cache(*pagep);
                set_page_dirty(*pagep);
                if (!error) {
                        spin_lock_irq(&info->lock);
                        info->swapped--;
                        spin_unlock_irq(&info->lock);
                        swap_free(swap);
                }
        }
        return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        struct mem_cgroup *memcg;
        int error = 0;

        /*
         * There's a faint possibility that swap page was replaced before
         * caller locked it: caller will come back later with the right page.
         */
        if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
        error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
                        false);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */
        error = -EAGAIN;

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
                        error = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
                if (error != -EAGAIN)
                        break;
                /* found nothing in this: move on to search the next */
        }
        mutex_unlock(&shmem_swaplist_mutex);

        if (error) {
                if (error != -ENOMEM)
                        error = 0;
                mem_cgroup_cancel_charge(page, memcg, false);
        } else
                mem_cgroup_commit_charge(page, memcg, true, false);
out:
        unlock_page(page);
        put_page(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        struct address_space *mapping;
        struct inode *inode;
        swp_entry_t swap;
        pgoff_t index;

        VM_BUG_ON_PAGE(PageCompound(page), page);
        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * Our capabilities prevent regular writeback or sync from ever calling
         * shmem_writepage; but a stacking filesystem might use ->writepage of
         * its underlying filesystem, in which case tmpfs should write out to
         * swap only in response to memory pressure, and not for the writeback
         * threads or sync.
         */
        if (!wbc->for_reclaim) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }

        /*
         * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
         * value into swapfile.c, the only way we can correctly account for a
         * fallocated page arriving here is now to initialize it and write it.
         *
         * That's okay for a page already fallocated earlier, but if we have
         * not yet completed the fallocation, then (a) we want to keep track
         * of this page in case we have to undo it, and (b) it may not be a
         * good idea to continue anyway, once we're pushing into swap.  So
         * reactivate the page, and let shmem_fallocate() quit when too many
         * pages have refused to swap out.
         */
        if (!PageUptodate(page)) {
                if (inode->i_private) {
                        struct shmem_falloc *shmem_falloc;
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
                            !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
                        else
                                shmem_falloc = NULL;
                        spin_unlock(&inode->i_lock);
                        if (shmem_falloc)
                                goto redirty;
                }
                clear_highpage(page);
                flush_dcache_page(page);
                SetPageUptodate(page);
        }

        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        if (mem_cgroup_try_charge_swap(page, swap))
                goto free_swap;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
         * moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've incremented swapped, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under this mutex.
         */
        mutex_lock(&shmem_swaplist_mutex);
        if (list_empty(&info->swaplist))
                list_add_tail(&info->swaplist, &shmem_swaplist);

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                info->swapped++;
                spin_unlock_irq(&info->lock);

                swap_shmem_alloc(swap);
                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

                mutex_unlock(&shmem_swaplist_mutex);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        mutex_unlock(&shmem_swaplist_mutex);
free_swap:
        swapcache_free(swap);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
                struct shmem_inode_info *info, pgoff_t index)
{
        /* Create a pseudo vma that just contains the policy */
        vma->vm_start = 0;
        /* Bias interleave by inode number to distribute better across nodes */
        vma->vm_pgoff = index + info->vfs_inode.i_ino;
        vma->vm_ops = NULL;
        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}
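
/*
 * Illustrative effect of the interleave bias above: with
 * MPOL_INTERLEAVE the node chosen depends on vm_pgoff, so two inodes
 * with i_ino 100 and 101 begin interleaving one node apart for the
 * same index, rather than every inode hammering the same node for
 * its first page.
 */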
1339
1340 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1341 {
1342         /* Drop reference taken by mpol_shared_policy_lookup() */
1343         mpol_cond_put(vma->vm_policy);
1344 }
1345
1346 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1347                         struct shmem_inode_info *info, pgoff_t index)
1348 {
1349         struct vm_area_struct pvma;
1350         struct page *page;
1351
1352         shmem_pseudo_vma_init(&pvma, info, index);
1353         page = swapin_readahead(swap, gfp, &pvma, 0);
1354         shmem_pseudo_vma_destroy(&pvma);
1355
1356         return page;
1357 }
1358
1359 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1360                 struct shmem_inode_info *info, pgoff_t index)
1361 {
1362         struct vm_area_struct pvma;
1363         struct inode *inode = &info->vfs_inode;
1364         struct address_space *mapping = inode->i_mapping;
1365         pgoff_t idx, hindex;
1366         void __rcu **results;
1367         struct page *page;
1368
1369         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1370                 return NULL;
1371
1372         hindex = round_down(index, HPAGE_PMD_NR);
1373         rcu_read_lock();
1374         if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
1375                                 hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1376                 rcu_read_unlock();
1377                 return NULL;
1378         }
1379         rcu_read_unlock();
1380
1381         shmem_pseudo_vma_init(&pvma, info, hindex);
1382         page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1383                         HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1384         shmem_pseudo_vma_destroy(&pvma);
1385         if (page)
1386                 prep_transhuge_page(page);
1387         return page;
1388 }
1389
1390 static struct page *shmem_alloc_page(gfp_t gfp,
1391                         struct shmem_inode_info *info, pgoff_t index)
1392 {
1393         struct vm_area_struct pvma;
1394         struct page *page;
1395
1396         shmem_pseudo_vma_init(&pvma, info, index);
1397         page = alloc_page_vma(gfp, &pvma, 0);
1398         shmem_pseudo_vma_destroy(&pvma);
1399
1400         return page;
1401 }
1402
1403 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1404                 struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
1405                 pgoff_t index, bool huge)
1406 {
1407         struct page *page;
1408         int nr;
1409         int err = -ENOSPC;
1410
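	/*
	 * err defaults to -ENOSPC: failing the block accounting below is a
	 * space problem; only a failed page allocation reports -ENOMEM.
	 */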
1411         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1412                 huge = false;
1413         nr = huge ? HPAGE_PMD_NR : 1;
1414
1415         if (shmem_acct_block(info->flags, nr))
1416                 goto failed;
1417         if (sbinfo->max_blocks) {
1418                 if (percpu_counter_compare(&sbinfo->used_blocks,
1419                                         sbinfo->max_blocks - nr) > 0)
1420                         goto unacct;
1421                 percpu_counter_add(&sbinfo->used_blocks, nr);
1422         }
1423
1424         if (huge)
1425                 page = shmem_alloc_hugepage(gfp, info, index);
1426         else
1427                 page = shmem_alloc_page(gfp, info, index);
1428         if (page) {
1429                 __SetPageLocked(page);
1430                 __SetPageSwapBacked(page);
1431                 return page;
1432         }
1433
1434         err = -ENOMEM;
1435         if (sbinfo->max_blocks)
1436                 percpu_counter_add(&sbinfo->used_blocks, -nr);
1437 unacct:
1438         shmem_unacct_blocks(info->flags, nr);
1439 failed:
1440         return ERR_PTR(err);
1441 }
1442
1443 /*
1444  * When a page is moved from swapcache to shmem filecache (either by the
1445  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1446  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1447  * ignorance of the mapping it belongs to.  If that mapping has special
1448  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1449  * we may need to copy to a suitable page before moving to filecache.
1450  *
1451  * In a future release, this may well be extended to respect cpuset and
1452  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1453  * but for now it is a simple matter of zone.
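 *
 * For illustration: if the mapping's gfp mask allows only ZONE_DMA32 but
 * swapin placed the page in ZONE_NORMAL, page_zonenum() exceeds gfp_zone()
 * below and the page is copied to a suitably constrained one.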
1454  */
1455 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1456 {
1457         return page_zonenum(page) > gfp_zone(gfp);
1458 }
1459
1460 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1461                                 struct shmem_inode_info *info, pgoff_t index)
1462 {
1463         struct page *oldpage, *newpage;
1464         struct address_space *swap_mapping;
1465         pgoff_t swap_index;
1466         int error;
1467
1468         oldpage = *pagep;
1469         swap_index = page_private(oldpage);
1470         swap_mapping = page_mapping(oldpage);
1471
1472         /*
1473          * We have arrived here because our zones are constrained, so don't
1474          * limit chance of success by further cpuset and node constraints.
1475          */
1476         gfp &= ~GFP_CONSTRAINT_MASK;
1477         newpage = shmem_alloc_page(gfp, info, index);
1478         if (!newpage)
1479                 return -ENOMEM;
1480
1481         get_page(newpage);
1482         copy_highpage(newpage, oldpage);
1483         flush_dcache_page(newpage);
1484
1485         SetPageUptodate(newpage);
1486         set_page_private(newpage, swap_index);
1487         SetPageSwapCache(newpage);
1488
1489         /*
1490          * Our caller will very soon move newpage out of swapcache, but it's
1491          * a nice clean interface for us to replace oldpage by newpage there.
1492          */
1493         spin_lock_irq(&swap_mapping->tree_lock);
1494         error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1495                                                                    newpage);
1496         if (!error) {
1497                 __inc_node_page_state(newpage, NR_FILE_PAGES);
1498                 __dec_node_page_state(oldpage, NR_FILE_PAGES);
1499         }
1500         spin_unlock_irq(&swap_mapping->tree_lock);
1501
1502         if (unlikely(error)) {
1503                 /*
1504                  * Is this possible?  I think not, now that our callers check
1505                  * both PageSwapCache and page_private after getting page lock;
1506                  * but be defensive.  Reverse old to newpage for clear and free.
1507                  */
1508                 oldpage = newpage;
1509         } else {
1510                 mem_cgroup_migrate(oldpage, newpage);
1511                 lru_cache_add_anon(newpage);
1512                 *pagep = newpage;
1513         }
1514
1515         ClearPageSwapCache(oldpage);
1516         set_page_private(oldpage, 0);
1517
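	/*
	 * Drop oldpage twice: once for the reference our caller passed in,
	 * and once for the swap cache reference now transferred to newpage.
	 */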
1518         unlock_page(oldpage);
1519         put_page(oldpage);
1520         put_page(oldpage);
1521         return error;
1522 }
1523
1524 /*
1525  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1526  *
1527  * If we allocate a new one we do not mark it dirty. That's up to the
1528  * vm. If we swap it in we mark it dirty, since we also free the swap
1529  * entry: a page cannot live in both the swap cache and the page cache.
1530  *
1531  * fault_mm and fault_type are only supplied by shmem_fault:
1532  * otherwise they are NULL.
1533  */
1534 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1535         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1536         struct mm_struct *fault_mm, int *fault_type)
1537 {
1538         struct address_space *mapping = inode->i_mapping;
1539         struct shmem_inode_info *info;
1540         struct shmem_sb_info *sbinfo;
1541         struct mm_struct *charge_mm;
1542         struct mem_cgroup *memcg;
1543         struct page *page;
1544         swp_entry_t swap;
1545         enum sgp_type sgp_huge = sgp;
1546         pgoff_t hindex = index;
1547         int error;
1548         int once = 0;
1549         int alloced = 0;
1550
1551         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1552                 return -EFBIG;
1553         if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1554                 sgp = SGP_CACHE;
1555 repeat:
1556         swap.val = 0;
1557         page = find_lock_entry(mapping, index);
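	/*
	 * An exceptional radix-tree entry does not point to a page: it
	 * encodes the swap location of a page that has been swapped out.
	 */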
1558         if (radix_tree_exceptional_entry(page)) {
1559                 swap = radix_to_swp_entry(page);
1560                 page = NULL;
1561         }
1562
1563         if (sgp <= SGP_CACHE &&
1564             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1565                 error = -EINVAL;
1566                 goto unlock;
1567         }
1568
1569         if (page && sgp == SGP_WRITE)
1570                 mark_page_accessed(page);
1571
1572         /* fallocated page? */
1573         if (page && !PageUptodate(page)) {
1574                 if (sgp != SGP_READ)
1575                         goto clear;
1576                 unlock_page(page);
1577                 put_page(page);
1578                 page = NULL;
1579         }
1580         if (page || (sgp == SGP_READ && !swap.val)) {
1581                 *pagep = page;
1582                 return 0;
1583         }
1584
1585         /*
1586          * Fast cache lookup did not find it:
1587          * bring it back from swap or allocate.
1588          */
1589         info = SHMEM_I(inode);
1590         sbinfo = SHMEM_SB(inode->i_sb);
1591         charge_mm = fault_mm ? : current->mm;
1592
1593         if (swap.val) {
1594                 /* Look it up and read it in.. */
1595                 page = lookup_swap_cache(swap);
1596                 if (!page) {
1597                         /* Or update major stats only when swapin succeeds?? */
1598                         if (fault_type) {
1599                                 *fault_type |= VM_FAULT_MAJOR;
1600                                 count_vm_event(PGMAJFAULT);
1601                                 mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT);
1602                         }
1603                         /* Here we actually start the io */
1604                         page = shmem_swapin(swap, gfp, info, index);
1605                         if (!page) {
1606                                 error = -ENOMEM;
1607                                 goto failed;
1608                         }
1609                 }
1610
1611                 /* We have to do this with the page locked to prevent races */
1612                 lock_page(page);
1613                 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1614                     !shmem_confirm_swap(mapping, index, swap)) {
1615                         error = -EEXIST;        /* try again */
1616                         goto unlock;
1617                 }
1618                 if (!PageUptodate(page)) {
1619                         error = -EIO;
1620                         goto failed;
1621                 }
1622                 wait_on_page_writeback(page);
1623
1624                 if (shmem_should_replace_page(page, gfp)) {
1625                         error = shmem_replace_page(&page, gfp, info, index);
1626                         if (error)
1627                                 goto failed;
1628                 }
1629
1630                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1631                                 false);
1632                 if (!error) {
1633                         error = shmem_add_to_page_cache(page, mapping, index,
1634                                                 swp_to_radix_entry(swap));
1635                         /*
1636                          * We already confirmed swap under page lock, and make
1637                          * no memory allocation here, so usually no possibility
1638                          * of error; but free_swap_and_cache() only trylocks a
1639                          * page, so it is just possible that the entry has been
1640                          * truncated or holepunched since swap was confirmed.
1641                          * shmem_undo_range() will have done some of the
1642                          * unaccounting, now delete_from_swap_cache() will do
1643                          * the rest.
1644                          * Reset swap.val? No, leave it so "failed" goes back to
1645                          * "repeat": reading a hole and writing should succeed.
1646                          */
1647                         if (error) {
1648                                 mem_cgroup_cancel_charge(page, memcg, false);
1649                                 delete_from_swap_cache(page);
1650                         }
1651                 }
1652                 if (error)
1653                         goto failed;
1654
1655                 mem_cgroup_commit_charge(page, memcg, true, false);
1656
1657                 spin_lock_irq(&info->lock);
1658                 info->swapped--;
1659                 shmem_recalc_inode(inode);
1660                 spin_unlock_irq(&info->lock);
1661
1662                 if (sgp == SGP_WRITE)
1663                         mark_page_accessed(page);
1664
1665                 delete_from_swap_cache(page);
1666                 set_page_dirty(page);
1667                 swap_free(swap);
1668
1669         } else {
1670                 /* shmem_symlink() */
1671                 if (mapping->a_ops != &shmem_aops)
1672                         goto alloc_nohuge;
1673                 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1674                         goto alloc_nohuge;
1675                 if (shmem_huge == SHMEM_HUGE_FORCE)
1676                         goto alloc_huge;
1677                 switch (sbinfo->huge) {
1678                         loff_t i_size;
1679                         pgoff_t off;
1680                 case SHMEM_HUGE_NEVER:
1681                         goto alloc_nohuge;
1682                 case SHMEM_HUGE_WITHIN_SIZE:
1683                         off = round_up(index, HPAGE_PMD_NR);
1684                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
1685                         if (i_size >= HPAGE_PMD_SIZE &&
1686                                         i_size >> PAGE_SHIFT >= off)
1687                                 goto alloc_huge;
1688                         /* fallthrough */
1689                 case SHMEM_HUGE_ADVISE:
1690                         if (sgp_huge == SGP_HUGE)
1691                                 goto alloc_huge;
1692                         /* TODO: implement fadvise() hints */
1693                         goto alloc_nohuge;
1694                 }
1695
1696 alloc_huge:
1697                 page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1698                                 index, true);
1699                 if (IS_ERR(page)) {
1700 alloc_nohuge:           page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1701                                         index, false);
1702                 }
1703                 if (IS_ERR(page)) {
1704                         int retry = 5;
1705                         error = PTR_ERR(page);
1706                         page = NULL;
1707                         if (error != -ENOSPC)
1708                                 goto failed;
1709                         /*
1710                          * Try to reclaim some space by splitting a huge page
1711                          * that extends beyond i_size on the filesystem.
1712                          */
1713                         while (retry--) {
1714                                 int ret;
1715                                 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1716                                 if (ret == SHRINK_STOP)
1717                                         break;
1718                                 if (ret)
1719                                         goto alloc_nohuge;
1720                         }
1721                         goto failed;
1722                 }
1723
1724                 if (PageTransHuge(page))
1725                         hindex = round_down(index, HPAGE_PMD_NR);
1726                 else
1727                         hindex = index;
1728
1729                 if (sgp == SGP_WRITE)
1730                         __SetPageReferenced(page);
1731
1732                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1733                                 PageTransHuge(page));
1734                 if (error)
1735                         goto unacct;
1736                 error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1737                                 compound_order(page));
1738                 if (!error) {
1739                         error = shmem_add_to_page_cache(page, mapping, hindex,
1740                                                         NULL);
1741                         radix_tree_preload_end();
1742                 }
1743                 if (error) {
1744                         mem_cgroup_cancel_charge(page, memcg,
1745                                         PageTransHuge(page));
1746                         goto unacct;
1747                 }
1748                 mem_cgroup_commit_charge(page, memcg, false,
1749                                 PageTransHuge(page));
1750                 lru_cache_add_anon(page);
1751
1752                 spin_lock_irq(&info->lock);
1753                 info->alloced += 1 << compound_order(page);
1754                 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1755                 shmem_recalc_inode(inode);
1756                 spin_unlock_irq(&info->lock);
1757                 alloced = true;
1758
1759                 if (PageTransHuge(page) &&
1760                                 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1761                                 hindex + HPAGE_PMD_NR - 1) {
1762                         /*
1763                          * Part of the huge page is beyond i_size: subject
1764                          * to shrink under memory pressure.
1765                          */
1766                         spin_lock(&sbinfo->shrinklist_lock);
1767                         if (list_empty(&info->shrinklist)) {
1768                                 list_add_tail(&info->shrinklist,
1769                                                 &sbinfo->shrinklist);
1770                                 sbinfo->shrinklist_len++;
1771                         }
1772                         spin_unlock(&sbinfo->shrinklist_lock);
1773                 }
1774
1775                 /*
1776                  * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1777                  */
1778                 if (sgp == SGP_FALLOC)
1779                         sgp = SGP_WRITE;
1780 clear:
1781                 /*
1782                  * Let SGP_WRITE caller clear ends if write does not fill page;
1783                  * but SGP_FALLOC on a page fallocated earlier must initialize
1784                  * it now, lest undo on failure cancel our earlier guarantee.
1785                  */
1786                 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1787                         struct page *head = compound_head(page);
1788                         int i;
1789
1790                         for (i = 0; i < (1 << compound_order(head)); i++) {
1791                                 clear_highpage(head + i);
1792                                 flush_dcache_page(head + i);
1793                         }
1794                         SetPageUptodate(head);
1795                 }
1796         }
1797
1798         /* Perhaps the file has been truncated since we checked */
1799         if (sgp <= SGP_CACHE &&
1800             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1801                 if (alloced) {
1802                         ClearPageDirty(page);
1803                         delete_from_page_cache(page);
1804                         spin_lock_irq(&info->lock);
1805                         shmem_recalc_inode(inode);
1806                         spin_unlock_irq(&info->lock);
1807                 }
1808                 error = -EINVAL;
1809                 goto unlock;
1810         }
1811         *pagep = page + index - hindex;
1812         return 0;
1813
1814         /*
1815          * Error recovery.
1816          */
1817 unacct:
1818         if (sbinfo->max_blocks)
1819                 percpu_counter_sub(&sbinfo->used_blocks,
1820                                 1 << compound_order(page));
1821         shmem_unacct_blocks(info->flags, 1 << compound_order(page));
1822
1823         if (PageTransHuge(page)) {
1824                 unlock_page(page);
1825                 put_page(page);
1826                 goto alloc_nohuge;
1827         }
1828 failed:
1829         if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1830                 error = -EEXIST;
1831 unlock:
1832         if (page) {
1833                 unlock_page(page);
1834                 put_page(page);
1835         }
1836         if (error == -ENOSPC && !once++) {
1837                 info = SHMEM_I(inode);
1838                 spin_lock_irq(&info->lock);
1839                 shmem_recalc_inode(inode);
1840                 spin_unlock_irq(&info->lock);
1841                 goto repeat;
1842         }
1843         if (error == -EEXIST)   /* from above or from radix_tree_insert */
1844                 goto repeat;
1845         return error;
1846 }
1847
1848 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1849 {
1850         struct inode *inode = file_inode(vma->vm_file);
1851         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1852         enum sgp_type sgp;
1853         int error;
1854         int ret = VM_FAULT_LOCKED;
1855
1856         /*
1857          * Trinity finds that probing a hole which tmpfs is punching can
1858          * prevent the hole-punch from ever completing: which in turn
1859          * locks writers out with its hold on i_mutex.  So refrain from
1860          * faulting pages into the hole while it's being punched.  Although
1861          * shmem_undo_range() does remove the additions, it may be unable to
1862          * keep up, as each new page needs its own unmap_mapping_range() call,
1863          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1864          *
1865          * It does not matter if we sometimes reach this check just before the
1866          * hole-punch begins, so that one fault then races with the punch:
1867          * we just need to make racing faults a rare case.
1868          *
1869          * The implementation below would be much simpler if we just used a
1870          * standard mutex or completion: but we cannot take i_mutex in fault,
1871          * and bloating every shmem inode for this unlikely case would be sad.
1872          */
1873         if (unlikely(inode->i_private)) {
1874                 struct shmem_falloc *shmem_falloc;
1875
1876                 spin_lock(&inode->i_lock);
1877                 shmem_falloc = inode->i_private;
1878                 if (shmem_falloc &&
1879                     shmem_falloc->waitq &&
1880                     vmf->pgoff >= shmem_falloc->start &&
1881                     vmf->pgoff < shmem_falloc->next) {
1882                         wait_queue_head_t *shmem_falloc_waitq;
1883                         DEFINE_WAIT(shmem_fault_wait);
1884
1885                         ret = VM_FAULT_NOPAGE;
1886                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1887                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1888                                 /* It's polite to up mmap_sem if we can */
1889                                 up_read(&vma->vm_mm->mmap_sem);
1890                                 ret = VM_FAULT_RETRY;
1891                         }
1892
1893                         shmem_falloc_waitq = shmem_falloc->waitq;
1894                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1895                                         TASK_UNINTERRUPTIBLE);
1896                         spin_unlock(&inode->i_lock);
1897                         schedule();
1898
1899                         /*
1900                          * shmem_falloc_waitq points into the shmem_fallocate()
1901                          * stack of the hole-punching task: shmem_falloc_waitq
1902                          * is usually invalid by the time we reach here, but
1903                          * finish_wait() does not dereference it in that case;
1904                          * though i_lock is still needed lest we race with wake_up_all().
1905                          */
1906                         spin_lock(&inode->i_lock);
1907                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1908                         spin_unlock(&inode->i_lock);
1909                         return ret;
1910                 }
1911                 spin_unlock(&inode->i_lock);
1912         }
1913
1914         sgp = SGP_CACHE;
1915         if (vma->vm_flags & VM_HUGEPAGE)
1916                 sgp = SGP_HUGE;
1917         else if (vma->vm_flags & VM_NOHUGEPAGE)
1918                 sgp = SGP_NOHUGE;
1919
1920         error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
1921                                   gfp, vma->vm_mm, &ret);
1922         if (error)
1923                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1924         return ret;
1925 }
1926
1927 unsigned long shmem_get_unmapped_area(struct file *file,
1928                                       unsigned long uaddr, unsigned long len,
1929                                       unsigned long pgoff, unsigned long flags)
1930 {
1931         unsigned long (*get_area)(struct file *,
1932                 unsigned long, unsigned long, unsigned long, unsigned long);
1933         unsigned long addr;
1934         unsigned long offset;
1935         unsigned long inflated_len;
1936         unsigned long inflated_addr;
1937         unsigned long inflated_offset;
1938
1939         if (len > TASK_SIZE)
1940                 return -ENOMEM;
1941
1942         get_area = current->mm->get_unmapped_area;
1943         addr = get_area(file, uaddr, len, pgoff, flags);
1944
1945         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1946                 return addr;
1947         if (IS_ERR_VALUE(addr))
1948                 return addr;
1949         if (addr & ~PAGE_MASK)
1950                 return addr;
1951         if (addr > TASK_SIZE - len)
1952                 return addr;
1953
1954         if (shmem_huge == SHMEM_HUGE_DENY)
1955                 return addr;
1956         if (len < HPAGE_PMD_SIZE)
1957                 return addr;
1958         if (flags & MAP_FIXED)
1959                 return addr;
1960         /*
1961          * Our priority is to support MAP_SHARED mapped hugely;
1962          * and support MAP_PRIVATE mapped hugely too, until it is COWed.
1963          * But if caller specified an address hint, respect that as before.
1964          */
1965         if (uaddr)
1966                 return addr;
1967
1968         if (shmem_huge != SHMEM_HUGE_FORCE) {
1969                 struct super_block *sb;
1970
1971                 if (file) {
1972                         VM_BUG_ON(file->f_op != &shmem_file_operations);
1973                         sb = file_inode(file)->i_sb;
1974                 } else {
1975                         /*
1976                          * Called directly from mm/mmap.c, or drivers/char/mem.c
1977                          * for "/dev/zero", to create a shared anonymous object.
1978                          */
1979                         if (IS_ERR(shm_mnt))
1980                                 return addr;
1981                         sb = shm_mnt->mnt_sb;
1982                 }
1983                 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
1984                         return addr;
1985         }
1986
1987         offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
1988         if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
1989                 return addr;
1990         if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
1991                 return addr;
1992
1993         inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
1994         if (inflated_len > TASK_SIZE)
1995                 return addr;
1996         if (inflated_len < len)
1997                 return addr;
1998
1999         inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2000         if (IS_ERR_VALUE(inflated_addr))
2001                 return addr;
2002         if (inflated_addr & ~PAGE_MASK)
2003                 return addr;
2004
2005         inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2006         inflated_addr += offset - inflated_offset;
2007         if (inflated_offset > offset)
2008                 inflated_addr += HPAGE_PMD_SIZE;
2009
2010         if (inflated_addr > TASK_SIZE - len)
2011                 return addr;
2012         return inflated_addr;
2013 }
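/*
 * Illustrative arithmetic for the inflation above, assuming 2MB huge pages
 * and offset 0: if get_area() returns 0x7f1234567000 for the inflated
 * request, then inflated_offset is 0x167000, so we subtract that and, since
 * it exceeds offset, add HPAGE_PMD_SIZE, yielding 0x7f1234600000, a
 * PMD-aligned address still inside the inflated range.
 */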
2014
2015 #ifdef CONFIG_NUMA
2016 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2017 {
2018         struct inode *inode = file_inode(vma->vm_file);
2019         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2020 }
2021
2022 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2023                                           unsigned long addr)
2024 {
2025         struct inode *inode = file_inode(vma->vm_file);
2026         pgoff_t index;
2027
2028         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2029         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2030 }
2031 #endif
2032
2033 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2034 {
2035         struct inode *inode = file_inode(file);
2036         struct shmem_inode_info *info = SHMEM_I(inode);
2037         int retval = -ENOMEM;
2038
2039         spin_lock_irq(&info->lock);
2040         if (lock && !(info->flags & VM_LOCKED)) {
2041                 if (!user_shm_lock(inode->i_size, user))
2042                         goto out_nomem;
2043                 info->flags |= VM_LOCKED;
2044                 mapping_set_unevictable(file->f_mapping);
2045         }
2046         if (!lock && (info->flags & VM_LOCKED) && user) {
2047                 user_shm_unlock(inode->i_size, user);
2048                 info->flags &= ~VM_LOCKED;
2049                 mapping_clear_unevictable(file->f_mapping);
2050         }
2051         retval = 0;
2052
2053 out_nomem:
2054         spin_unlock_irq(&info->lock);
2055         return retval;
2056 }
2057
2058 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2059 {
2060         file_accessed(file);
2061         vma->vm_ops = &shmem_vm_ops;
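	/*
	 * Hand the vma to khugepaged if it spans at least one full
	 * PMD-aligned huge page range: vm_start rounded up to a huge page
	 * boundary must still lie below vm_end rounded down.
	 */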
2062         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2063                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2064                         (vma->vm_end & HPAGE_PMD_MASK)) {
2065                 khugepaged_enter(vma, vma->vm_flags);
2066         }
2067         return 0;
2068 }
2069
2070 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2071                                      umode_t mode, dev_t dev, unsigned long flags)
2072 {
2073         struct inode *inode;
2074         struct shmem_inode_info *info;
2075         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2076
2077         if (shmem_reserve_inode(sb))
2078                 return NULL;
2079
2080         inode = new_inode(sb);
2081         if (inode) {
2082                 inode->i_ino = get_next_ino();
2083                 inode_init_owner(inode, dir, mode);
2084                 inode->i_blocks = 0;
2085                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2086                 inode->i_generation = get_seconds();
2087                 info = SHMEM_I(inode);
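		/*
		 * Zero only the shmem-specific fields, which precede the
		 * embedded vfs_inode that new_inode() already initialized.
		 */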
2088                 memset(info, 0, (char *)inode - (char *)info);
2089                 spin_lock_init(&info->lock);
2090                 info->seals = F_SEAL_SEAL;
2091                 info->flags = flags & VM_NORESERVE;
2092                 INIT_LIST_HEAD(&info->shrinklist);
2093                 INIT_LIST_HEAD(&info->swaplist);
2094                 simple_xattrs_init(&info->xattrs);
2095                 cache_no_acl(inode);
2096
2097                 switch (mode & S_IFMT) {
2098                 default:
2099                         inode->i_op = &shmem_special_inode_operations;
2100                         init_special_inode(inode, mode, dev);
2101                         break;
2102                 case S_IFREG:
2103                         inode->i_mapping->a_ops = &shmem_aops;
2104                         inode->i_op = &shmem_inode_operations;
2105                         inode->i_fop = &shmem_file_operations;
2106                         mpol_shared_policy_init(&info->policy,
2107                                                  shmem_get_sbmpol(sbinfo));
2108                         break;
2109                 case S_IFDIR:
2110                         inc_nlink(inode);
2111                         /* Some things misbehave if size == 0 on a directory */
2112                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
2113                         inode->i_op = &shmem_dir_inode_operations;
2114                         inode->i_fop = &simple_dir_operations;
2115                         break;
2116                 case S_IFLNK:
2117                         /*
2118                          * Must not load anything in the rbtree,
2119                          * mpol_free_shared_policy will not be called.
2120                          */
2121                         mpol_shared_policy_init(&info->policy, NULL);
2122                         break;
2123                 }
2124         } else
2125                 shmem_free_inode(sb);
2126         return inode;
2127 }
2128
2129 bool shmem_mapping(struct address_space *mapping)
2130 {
2131         if (!mapping->host)
2132                 return false;
2133
2134         return mapping->host->i_sb->s_op == &shmem_ops;
2135 }
2136
2137 #ifdef CONFIG_TMPFS
2138 static const struct inode_operations shmem_symlink_inode_operations;
2139 static const struct inode_operations shmem_short_symlink_operations;
2140
2141 #ifdef CONFIG_TMPFS_XATTR
2142 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2143 #else
2144 #define shmem_initxattrs NULL
2145 #endif
2146
2147 static int
2148 shmem_write_begin(struct file *file, struct address_space *mapping,
2149                         loff_t pos, unsigned len, unsigned flags,
2150                         struct page **pagep, void **fsdata)
2151 {
2152         struct inode *inode = mapping->host;
2153         struct shmem_inode_info *info = SHMEM_I(inode);
2154         pgoff_t index = pos >> PAGE_SHIFT;
2155
2156         /* i_mutex is held by caller */
2157         if (unlikely(info->seals)) {
2158                 if (info->seals & F_SEAL_WRITE)
2159                         return -EPERM;
2160                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2161                         return -EPERM;
2162         }
2163
2164         return shmem_getpage(inode, index, pagep, SGP_WRITE);
2165 }
2166
2167 static int
2168 shmem_write_end(struct file *file, struct address_space *mapping,
2169                         loff_t pos, unsigned len, unsigned copied,
2170                         struct page *page, void *fsdata)
2171 {
2172         struct inode *inode = mapping->host;
2173
2174         if (pos + copied > inode->i_size)
2175                 i_size_write(inode, pos + copied);
2176
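	/*
	 * If the write did not make the page uptodate, zero the parts the
	 * copy did not cover (and, for a huge page, its other subpages).
	 */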
2177         if (!PageUptodate(page)) {
2178                 struct page *head = compound_head(page);
2179                 if (PageTransCompound(page)) {
2180                         int i;
2181
2182                         for (i = 0; i < HPAGE_PMD_NR; i++) {
2183                                 if (head + i == page)
2184                                         continue;
2185                                 clear_highpage(head + i);
2186                                 flush_dcache_page(head + i);
2187                         }
2188                 }
2189                 if (copied < PAGE_SIZE) {
2190                         unsigned from = pos & (PAGE_SIZE - 1);
2191                         zero_user_segments(page, 0, from,
2192                                         from + copied, PAGE_SIZE);
2193                 }
2194                 SetPageUptodate(head);
2195         }
2196         set_page_dirty(page);
2197         unlock_page(page);
2198         put_page(page);
2199
2200         return copied;
2201 }
2202
2203 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2204 {
2205         struct file *file = iocb->ki_filp;
2206         struct inode *inode = file_inode(file);
2207         struct address_space *mapping = inode->i_mapping;
2208         pgoff_t index;
2209         unsigned long offset;
2210         enum sgp_type sgp = SGP_READ;
2211         int error = 0;
2212         ssize_t retval = 0;
2213         loff_t *ppos = &iocb->ki_pos;
2214
2215         /*
2216          * Might this read be for a stacking filesystem?  Then when reading
2217          * holes of a sparse file, we actually need to allocate those pages,
2218          * and even mark them dirty, so it cannot exceed the max_blocks limit.
2219          */
2220         if (!iter_is_iovec(to))
2221                 sgp = SGP_CACHE;
2222
2223         index = *ppos >> PAGE_SHIFT;
2224         offset = *ppos & ~PAGE_MASK;
2225
2226         for (;;) {
2227                 struct page *page = NULL;
2228                 pgoff_t end_index;
2229                 unsigned long nr, ret;
2230                 loff_t i_size = i_size_read(inode);
2231
2232                 end_index = i_size >> PAGE_SHIFT;
2233                 if (index > end_index)
2234                         break;
2235                 if (index == end_index) {
2236                         nr = i_size & ~PAGE_MASK;
2237                         if (nr <= offset)
2238                                 break;
2239                 }
2240
2241                 error = shmem_getpage(inode, index, &page, sgp);
2242                 if (error) {
2243                         if (error == -EINVAL)
2244                                 error = 0;
2245                         break;
2246                 }
2247                 if (page) {
2248                         if (sgp == SGP_CACHE)
2249                                 set_page_dirty(page);
2250                         unlock_page(page);
2251                 }
2252
2253                 /*
2254                  * We must evaluate after, since reads (unlike writes)
2255                  * are called without i_mutex protection against truncate
2256                  */
2257                 nr = PAGE_SIZE;
2258                 i_size = i_size_read(inode);
2259                 end_index = i_size >> PAGE_SHIFT;
2260                 if (index == end_index) {
2261                         nr = i_size & ~PAGE_MASK;
2262                         if (nr <= offset) {
2263                                 if (page)
2264                                         put_page(page);
2265                                 break;
2266                         }
2267                 }
2268                 nr -= offset;
2269
2270                 if (page) {
2271                         /*
2272                          * If users can be writing to this page using arbitrary
2273                          * virtual addresses, take care about potential aliasing
2274                          * before reading the page on the kernel side.
2275                          */
2276                         if (mapping_writably_mapped(mapping))
2277                                 flush_dcache_page(page);
2278                         /*
2279                          * Mark the page accessed if we read the beginning.
2280                          */
2281                         if (!offset)
2282                                 mark_page_accessed(page);
2283                 } else {
2284                         page = ZERO_PAGE(0);
2285                         get_page(page);
2286                 }
2287
2288                 /*
2289                  * Ok, we have the page, and it's up-to-date, so
2290                  * now we can copy it to user space...
2291                  */
2292                 ret = copy_page_to_iter(page, offset, nr, to);
2293                 retval += ret;
2294                 offset += ret;
2295                 index += offset >> PAGE_SHIFT;
2296                 offset &= ~PAGE_MASK;
2297
2298                 put_page(page);
2299                 if (!iov_iter_count(to))
2300                         break;
2301                 if (ret < nr) {
2302                         error = -EFAULT;
2303                         break;
2304                 }
2305                 cond_resched();
2306         }
2307
2308         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2309         file_accessed(file);
2310         return retval ? retval : error;
2311 }
2312
2313 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2314                                 struct pipe_inode_info *pipe, size_t len,
2315                                 unsigned int flags)
2316 {
2317         struct address_space *mapping = in->f_mapping;
2318         struct inode *inode = mapping->host;
2319         unsigned int loff, nr_pages, req_pages;
2320         struct page *pages[PIPE_DEF_BUFFERS];
2321         struct partial_page partial[PIPE_DEF_BUFFERS];
2322         struct page *page;
2323         pgoff_t index, end_index;
2324         loff_t isize, left;
2325         int error, page_nr;
2326         struct splice_pipe_desc spd = {
2327                 .pages = pages,
2328                 .partial = partial,
2329                 .nr_pages_max = PIPE_DEF_BUFFERS,
2330                 .flags = flags,
2331                 .ops = &page_cache_pipe_buf_ops,
2332                 .spd_release = spd_release_page,
2333         };
2334
2335         isize = i_size_read(inode);
2336         if (unlikely(*ppos >= isize))
2337                 return 0;
2338
2339         left = isize - *ppos;
2340         if (unlikely(left < len))
2341                 len = left;
2342
2343         if (splice_grow_spd(pipe, &spd))
2344                 return -ENOMEM;
2345
2346         index = *ppos >> PAGE_SHIFT;
2347         loff = *ppos & ~PAGE_MASK;
2348         req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
2349         nr_pages = min(req_pages, spd.nr_pages_max);
2350
2351         spd.nr_pages = find_get_pages_contig(mapping, index,
2352                                                 nr_pages, spd.pages);
2353         index += spd.nr_pages;
2354         error = 0;
2355
2356         while (spd.nr_pages < nr_pages) {
2357                 error = shmem_getpage(inode, index, &page, SGP_CACHE);
2358                 if (error)
2359                         break;
2360                 unlock_page(page);
2361                 spd.pages[spd.nr_pages++] = page;
2362                 index++;
2363         }
2364
2365         index = *ppos >> PAGE_SHIFT;
2366         nr_pages = spd.nr_pages;
2367         spd.nr_pages = 0;
2368
2369         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
2370                 unsigned int this_len;
2371
2372                 if (!len)
2373                         break;
2374
2375                 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
2376                 page = spd.pages[page_nr];
2377
2378                 if (!PageUptodate(page) || page->mapping != mapping) {
2379                         error = shmem_getpage(inode, index, &page, SGP_CACHE);
2380                         if (error)
2381                                 break;
2382                         unlock_page(page);
2383                         put_page(spd.pages[page_nr]);
2384                         spd.pages[page_nr] = page;
2385                 }
2386
2387                 isize = i_size_read(inode);
2388                 end_index = (isize - 1) >> PAGE_SHIFT;
2389                 if (unlikely(!isize || index > end_index))
2390                         break;
2391
2392                 if (end_index == index) {
2393                         unsigned int plen;
2394
2395                         plen = ((isize - 1) & ~PAGE_MASK) + 1;
2396                         if (plen <= loff)
2397                                 break;
2398
2399                         this_len = min(this_len, plen - loff);
2400                         len = this_len;
2401                 }
2402
2403                 spd.partial[page_nr].offset = loff;
2404                 spd.partial[page_nr].len = this_len;
2405                 len -= this_len;
2406                 loff = 0;
2407                 spd.nr_pages++;
2408                 index++;
2409         }
2410
2411         while (page_nr < nr_pages)
2412                 put_page(spd.pages[page_nr++]);
2413
2414         if (spd.nr_pages)
2415                 error = splice_to_pipe(pipe, &spd);
2416
2417         splice_shrink_spd(&spd);
2418
2419         if (error > 0) {
2420                 *ppos += error;
2421                 file_accessed(in);
2422         }
2423         return error;
2424 }
2425
2426 /*
2427  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
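 *
 * Reached from userspace as, for example (illustrative):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);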
2428  */
2429 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2430                                     pgoff_t index, pgoff_t end, int whence)
2431 {
2432         struct page *page;
2433         struct pagevec pvec;
2434         pgoff_t indices[PAGEVEC_SIZE];
2435         bool done = false;
2436         int i;
2437
2438         pagevec_init(&pvec, 0);
2439         pvec.nr = 1;            /* start small: we may be there already */
2440         while (!done) {
2441                 pvec.nr = find_get_entries(mapping, index,
2442                                         pvec.nr, pvec.pages, indices);
2443                 if (!pvec.nr) {
2444                         if (whence == SEEK_DATA)
2445                                 index = end;
2446                         break;
2447                 }
2448                 for (i = 0; i < pvec.nr; i++, index++) {
2449                         if (index < indices[i]) {
2450                                 if (whence == SEEK_HOLE) {
2451                                         done = true;
2452                                         break;
2453                                 }
2454                                 index = indices[i];
2455                         }
2456                         page = pvec.pages[i];
2457                         if (page && !radix_tree_exceptional_entry(page)) {
2458                                 if (!PageUptodate(page))
2459                                         page = NULL;
2460                         }
2461                         if (index >= end ||
2462                             (page && whence == SEEK_DATA) ||
2463                             (!page && whence == SEEK_HOLE)) {
2464                                 done = true;
2465                                 break;
2466                         }
2467                 }
2468                 pagevec_remove_exceptionals(&pvec);
2469                 pagevec_release(&pvec);
2470                 pvec.nr = PAGEVEC_SIZE;
2471                 cond_resched();
2472         }
2473         return index;
2474 }
2475
2476 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2477 {
2478         struct address_space *mapping = file->f_mapping;
2479         struct inode *inode = mapping->host;
2480         pgoff_t start, end;
2481         loff_t new_offset;
2482
2483         if (whence != SEEK_DATA && whence != SEEK_HOLE)
2484                 return generic_file_llseek_size(file, offset, whence,
2485                                         MAX_LFS_FILESIZE, i_size_read(inode));
2486         inode_lock(inode);
2487         /* We're holding i_mutex so we can access i_size directly */
2488
2489         if (offset < 0)
2490                 offset = -EINVAL;
2491         else if (offset >= inode->i_size)
2492                 offset = -ENXIO;
2493         else {
2494                 start = offset >> PAGE_SHIFT;
2495                 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2496                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2497                 new_offset <<= PAGE_SHIFT;
2498                 if (new_offset > offset) {
2499                         if (new_offset < inode->i_size)
2500                                 offset = new_offset;
2501                         else if (whence == SEEK_DATA)
2502                                 offset = -ENXIO;
2503                         else
2504                                 offset = inode->i_size;
2505                 }
2506         }
2507
2508         if (offset >= 0)
2509                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2510         inode_unlock(inode);
2511         return offset;
2512 }
2513
2514 /*
2515  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
2516  * so reuse a tag which we firmly believe is never set or cleared on shmem.
2517  */
2518 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
2519 #define LAST_SCAN               4       /* about 150ms max */
2520
2521 static void shmem_tag_pins(struct address_space *mapping)
2522 {
2523         struct radix_tree_iter iter;
2524         void **slot;
2525         pgoff_t start;
2526         struct page *page;
2527
2528         lru_add_drain();
2529         start = 0;
2530         rcu_read_lock();
2531
2532         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
2533                 page = radix_tree_deref_slot(slot);
2534                 if (!page || radix_tree_exception(page)) {
2535                         if (radix_tree_deref_retry(page)) {
2536                                 slot = radix_tree_iter_retry(&iter);
2537                                 continue;
2538                         }
2539                 } else if (page_count(page) - page_mapcount(page) > 1) {
2540                         spin_lock_irq(&mapping->tree_lock);
2541                         radix_tree_tag_set(&mapping->page_tree, iter.index,
2542                                            SHMEM_TAG_PINNED);
2543                         spin_unlock_irq(&mapping->tree_lock);
2544                 }
2545
2546                 if (need_resched()) {
2547                         cond_resched_rcu();
2548                         slot = radix_tree_iter_next(&iter);
2549                 }
2550         }
2551         rcu_read_unlock();
2552 }
2553
2554 /*
2555  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
2556  * via get_user_pages(), drivers might have some pending I/O without any active
2557  * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
2558  * and see whether they have an elevated ref-count. If so, we tag them and wait for
2559  * them to be dropped.
2560  * The caller must guarantee that no new user will acquire writable references
2561  * to those pages to avoid races.
2562  */
2563 static int shmem_wait_for_pins(struct address_space *mapping)
2564 {
2565         struct radix_tree_iter iter;
2566         void **slot;
2567         pgoff_t start;
2568         struct page *page;
2569         int error, scan;
2570
2571         shmem_tag_pins(mapping);
2572
2573         error = 0;
2574         for (scan = 0; scan <= LAST_SCAN; scan++) {
2575                 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
2576                         break;
2577
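		/*
		 * Between scans, back off exponentially: (HZ << scan) / 200
		 * is roughly 10ms, 20ms, 40ms, 80ms for scans 1..4, which is
		 * the "about 150ms max" noted at LAST_SCAN above.
		 */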
2578                 if (!scan)
2579                         lru_add_drain_all();
2580                 else if (schedule_timeout_killable((HZ << scan) / 200))
2581                         scan = LAST_SCAN;
2582
2583                 start = 0;
2584                 rcu_read_lock();
2585                 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
2586                                            start, SHMEM_TAG_PINNED) {
2587
2588                         page = radix_tree_deref_slot(slot);
2589                         if (radix_tree_exception(page)) {
2590                                 if (radix_tree_deref_retry(page)) {
2591                                         slot = radix_tree_iter_retry(&iter);
2592                                         continue;
2593                                 }
2594
2595                                 page = NULL;
2596                         }
2597
2598                         if (page &&
2599                             page_count(page) - page_mapcount(page) != 1) {
2600                                 if (scan < LAST_SCAN)
2601                                         goto continue_resched;
2602
2603                                 /*
2604                                  * On the last scan, we clean up all those tags
2605                                  * we inserted; but make a note that we still
2606                                  * found pages pinned.
2607                                  */
2608                                 error = -EBUSY;
2609                         }
2610
2611                         spin_lock_irq(&mapping->tree_lock);
2612                         radix_tree_tag_clear(&mapping->page_tree,
2613                                              iter.index, SHMEM_TAG_PINNED);
2614                         spin_unlock_irq(&mapping->tree_lock);
2615 continue_resched:
2616                         if (need_resched()) {
2617                                 cond_resched_rcu();
2618                                 slot = radix_tree_iter_next(&iter);
2619                         }
2620                 }
2621                 rcu_read_unlock();
2622         }
2623
2624         return error;
2625 }
2626
2627 #define F_ALL_SEALS (F_SEAL_SEAL | \
2628                      F_SEAL_SHRINK | \
2629                      F_SEAL_GROW | \
2630                      F_SEAL_WRITE)
2631
2632 int shmem_add_seals(struct file *file, unsigned int seals)
2633 {
2634         struct inode *inode = file_inode(file);
2635         struct shmem_inode_info *info = SHMEM_I(inode);
2636         int error;
2637
2638         /*
2639          * SEALING
2640          * Sealing allows multiple parties to share a shmem-file but restrict
2641          * access to a specific subset of file operations. Seals can only be
2642          * added, but never removed. This way, mutually untrusted parties can
2643          * share common memory regions with a well-defined policy. A malicious
2644          * peer can thus never perform unwanted operations on a shared object.
2645          *
2646          * Seals are only supported on special shmem-files and always affect
2647          * the whole underlying inode. Once a seal is set, it may prevent some
2648          * kinds of access to the file. Currently, the following seals are
2649          * defined:
2650          *   SEAL_SEAL: Prevent further seals from being set on this file
2651          *   SEAL_SHRINK: Prevent the file from shrinking
2652          *   SEAL_GROW: Prevent the file from growing
2653          *   SEAL_WRITE: Prevent write access to the file
2654          *
2655          * As we don't require any trust relationship between two parties, we
2656          * must prevent seals from being removed. Therefore, sealing a file
2657          * only adds a given set of seals to the file, it never touches
2658          * existing seals. Furthermore, the "setting seals"-operation can be
2659          * sealed itself, which basically prevents any further seal from being
2660          * added.
2661          *
2662          * Semantics of sealing are only defined on volatile files. Only
2663          * anonymous shmem files support sealing. More importantly, seals are
2664          * never written to disk. Therefore, there's no plan to support it on
2665          * other file types.
2666          */
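	 *
	 * A minimal userspace sketch (illustrative only, error handling
	 * omitted), using the memfd interface that reaches this code:
	 *
	 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
	 *	ftruncate(fd, size);
	 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL);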
2667
2668         if (file->f_op != &shmem_file_operations)
2669                 return -EINVAL;
2670         if (!(file->f_mode & FMODE_WRITE))
2671                 return -EPERM;
2672         if (seals & ~(unsigned int)F_ALL_SEALS)
2673                 return -EINVAL;
2674
2675         inode_lock(inode);
2676
2677         if (info->seals & F_SEAL_SEAL) {
2678                 error = -EPERM;
2679                 goto unlock;
2680         }
2681
2682         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2683                 error = mapping_deny_writable(file->f_mapping);
2684                 if (error)
2685                         goto unlock;
2686
2687                 error = shmem_wait_for_pins(file->f_mapping);
2688                 if (error) {
2689                         mapping_allow_writable(file->f_mapping);
2690                         goto unlock;
2691                 }
2692         }
2693
2694         info->seals |= seals;
2695         error = 0;
2696
2697 unlock:
2698         inode_unlock(inode);
2699         return error;
2700 }
2701 EXPORT_SYMBOL_GPL(shmem_add_seals);
2702
2703 int shmem_get_seals(struct file *file)
2704 {
2705         if (file->f_op != &shmem_file_operations)
2706                 return -EINVAL;
2707
2708         return SHMEM_I(file_inode(file))->seals;
2709 }
2710 EXPORT_SYMBOL_GPL(shmem_get_seals);
2711
2712 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2713 {
2714         long error;
2715
2716         switch (cmd) {
2717         case F_ADD_SEALS:
2718                 /* disallow the upper 32 bits */
2719                 if (arg > UINT_MAX)
2720                         return -EINVAL;
2721
2722                 error = shmem_add_seals(file, arg);
2723                 break;
2724         case F_GET_SEALS:
2725                 error = shmem_get_seals(file);
2726                 break;
2727         default:
2728                 error = -EINVAL;
2729                 break;
2730         }
2731
2732         return error;
2733 }
2734
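/*
 * shmem_fallocate() implements both preallocation and hole-punching for
 * tmpfs. Illustrative userspace usage (error handling omitted):
 *
 *	fallocate(fd, 0, offset, len);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 *
 * where the first call preallocates and the second punches a hole;
 * fallocate(2) requires KEEP_SIZE together with PUNCH_HOLE.
 */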
2735 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2736                                                          loff_t len)
2737 {
2738         struct inode *inode = file_inode(file);
2739         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2740         struct shmem_inode_info *info = SHMEM_I(inode);
2741         struct shmem_falloc shmem_falloc;
2742         pgoff_t start, index, end;
2743         int error;
2744
2745         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2746                 return -EOPNOTSUPP;
2747
2748         inode_lock(inode);
2749
2750         if (mode & FALLOC_FL_PUNCH_HOLE) {
2751                 struct address_space *mapping = file->f_mapping;
2752                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2753                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2754                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2755
2756                 /* protected by i_mutex */
2757                 if (info->seals & F_SEAL_WRITE) {
2758                         error = -EPERM;
2759                         goto out;
2760                 }
2761
2762                 shmem_falloc.waitq = &shmem_falloc_waitq;
2763                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2764                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2765                 spin_lock(&inode->i_lock);
2766                 inode->i_private = &shmem_falloc;
2767                 spin_unlock(&inode->i_lock);
2768
2769                 if ((u64)unmap_end > (u64)unmap_start)
2770                         unmap_mapping_range(mapping, unmap_start,
2771                                             1 + unmap_end - unmap_start, 0);
2772                 shmem_truncate_range(inode, offset, offset + len - 1);
2773                 /* No need to unmap again: hole-punching leaves COWed pages */
2774
2775                 spin_lock(&inode->i_lock);
2776                 inode->i_private = NULL;
2777                 wake_up_all(&shmem_falloc_waitq);
2778                 spin_unlock(&inode->i_lock);
2779                 error = 0;
2780                 goto out;
2781         }
2782
2783         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE is set */
2784         error = inode_newsize_ok(inode, offset + len);
2785         if (error)
2786                 goto out;
2787
2788         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2789                 error = -EPERM;
2790                 goto out;
2791         }
2792
2793         start = offset >> PAGE_SHIFT;
2794         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2795         /* Try to avoid a swapstorm if len is impossible to satisfy */
2796         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2797                 error = -ENOSPC;
2798                 goto out;
2799         }
2800
2801         shmem_falloc.waitq = NULL;
2802         shmem_falloc.start = start;
2803         shmem_falloc.next  = start;
2804         shmem_falloc.nr_falloced = 0;
2805         shmem_falloc.nr_unswapped = 0;
2806         spin_lock(&inode->i_lock);
2807         inode->i_private = &shmem_falloc;
2808         spin_unlock(&inode->i_lock);
2809
2810         for (index = start; index < end; index++) {
2811                 struct page *page;
2812
2813                 /*
2814                  * Good, the fallocate(2) manpage permits EINTR: we may have
2815                  * been interrupted because we are using up too much memory.
2816                  */
2817                 if (signal_pending(current))
2818                         error = -EINTR;
2819                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2820                         error = -ENOMEM;
2821                 else
2822                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2823                 if (error) {
2824                         /* Remove the !PageUptodate pages we added */
2825                         if (index > start) {
2826                                 shmem_undo_range(inode,
2827                                     (loff_t)start << PAGE_SHIFT,
2828                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2829                         }
2830                         goto undone;
2831                 }
2832
2833                 /*
2834                  * Inform shmem_writepage() how far we have reached.
2835                  * No need for lock or barrier: we have the page lock.
2836                  */
2837                 shmem_falloc.next++;
2838                 if (!PageUptodate(page))
2839                         shmem_falloc.nr_falloced++;
2840
2841                 /*
2842                  * If !PageUptodate, leave it that way so that freeable pages
2843                  * can be recognized if we need to roll back on error later.
2844                  * But set_page_dirty so that memory pressure will swap rather
2845                  * than free the pages we are allocating (and SGP_CACHE pages
2846                  * might still be clean: we now need to mark those dirty too).
2847                  */
2848                 set_page_dirty(page);
2849                 unlock_page(page);
2850                 put_page(page);
2851                 cond_resched();
2852         }
2853
2854         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2855                 i_size_write(inode, offset + len);
2856         inode->i_ctime = CURRENT_TIME;
2857 undone:
2858         spin_lock(&inode->i_lock);
2859         inode->i_private = NULL;
2860         spin_unlock(&inode->i_lock);
2861 out:
2862         inode_unlock(inode);
2863         return error;
2864 }
2865
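/*
 * Usage sketch (userspace, not part of this file): tmpfs hole-punching
 * takes the FALLOC_FL_PUNCH_HOLE path above; fallocate(2) requires it to
 * be paired with FALLOC_FL_KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * whereas a plain preallocation that may extend i_size is simply:
 *
 *	fallocate(fd, 0, 0, new_size);
 */
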
2866 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2867 {
2868         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2869
2870         buf->f_type = TMPFS_MAGIC;
2871         buf->f_bsize = PAGE_SIZE;
2872         buf->f_namelen = NAME_MAX;
2873         if (sbinfo->max_blocks) {
2874                 buf->f_blocks = sbinfo->max_blocks;
2875                 buf->f_bavail =
2876                 buf->f_bfree  = sbinfo->max_blocks -
2877                                 percpu_counter_sum(&sbinfo->used_blocks);
2878         }
2879         if (sbinfo->max_inodes) {
2880                 buf->f_files = sbinfo->max_inodes;
2881                 buf->f_ffree = sbinfo->free_inodes;
2882         }
2883         /* else leave those fields 0 like simple_statfs */
2884         return 0;
2885 }
2886
2887 /*
2888  * File creation. Allocate an inode, and we're done.
2889  */
2890 static int
2891 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2892 {
2893         struct inode *inode;
2894         int error = -ENOSPC;
2895
2896         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2897         if (inode) {
2898                 error = simple_acl_create(dir, inode);
2899                 if (error)
2900                         goto out_iput;
2901                 error = security_inode_init_security(inode, dir,
2902                                                      &dentry->d_name,
2903                                                      shmem_initxattrs, NULL);
2904                 if (error && error != -EOPNOTSUPP)
2905                         goto out_iput;
2906
2907                 error = 0;
2908                 dir->i_size += BOGO_DIRENT_SIZE;
2909                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2910                 d_instantiate(dentry, inode);
2911                 dget(dentry); /* Extra count - pin the dentry in core */
2912         }
2913         return error;
2914 out_iput:
2915         iput(inode);
2916         return error;
2917 }
2918
2919 static int
2920 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2921 {
2922         struct inode *inode;
2923         int error = -ENOSPC;
2924
2925         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2926         if (inode) {
2927                 error = security_inode_init_security(inode, dir,
2928                                                      NULL,
2929                                                      shmem_initxattrs, NULL);
2930                 if (error && error != -EOPNOTSUPP)
2931                         goto out_iput;
2932                 error = simple_acl_create(dir, inode);
2933                 if (error)
2934                         goto out_iput;
2935                 d_tmpfile(dentry, inode);
2936         }
2937         return error;
2938 out_iput:
2939         iput(inode);
2940         return error;
2941 }
2942
2943 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2944 {
2945         int error;
2946
2947         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2948                 return error;
2949         inc_nlink(dir);
2950         return 0;
2951 }
2952
2953 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2954                 bool excl)
2955 {
2956         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2957 }
2958
2959 /*
2960  * Link a file.
2961  */
2962 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2963 {
2964         struct inode *inode = d_inode(old_dentry);
2965         int ret;
2966
2967         /*
2968          * No ordinary (disk-based) filesystem counts links as inodes;
2969          * but each new link needs a new dentry, pinning lowmem, and
2970          * tmpfs dentries cannot be pruned until they are unlinked.
2971          */
2972         ret = shmem_reserve_inode(inode->i_sb);
2973         if (ret)
2974                 goto out;
2975
2976         dir->i_size += BOGO_DIRENT_SIZE;
2977         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2978         inc_nlink(inode);
2979         ihold(inode);   /* New dentry reference */
2980         dget(dentry);           /* Extra pinning count for the created dentry */
2981         d_instantiate(dentry, inode);
2982 out:
2983         return ret;
2984 }
2985
2986 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2987 {
2988         struct inode *inode = d_inode(dentry);
2989
2990         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2991                 shmem_free_inode(inode->i_sb);
2992
2993         dir->i_size -= BOGO_DIRENT_SIZE;
2994         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2995         drop_nlink(inode);
2996         dput(dentry);   /* Undo the count from "create" - this does all the work */
2997         return 0;
2998 }
2999
3000 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3001 {
3002         if (!simple_empty(dentry))
3003                 return -ENOTEMPTY;
3004
3005         drop_nlink(d_inode(dentry));
3006         drop_nlink(dir);
3007         return shmem_unlink(dir, dentry);
3008 }
3009
3010 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
3011 {
3012         bool old_is_dir = d_is_dir(old_dentry);
3013         bool new_is_dir = d_is_dir(new_dentry);
3014
3015         if (old_dir != new_dir && old_is_dir != new_is_dir) {
3016                 if (old_is_dir) {
3017                         drop_nlink(old_dir);
3018                         inc_nlink(new_dir);
3019                 } else {
3020                         drop_nlink(new_dir);
3021                         inc_nlink(old_dir);
3022                 }
3023         }
3024         old_dir->i_ctime = old_dir->i_mtime =
3025         new_dir->i_ctime = new_dir->i_mtime =
3026         d_inode(old_dentry)->i_ctime =
3027         d_inode(new_dentry)->i_ctime = CURRENT_TIME;
3028
3029         return 0;
3030 }
3031
3032 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3033 {
3034         struct dentry *whiteout;
3035         int error;
3036
3037         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3038         if (!whiteout)
3039                 return -ENOMEM;
3040
3041         error = shmem_mknod(old_dir, whiteout,
3042                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3043         dput(whiteout);
3044         if (error)
3045                 return error;
3046
3047         /*
3048          * Cheat and hash the whiteout while the old dentry is still in
3049          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3050          *
3051          * d_lookup() will consistently find one of them at this point,
3052          * not sure which one, but that isn't even important.
3053          */
3054         d_rehash(whiteout);
3055         return 0;
3056 }
3057
3058 /*
3059  * The VFS layer already does all the dentry stuff for rename,
3060  * we just have to decrement the usage count for the target if
3061  * it exists so that the VFS layer correctly frees it when it
3062  * gets overwritten.
3063  */
3064 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3065 {
3066         struct inode *inode = d_inode(old_dentry);
3067         int they_are_dirs = S_ISDIR(inode->i_mode);
3068
3069         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3070                 return -EINVAL;
3071
3072         if (flags & RENAME_EXCHANGE)
3073                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3074
3075         if (!simple_empty(new_dentry))
3076                 return -ENOTEMPTY;
3077
3078         if (flags & RENAME_WHITEOUT) {
3079                 int error;
3080
3081                 error = shmem_whiteout(old_dir, old_dentry);
3082                 if (error)
3083                         return error;
3084         }
3085
3086         if (d_really_is_positive(new_dentry)) {
3087                 (void) shmem_unlink(new_dir, new_dentry);
3088                 if (they_are_dirs) {
3089                         drop_nlink(d_inode(new_dentry));
3090                         drop_nlink(old_dir);
3091                 }
3092         } else if (they_are_dirs) {
3093                 drop_nlink(old_dir);
3094                 inc_nlink(new_dir);
3095         }
3096
3097         old_dir->i_size -= BOGO_DIRENT_SIZE;
3098         new_dir->i_size += BOGO_DIRENT_SIZE;
3099         old_dir->i_ctime = old_dir->i_mtime =
3100         new_dir->i_ctime = new_dir->i_mtime =
3101         inode->i_ctime = CURRENT_TIME;
3102         return 0;
3103 }
3104
3105 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3106 {
3107         int error;
3108         int len;
3109         struct inode *inode;
3110         struct page *page;
3111         struct shmem_inode_info *info;
3112
3113         len = strlen(symname) + 1;
3114         if (len > PAGE_SIZE)
3115                 return -ENAMETOOLONG;
3116
3117         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
3118         if (!inode)
3119                 return -ENOSPC;
3120
3121         error = security_inode_init_security(inode, dir, &dentry->d_name,
3122                                              shmem_initxattrs, NULL);
3123         if (error) {
3124                 if (error != -EOPNOTSUPP) {
3125                         iput(inode);
3126                         return error;
3127                 }
3128                 error = 0;
3129         }
3130
3131         info = SHMEM_I(inode);
3132         inode->i_size = len-1;
3133         if (len <= SHORT_SYMLINK_LEN) {
3134                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3135                 if (!inode->i_link) {
3136                         iput(inode);
3137                         return -ENOMEM;
3138                 }
3139                 inode->i_op = &shmem_short_symlink_operations;
3140         } else {
3141                 inode_nohighmem(inode);
3142                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3143                 if (error) {
3144                         iput(inode);
3145                         return error;
3146                 }
3147                 inode->i_mapping->a_ops = &shmem_aops;
3148                 inode->i_op = &shmem_symlink_inode_operations;
3149                 memcpy(page_address(page), symname, len);
3150                 SetPageUptodate(page);
3151                 set_page_dirty(page);
3152                 unlock_page(page);
3153                 put_page(page);
3154         }
3155         dir->i_size += BOGO_DIRENT_SIZE;
3156         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
3157         d_instantiate(dentry, inode);
3158         dget(dentry);
3159         return 0;
3160 }
3161
3162 static void shmem_put_link(void *arg)
3163 {
3164         mark_page_accessed(arg);
3165         put_page(arg);
3166 }
3167
3168 static const char *shmem_get_link(struct dentry *dentry,
3169                                   struct inode *inode,
3170                                   struct delayed_call *done)
3171 {
3172         struct page *page = NULL;
3173         int error;
3174         if (!dentry) {
3175                 page = find_get_page(inode->i_mapping, 0);
3176                 if (!page)
3177                         return ERR_PTR(-ECHILD);
3178                 if (!PageUptodate(page)) {
3179                         put_page(page);
3180                         return ERR_PTR(-ECHILD);
3181                 }
3182         } else {
3183                 error = shmem_getpage(inode, 0, &page, SGP_READ);
3184                 if (error)
3185                         return ERR_PTR(error);
3186                 unlock_page(page);
3187         }
3188         set_delayed_call(done, shmem_put_link, page);
3189         return page_address(page);
3190 }
3191
3192 #ifdef CONFIG_TMPFS_XATTR
3193 /*
3194  * Superblocks without xattr inode operations may get some security.* xattr
3195  * support from the LSM "for free". As soon as we have any other xattrs
3196  * like ACLs, we also need to implement the security.* handlers at
3197  * filesystem level, though.
3198  */
3199
3200 /*
3201  * Callback for security_inode_init_security() for acquiring xattrs.
3202  */
3203 static int shmem_initxattrs(struct inode *inode,
3204                             const struct xattr *xattr_array,
3205                             void *fs_info)
3206 {
3207         struct shmem_inode_info *info = SHMEM_I(inode);
3208         const struct xattr *xattr;
3209         struct simple_xattr *new_xattr;
3210         size_t len;
3211
3212         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3213                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3214                 if (!new_xattr)
3215                         return -ENOMEM;
3216
3217                 len = strlen(xattr->name) + 1;
3218                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3219                                           GFP_KERNEL);
3220                 if (!new_xattr->name) {
3221                         kfree(new_xattr);
3222                         return -ENOMEM;
3223                 }
3224
3225                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3226                        XATTR_SECURITY_PREFIX_LEN);
3227                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3228                        xattr->name, len);
3229
3230                 simple_xattr_list_add(&info->xattrs, new_xattr);
3231         }
3232
3233         return 0;
3234 }
3235
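/*
 * Usage sketch (assumption: standard xattr(7) tooling, nothing from this
 * file): the handlers below serve the security.* and trusted.* prefixes,
 * so on a tmpfs mount e.g.
 *
 *	setfattr -n trusted.foo -v bar /mnt/file   (needs CAP_SYS_ADMIN)
 *	getfattr -n security.selinux /mnt/file
 *
 * work, while user.* xattrs are not supported here.
 */
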
3236 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3237                                    struct dentry *unused, struct inode *inode,
3238                                    const char *name, void *buffer, size_t size)
3239 {
3240         struct shmem_inode_info *info = SHMEM_I(inode);
3241
3242         name = xattr_full_name(handler, name);
3243         return simple_xattr_get(&info->xattrs, name, buffer, size);
3244 }
3245
3246 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3247                                    struct dentry *unused, struct inode *inode,
3248                                    const char *name, const void *value,
3249                                    size_t size, int flags)
3250 {
3251         struct shmem_inode_info *info = SHMEM_I(inode);
3252
3253         name = xattr_full_name(handler, name);
3254         return simple_xattr_set(&info->xattrs, name, value, size, flags);
3255 }
3256
3257 static const struct xattr_handler shmem_security_xattr_handler = {
3258         .prefix = XATTR_SECURITY_PREFIX,
3259         .get = shmem_xattr_handler_get,
3260         .set = shmem_xattr_handler_set,
3261 };
3262
3263 static const struct xattr_handler shmem_trusted_xattr_handler = {
3264         .prefix = XATTR_TRUSTED_PREFIX,
3265         .get = shmem_xattr_handler_get,
3266         .set = shmem_xattr_handler_set,
3267 };
3268
3269 static const struct xattr_handler *shmem_xattr_handlers[] = {
3270 #ifdef CONFIG_TMPFS_POSIX_ACL
3271         &posix_acl_access_xattr_handler,
3272         &posix_acl_default_xattr_handler,
3273 #endif
3274         &shmem_security_xattr_handler,
3275         &shmem_trusted_xattr_handler,
3276         NULL
3277 };
3278
3279 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3280 {
3281         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3282         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3283 }
3284 #endif /* CONFIG_TMPFS_XATTR */
3285
3286 static const struct inode_operations shmem_short_symlink_operations = {
3287         .readlink       = generic_readlink,
3288         .get_link       = simple_get_link,
3289 #ifdef CONFIG_TMPFS_XATTR
3290         .setxattr       = generic_setxattr,
3291         .getxattr       = generic_getxattr,
3292         .listxattr      = shmem_listxattr,
3293         .removexattr    = generic_removexattr,
3294 #endif
3295 };
3296
3297 static const struct inode_operations shmem_symlink_inode_operations = {
3298         .readlink       = generic_readlink,
3299         .get_link       = shmem_get_link,
3300 #ifdef CONFIG_TMPFS_XATTR
3301         .setxattr       = generic_setxattr,
3302         .getxattr       = generic_getxattr,
3303         .listxattr      = shmem_listxattr,
3304         .removexattr    = generic_removexattr,
3305 #endif
3306 };
3307
3308 static struct dentry *shmem_get_parent(struct dentry *child)
3309 {
3310         return ERR_PTR(-ESTALE);
3311 }
3312
3313 static int shmem_match(struct inode *ino, void *vfh)
3314 {
3315         __u32 *fh = vfh;
3316         __u64 inum = fh[2];
3317         inum = (inum << 32) | fh[1];
3318         return ino->i_ino == inum && fh[0] == ino->i_generation;
3319 }
3320
3321 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3322                 struct fid *fid, int fh_len, int fh_type)
3323 {
3324         struct inode *inode;
3325         struct dentry *dentry = NULL;
3326         u64 inum;
3327
3328         if (fh_len < 3)
3329                 return NULL;
3330
3331         inum = fid->raw[2];
3332         inum = (inum << 32) | fid->raw[1];
3333
3334         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3335                         shmem_match, fid->raw);
3336         if (inode) {
3337                 dentry = d_find_alias(inode);
3338                 iput(inode);
3339         }
3340
3341         return dentry;
3342 }
3343
3344 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3345                                 struct inode *parent)
3346 {
3347         if (*len < 3) {
3348                 *len = 3;
3349                 return FILEID_INVALID;
3350         }
3351
3352         if (inode_unhashed(inode)) {
3353                 /* Unfortunately insert_inode_hash is not idempotent,
3354                  * so as we hash inodes here rather than at creation
3355                  * time, we need a lock to ensure we only try
3356                  * to do it once.
3357                  */
3358                 static DEFINE_SPINLOCK(lock);
3359                 spin_lock(&lock);
3360                 if (inode_unhashed(inode))
3361                         __insert_inode_hash(inode,
3362                                             inode->i_ino + inode->i_generation);
3363                 spin_unlock(&lock);
3364         }
3365
3366         fh[0] = inode->i_generation;
3367         fh[1] = inode->i_ino;
3368         fh[2] = ((__u64)inode->i_ino) >> 32;
3369
3370         *len = 3;
3371         return 1;
3372 }
3373
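/*
 * Illustration (assumption, not from this file): the 3-word handle laid
 * out above is what userspace gets back from name_to_handle_at(2) on a
 * tmpfs file, and what shmem_fh_to_dentry() decodes again:
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 */
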
3374 static const struct export_operations shmem_export_ops = {
3375         .get_parent     = shmem_get_parent,
3376         .encode_fh      = shmem_encode_fh,
3377         .fh_to_dentry   = shmem_fh_to_dentry,
3378 };
3379
3380 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3381                                bool remount)
3382 {
3383         char *this_char, *value, *rest;
3384         struct mempolicy *mpol = NULL;
3385         uid_t uid;
3386         gid_t gid;
3387
3388         while (options != NULL) {
3389                 this_char = options;
3390                 for (;;) {
3391                         /*
3392                          * NUL-terminate this option: unfortunately,
3393                          * mount options form a comma-separated list,
3394                          * but mpol's nodelist may also contain commas.
3395                          */
3396                         options = strchr(options, ',');
3397                         if (options == NULL)
3398                                 break;
3399                         options++;
3400                         if (!isdigit(*options)) {
3401                                 options[-1] = '\0';
3402                                 break;
3403                         }
3404                 }
3405                 if (!*this_char)
3406                         continue;
3407                 if ((value = strchr(this_char,'=')) != NULL) {
3408                         *value++ = 0;
3409                 } else {
3410                         pr_err("tmpfs: No value for mount option '%s'\n",
3411                                this_char);
3412                         goto error;
3413                 }
3414
3415                 if (!strcmp(this_char,"size")) {
3416                         unsigned long long size;
3417                         size = memparse(value,&rest);
3418                         if (*rest == '%') {
3419                                 size <<= PAGE_SHIFT;
3420                                 size *= totalram_pages;
3421                                 do_div(size, 100);
3422                                 rest++;
3423                         }
3424                         if (*rest)
3425                                 goto bad_val;
3426                         sbinfo->max_blocks =
3427                                 DIV_ROUND_UP(size, PAGE_SIZE);
3428                 } else if (!strcmp(this_char,"nr_blocks")) {
3429                         sbinfo->max_blocks = memparse(value, &rest);
3430                         if (*rest)
3431                                 goto bad_val;
3432                 } else if (!strcmp(this_char,"nr_inodes")) {
3433                         sbinfo->max_inodes = memparse(value, &rest);
3434                         if (*rest)
3435                                 goto bad_val;
3436                 } else if (!strcmp(this_char,"mode")) {
3437                         if (remount)
3438                                 continue;
3439                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
3440                         if (*rest)
3441                                 goto bad_val;
3442                 } else if (!strcmp(this_char,"uid")) {
3443                         if (remount)
3444                                 continue;
3445                         uid = simple_strtoul(value, &rest, 0);
3446                         if (*rest)
3447                                 goto bad_val;
3448                         sbinfo->uid = make_kuid(current_user_ns(), uid);
3449                         if (!uid_valid(sbinfo->uid))
3450                                 goto bad_val;
3451                 } else if (!strcmp(this_char,"gid")) {
3452                         if (remount)
3453                                 continue;
3454                         gid = simple_strtoul(value, &rest, 0);
3455                         if (*rest)
3456                                 goto bad_val;
3457                         sbinfo->gid = make_kgid(current_user_ns(), gid);
3458                         if (!gid_valid(sbinfo->gid))
3459                                 goto bad_val;
3460 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3461                 } else if (!strcmp(this_char, "huge")) {
3462                         int huge;
3463                         huge = shmem_parse_huge(value);
3464                         if (huge < 0)
3465                                 goto bad_val;
3466                         if (!has_transparent_hugepage() &&
3467                                         huge != SHMEM_HUGE_NEVER)
3468                                 goto bad_val;
3469                         sbinfo->huge = huge;
3470 #endif
3471 #ifdef CONFIG_NUMA
3472                 } else if (!strcmp(this_char,"mpol")) {
3473                         mpol_put(mpol);
3474                         mpol = NULL;
3475                         if (mpol_parse_str(value, &mpol))
3476                                 goto bad_val;
3477 #endif
3478                 } else {
3479                         pr_err("tmpfs: Bad mount option %s\n", this_char);
3480                         goto error;
3481                 }
3482         }
3483         sbinfo->mpol = mpol;
3484         return 0;
3485
3486 bad_val:
3487         pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
3488                value, this_char);
3489 error:
3490         mpol_put(mpol);
3491         return 1;
3492
3493 }
3494
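/*
 * Example (assumption: standard mount(8) usage, not from this file): the
 * options parsed above arrive as one comma-separated string, e.g.
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10240,mode=1777,uid=1000,gid=1000 tmpfs /mnt
 *
 * "size=50%" exercises the percent branch above; "huge=" is additionally
 * accepted under CONFIG_TRANSPARENT_HUGE_PAGECACHE.
 */
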
3495 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3496 {
3497         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3498         struct shmem_sb_info config = *sbinfo;
3499         unsigned long inodes;
3500         int error = -EINVAL;
3501
3502         config.mpol = NULL;
3503         if (shmem_parse_options(data, &config, true))
3504                 return error;
3505
3506         spin_lock(&sbinfo->stat_lock);
3507         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3508         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
3509                 goto out;
3510         if (config.max_inodes < inodes)
3511                 goto out;
3512         /*
3513          * Those tests disallow limited->unlimited while any are in use;
3514          * but we must separately disallow unlimited->limited, because
3515          * in that case we have no record of how much is already in use.
3516          */
3517         if (config.max_blocks && !sbinfo->max_blocks)
3518                 goto out;
3519         if (config.max_inodes && !sbinfo->max_inodes)
3520                 goto out;
3521
3522         error = 0;
3523         sbinfo->huge = config.huge;
3524         sbinfo->max_blocks  = config.max_blocks;
3525         sbinfo->max_inodes  = config.max_inodes;
3526         sbinfo->free_inodes = config.max_inodes - inodes;
3527
3528         /*
3529          * Preserve previous mempolicy unless mpol remount option was specified.
3530          */
3531         if (config.mpol) {
3532                 mpol_put(sbinfo->mpol);
3533                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
3534         }
3535 out:
3536         spin_unlock(&sbinfo->stat_lock);
3537         return error;
3538 }
3539
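/*
 * Remount sketch (assumption: standard usage): the checks above are what
 * make e.g.
 *
 *	mount -o remount,size=2G /mnt
 *
 * fail with EINVAL on an instance mounted with size=0 (unlimited), since
 * there is then no record of current usage to compare the new limit with.
 */
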
3540 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3541 {
3542         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3543
3544         if (sbinfo->max_blocks != shmem_default_max_blocks())
3545                 seq_printf(seq, ",size=%luk",
3546                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
3547         if (sbinfo->max_inodes != shmem_default_max_inodes())
3548                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3549         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
3550                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3551         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3552                 seq_printf(seq, ",uid=%u",
3553                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
3554         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3555                 seq_printf(seq, ",gid=%u",
3556                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
3557 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3558         /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3559         if (sbinfo->huge)
3560                 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3561 #endif
3562         shmem_show_mpol(seq, sbinfo->mpol);
3563         return 0;
3564 }
3565
3566 #define MFD_NAME_PREFIX "memfd:"
3567 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
3568 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
3569
3570 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
3571
3572 SYSCALL_DEFINE2(memfd_create,
3573                 const char __user *, uname,
3574                 unsigned int, flags)
3575 {
3576         struct shmem_inode_info *info;
3577         struct file *file;
3578         int fd, error;
3579         char *name;
3580         long len;
3581
3582         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
3583                 return -EINVAL;
3584
3585         /* length includes terminating zero */
3586         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
3587         if (len <= 0)
3588                 return -EFAULT;
3589         if (len > MFD_NAME_MAX_LEN + 1)
3590                 return -EINVAL;
3591
3592         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
3593         if (!name)
3594                 return -ENOMEM;
3595
3596         strcpy(name, MFD_NAME_PREFIX);
3597         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
3598                 error = -EFAULT;
3599                 goto err_name;
3600         }
3601
3602         /* terminating-zero may have changed after strnlen_user() returned */
3603         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
3604                 error = -EFAULT;
3605                 goto err_name;
3606         }
3607
3608         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
3609         if (fd < 0) {
3610                 error = fd;
3611                 goto err_name;
3612         }
3613
3614         file = shmem_file_setup(name, 0, VM_NORESERVE);
3615         if (IS_ERR(file)) {
3616                 error = PTR_ERR(file);
3617                 goto err_fd;
3618         }
3619         info = SHMEM_I(file_inode(file));
3620         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
3621         file->f_flags |= O_RDWR | O_LARGEFILE;
3622         if (flags & MFD_ALLOW_SEALING)
3623                 info->seals &= ~F_SEAL_SEAL;
3624
3625         fd_install(fd, file);
3626         kfree(name);
3627         return fd;
3628
3629 err_fd:
3630         put_unused_fd(fd);
3631 err_name:
3632         kfree(name);
3633         return error;
3634 }
3635
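/*
 * Observable effect (assumption, not from this file): MFD_NAME_PREFIX is
 * what makes a memfd show up in /proc/<pid>/fd as a link to e.g.
 *
 *	/memfd:example (deleted)
 *
 * and MFD_CLOEXEC above maps directly to O_CLOEXEC on the returned fd.
 */
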
3636 #endif /* CONFIG_TMPFS */
3637
3638 static void shmem_put_super(struct super_block *sb)
3639 {
3640         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3641
3642         percpu_counter_destroy(&sbinfo->used_blocks);
3643         mpol_put(sbinfo->mpol);
3644         kfree(sbinfo);
3645         sb->s_fs_info = NULL;
3646 }
3647
3648 int shmem_fill_super(struct super_block *sb, void *data, int silent)
3649 {
3650         struct inode *inode;
3651         struct shmem_sb_info *sbinfo;
3652         int err = -ENOMEM;
3653
3654         /* Round up to L1_CACHE_BYTES to resist false sharing */
3655         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3656                                 L1_CACHE_BYTES), GFP_KERNEL);
3657         if (!sbinfo)
3658                 return -ENOMEM;
3659
3660         sbinfo->mode = S_IRWXUGO | S_ISVTX;
3661         sbinfo->uid = current_fsuid();
3662         sbinfo->gid = current_fsgid();
3663         sb->s_fs_info = sbinfo;
3664
3665 #ifdef CONFIG_TMPFS
3666         /*
3667          * By default we only allow half of the physical RAM per
3668          * tmpfs instance, limiting inodes to one per page of lowmem;
3669          * but the internal instance is left unlimited.
3670          */
3671         if (!(sb->s_flags & MS_KERNMOUNT)) {
3672                 sbinfo->max_blocks = shmem_default_max_blocks();
3673                 sbinfo->max_inodes = shmem_default_max_inodes();
3674                 if (shmem_parse_options(data, sbinfo, false)) {
3675                         err = -EINVAL;
3676                         goto failed;
3677                 }
3678         } else {
3679                 sb->s_flags |= MS_NOUSER;
3680         }
3681         sb->s_export_op = &shmem_export_ops;
3682         sb->s_flags |= MS_NOSEC;
3683 #else
3684         sb->s_flags |= MS_NOUSER;
3685 #endif
3686
3687         spin_lock_init(&sbinfo->stat_lock);
3688         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3689                 goto failed;
3690         sbinfo->free_inodes = sbinfo->max_inodes;
3691         spin_lock_init(&sbinfo->shrinklist_lock);
3692         INIT_LIST_HEAD(&sbinfo->shrinklist);
3693
3694         sb->s_maxbytes = MAX_LFS_FILESIZE;
3695         sb->s_blocksize = PAGE_SIZE;
3696         sb->s_blocksize_bits = PAGE_SHIFT;
3697         sb->s_magic = TMPFS_MAGIC;
3698         sb->s_op = &shmem_ops;
3699         sb->s_time_gran = 1;
3700 #ifdef CONFIG_TMPFS_XATTR
3701         sb->s_xattr = shmem_xattr_handlers;
3702 #endif
3703 #ifdef CONFIG_TMPFS_POSIX_ACL
3704         sb->s_flags |= MS_POSIXACL;
3705 #endif
3706
3707         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3708         if (!inode)
3709                 goto failed;
3710         inode->i_uid = sbinfo->uid;
3711         inode->i_gid = sbinfo->gid;
3712         sb->s_root = d_make_root(inode);
3713         if (!sb->s_root)
3714                 goto failed;
3715         return 0;
3716
3717 failed:
3718         shmem_put_super(sb);
3719         return err;
3720 }
3721
3722 static struct kmem_cache *shmem_inode_cachep;
3723
3724 static struct inode *shmem_alloc_inode(struct super_block *sb)
3725 {
3726         struct shmem_inode_info *info;
3727         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3728         if (!info)
3729                 return NULL;
3730         return &info->vfs_inode;
3731 }
3732
3733 static void shmem_destroy_callback(struct rcu_head *head)
3734 {
3735         struct inode *inode = container_of(head, struct inode, i_rcu);
3736         if (S_ISLNK(inode->i_mode))
3737                 kfree(inode->i_link);
3738         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3739 }
3740
3741 static void shmem_destroy_inode(struct inode *inode)
3742 {
3743         if (S_ISREG(inode->i_mode))
3744                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3745         call_rcu(&inode->i_rcu, shmem_destroy_callback);
3746 }
3747
3748 static void shmem_init_inode(void *foo)
3749 {
3750         struct shmem_inode_info *info = foo;
3751         inode_init_once(&info->vfs_inode);
3752 }
3753
3754 static int shmem_init_inodecache(void)
3755 {
3756         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3757                                 sizeof(struct shmem_inode_info),
3758                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3759         return 0;
3760 }
3761
3762 static void shmem_destroy_inodecache(void)
3763 {
3764         kmem_cache_destroy(shmem_inode_cachep);
3765 }
3766
3767 static const struct address_space_operations shmem_aops = {
3768         .writepage      = shmem_writepage,
3769         .set_page_dirty = __set_page_dirty_no_writeback,
3770 #ifdef CONFIG_TMPFS
3771         .write_begin    = shmem_write_begin,
3772         .write_end      = shmem_write_end,
3773 #endif
3774 #ifdef CONFIG_MIGRATION
3775         .migratepage    = migrate_page,
3776 #endif
3777         .error_remove_page = generic_error_remove_page,
3778 };
3779
3780 static const struct file_operations shmem_file_operations = {
3781         .mmap           = shmem_mmap,
3782         .get_unmapped_area = shmem_get_unmapped_area,
3783 #ifdef CONFIG_TMPFS
3784         .llseek         = shmem_file_llseek,
3785         .read_iter      = shmem_file_read_iter,
3786         .write_iter     = generic_file_write_iter,
3787         .fsync          = noop_fsync,
3788         .splice_read    = shmem_file_splice_read,
3789         .splice_write   = iter_file_splice_write,
3790         .fallocate      = shmem_fallocate,
3791 #endif
3792 };
3793
3794 static const struct inode_operations shmem_inode_operations = {
3795         .getattr        = shmem_getattr,
3796         .setattr        = shmem_setattr,
3797 #ifdef CONFIG_TMPFS_XATTR
3798         .setxattr       = generic_setxattr,
3799         .getxattr       = generic_getxattr,
3800         .listxattr      = shmem_listxattr,
3801         .removexattr    = generic_removexattr,
3802         .set_acl        = simple_set_acl,
3803 #endif
3804 };
3805
3806 static const struct inode_operations shmem_dir_inode_operations = {
3807 #ifdef CONFIG_TMPFS
3808         .create         = shmem_create,
3809         .lookup         = simple_lookup,
3810         .link           = shmem_link,
3811         .unlink         = shmem_unlink,
3812         .symlink        = shmem_symlink,
3813         .mkdir          = shmem_mkdir,
3814         .rmdir          = shmem_rmdir,
3815         .mknod          = shmem_mknod,
3816         .rename2        = shmem_rename2,
3817         .tmpfile        = shmem_tmpfile,
3818 #endif
3819 #ifdef CONFIG_TMPFS_XATTR
3820         .setxattr       = generic_setxattr,
3821         .getxattr       = generic_getxattr,
3822         .listxattr      = shmem_listxattr,
3823         .removexattr    = generic_removexattr,
3824 #endif
3825 #ifdef CONFIG_TMPFS_POSIX_ACL
3826         .setattr        = shmem_setattr,
3827         .set_acl        = simple_set_acl,
3828 #endif
3829 };
3830
3831 static const struct inode_operations shmem_special_inode_operations = {
3832 #ifdef CONFIG_TMPFS_XATTR
3833         .setxattr       = generic_setxattr,
3834         .getxattr       = generic_getxattr,
3835         .listxattr      = shmem_listxattr,
3836         .removexattr    = generic_removexattr,
3837 #endif
3838 #ifdef CONFIG_TMPFS_POSIX_ACL
3839         .setattr        = shmem_setattr,
3840         .set_acl        = simple_set_acl,
3841 #endif
3842 };
3843
3844 static const struct super_operations shmem_ops = {
3845         .alloc_inode    = shmem_alloc_inode,
3846         .destroy_inode  = shmem_destroy_inode,
3847 #ifdef CONFIG_TMPFS
3848         .statfs         = shmem_statfs,
3849         .remount_fs     = shmem_remount_fs,
3850         .show_options   = shmem_show_options,
3851 #endif
3852         .evict_inode    = shmem_evict_inode,
3853         .drop_inode     = generic_delete_inode,
3854         .put_super      = shmem_put_super,
3855 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3856         .nr_cached_objects      = shmem_unused_huge_count,
3857         .free_cached_objects    = shmem_unused_huge_scan,
3858 #endif
3859 };
3860
3861 static const struct vm_operations_struct shmem_vm_ops = {
3862         .fault          = shmem_fault,
3863         .map_pages      = filemap_map_pages,
3864 #ifdef CONFIG_NUMA
3865         .set_policy     = shmem_set_policy,
3866         .get_policy     = shmem_get_policy,
3867 #endif
3868 };
3869
3870 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3871         int flags, const char *dev_name, void *data)
3872 {
3873         return mount_nodev(fs_type, flags, data, shmem_fill_super);
3874 }
3875
3876 static struct file_system_type shmem_fs_type = {
3877         .owner          = THIS_MODULE,
3878         .name           = "tmpfs",
3879         .mount          = shmem_mount,
3880         .kill_sb        = kill_litter_super,
3881         .fs_flags       = FS_USERNS_MOUNT,
3882 };
3883
3884 int __init shmem_init(void)
3885 {
3886         int error;
3887
3888         /* If rootfs called this, don't re-init */
3889         if (shmem_inode_cachep)
3890                 return 0;
3891
3892         error = shmem_init_inodecache();
3893         if (error)
3894                 goto out3;
3895
3896         error = register_filesystem(&shmem_fs_type);
3897         if (error) {
3898                 pr_err("Could not register tmpfs\n");
3899                 goto out2;
3900         }
3901
3902         shm_mnt = kern_mount(&shmem_fs_type);
3903         if (IS_ERR(shm_mnt)) {
3904                 error = PTR_ERR(shm_mnt);
3905                 pr_err("Could not kern_mount tmpfs\n");
3906                 goto out1;
3907         }
3908
3909 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3910         if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
3911                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3912         else
3913                 shmem_huge = 0; /* just in case it was patched */
3914 #endif
3915         return 0;
3916
3917 out1:
3918         unregister_filesystem(&shmem_fs_type);
3919 out2:
3920         shmem_destroy_inodecache();
3921 out3:
3922         shm_mnt = ERR_PTR(error);
3923         return error;
3924 }
3925
3926 #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3927 static ssize_t shmem_enabled_show(struct kobject *kobj,
3928                 struct kobj_attribute *attr, char *buf)
3929 {
3930         int values[] = {
3931                 SHMEM_HUGE_ALWAYS,
3932                 SHMEM_HUGE_WITHIN_SIZE,
3933                 SHMEM_HUGE_ADVISE,
3934                 SHMEM_HUGE_NEVER,
3935                 SHMEM_HUGE_DENY,
3936                 SHMEM_HUGE_FORCE,
3937         };
3938         int i, count;
3939
3940         for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3941                 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3942
3943                 count += sprintf(buf + count, fmt,
3944                                 shmem_format_huge(values[i]));
3945         }
3946         buf[count - 1] = '\n';
3947         return count;
3948 }
3949
3950 static ssize_t shmem_enabled_store(struct kobject *kobj,
3951                 struct kobj_attribute *attr, const char *buf, size_t count)
3952 {
3953         char tmp[16];
3954         int huge;
3955
3956         if (count + 1 > sizeof(tmp))
3957                 return -EINVAL;
3958         memcpy(tmp, buf, count);
3959         tmp[count] = '\0';
3960         if (count && tmp[count - 1] == '\n')
3961                 tmp[count - 1] = '\0';
3962
3963         huge = shmem_parse_huge(tmp);
3964         if (huge == -EINVAL)
3965                 return -EINVAL;
3966         if (!has_transparent_hugepage() &&
3967                         huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3968                 return -EINVAL;
3969
3970         shmem_huge = huge;
3971         if (shmem_huge < SHMEM_HUGE_DENY)
3972                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3973         return count;
3974 }
3975
3976 struct kobj_attribute shmem_enabled_attr =
3977         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
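
/*
 * Admin-visible knob (assumption about the sysfs path): this attribute is
 * surfaced as /sys/kernel/mm/transparent_hugepage/shmem_enabled, so
 *
 *	echo advise > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * feeds shmem_enabled_store(), and reading it back shows the current
 * value bracketed, e.g. "always within_size [advise] never deny force".
 */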
3978 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3979
3980 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3981 bool shmem_huge_enabled(struct vm_area_struct *vma)
3982 {
3983         struct inode *inode = file_inode(vma->vm_file);
3984         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3985         loff_t i_size;
3986         pgoff_t off;
3987
3988         if (shmem_huge == SHMEM_HUGE_FORCE)
3989                 return true;
3990         if (shmem_huge == SHMEM_HUGE_DENY)
3991                 return false;
3992         switch (sbinfo->huge) {
3993                 case SHMEM_HUGE_NEVER:
3994                         return false;
3995                 case SHMEM_HUGE_ALWAYS:
3996                         return true;
3997                 case SHMEM_HUGE_WITHIN_SIZE:
3998                         off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3999                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
4000                         if (i_size >= HPAGE_PMD_SIZE &&
4001                                         i_size >> PAGE_SHIFT >= off)
4002                                 return true;
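                        /* fall through */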
4003                 case SHMEM_HUGE_ADVISE:
4004                         /* TODO: implement fadvise() hints */
4005                         return (vma->vm_flags & VM_HUGEPAGE);
4006                 default:
4007                         VM_BUG_ON(1);
4008                         return false;
4009         }
4010 }
4011 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4012
4013 #else /* !CONFIG_SHMEM */
4014
4015 /*
4016  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4017  *
4018  * This is intended for small systems where the benefits of the full
4019  * shmem code (swap-backed and resource-limited) are outweighed by
4020  * their complexity. On systems without swap this code should be
4021  * effectively equivalent, but much lighter weight.
4022  */
4023
4024 static struct file_system_type shmem_fs_type = {
4025         .name           = "tmpfs",
4026         .mount          = ramfs_mount,
4027         .kill_sb        = kill_litter_super,
4028         .fs_flags       = FS_USERNS_MOUNT,
4029 };
4030
4031 int __init shmem_init(void)
4032 {
4033         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4034
4035         shm_mnt = kern_mount(&shmem_fs_type);
4036         BUG_ON(IS_ERR(shm_mnt));
4037
4038         return 0;
4039 }
4040
4041 int shmem_unuse(swp_entry_t swap, struct page *page)
4042 {
4043         return 0;
4044 }
4045
4046 int shmem_lock(struct file *file, int lock, struct user_struct *user)
4047 {
4048         return 0;
4049 }
4050
4051 void shmem_unlock_mapping(struct address_space *mapping)
4052 {
4053 }
4054
4055 #ifdef CONFIG_MMU
4056 unsigned long shmem_get_unmapped_area(struct file *file,
4057                                       unsigned long addr, unsigned long len,
4058                                       unsigned long pgoff, unsigned long flags)
4059 {
4060         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4061 }
4062 #endif
4063
4064 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4065 {
4066         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4067 }
4068 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4069
4070 #define shmem_vm_ops                            generic_file_vm_ops
4071 #define shmem_file_operations                   ramfs_file_operations
4072 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
4073 #define shmem_acct_size(flags, size)            0
4074 #define shmem_unacct_size(flags, size)          do {} while (0)
4075
4076 #endif /* CONFIG_SHMEM */
4077
4078 /* common code */
4079
4080 static struct dentry_operations anon_ops = {
4081         .d_dname = simple_dname
4082 };
4083
4084 static struct file *__shmem_file_setup(const char *name, loff_t size,
4085                                        unsigned long flags, unsigned int i_flags)
4086 {
4087         struct file *res;
4088         struct inode *inode;
4089         struct path path;
4090         struct super_block *sb;
4091         struct qstr this;
4092
4093         if (IS_ERR(shm_mnt))
4094                 return ERR_CAST(shm_mnt);
4095
4096         if (size < 0 || size > MAX_LFS_FILESIZE)
4097                 return ERR_PTR(-EINVAL);
4098
4099         if (shmem_acct_size(flags, size))
4100                 return ERR_PTR(-ENOMEM);
4101
4102         res = ERR_PTR(-ENOMEM);
4103         this.name = name;
4104         this.len = strlen(name);
4105         this.hash = 0; /* will go */
4106         sb = shm_mnt->mnt_sb;
4107         path.mnt = mntget(shm_mnt);
4108         path.dentry = d_alloc_pseudo(sb, &this);
4109         if (!path.dentry)
4110                 goto put_memory;
4111         d_set_d_op(path.dentry, &anon_ops);
4112
4113         res = ERR_PTR(-ENOSPC);
4114         inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
4115         if (!inode)
4116                 goto put_memory;
4117
4118         inode->i_flags |= i_flags;
4119         d_instantiate(path.dentry, inode);
4120         inode->i_size = size;
4121         clear_nlink(inode);     /* It is unlinked */
4122         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4123         if (IS_ERR(res))
4124                 goto put_path;
4125
4126         res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
4127                   &shmem_file_operations);
4128         if (IS_ERR(res))
4129                 goto put_path;
4130
4131         return res;
4132
4133 put_memory:
4134         shmem_unacct_size(flags, size);
4135 put_path:
4136         path_put(&path);
4137         return res;
4138 }
4139
4140 /**
4141  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4142  *      kernel internal.  There will be NO LSM permission checks against the
4143  *      underlying inode.  So users of this interface must do LSM checks at a
4144  *      higher layer.  The users are the big_key and shm implementations.  LSM
4145  *      checks are provided at the key or shm level rather than the inode.
4146  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4147  * @size: size to be set for the file
4148  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4149  */
4150 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4151 {
4152         return __shmem_file_setup(name, size, flags, S_PRIVATE);
4153 }
4154
4155 /**
4156  * shmem_file_setup - get an unlinked file living in tmpfs
4157  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4158  * @size: size to be set for the file
4159  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4160  */
4161 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4162 {
4163         return __shmem_file_setup(name, size, flags, 0);
4164 }
4165 EXPORT_SYMBOL_GPL(shmem_file_setup);
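
/*
 * In-kernel usage sketch (hypothetical caller, not from this file):
 *
 *	struct file *file = shmem_file_setup("dev/example", 1UL << 20, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...read, write or mmap through the returned file, fput() when done...
 *
 * Passing VM_NORESERVE in flags suppresses the up-front size accounting.
 */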
4166
4167 /**
4168  * shmem_zero_setup - setup a shared anonymous mapping
4169  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
4170  */
4171 int shmem_zero_setup(struct vm_area_struct *vma)
4172 {
4173         struct file *file;
4174         loff_t size = vma->vm_end - vma->vm_start;
4175
4176         /*
4177          * Cloning a new file under mmap_sem leads to a lock ordering conflict
4178          * between XFS directory reading and selinux: since this file is only
4179          * accessible to the user through its mapping, use S_PRIVATE flag to
4180          * bypass file security, in the same way as shmem_kernel_file_setup().
4181          */
4182         file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
4183         if (IS_ERR(file))
4184                 return PTR_ERR(file);
4185
4186         if (vma->vm_file)
4187                 fput(vma->vm_file);
4188         vma->vm_file = file;
4189         vma->vm_ops = &shmem_vm_ops;
4190
4191         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4192                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4193                         (vma->vm_end & HPAGE_PMD_MASK)) {
4194                 khugepaged_enter(vma, vma->vm_flags);
4195         }
4196
4197         return 0;
4198 }
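
/*
 * Context (assumption: standard mm call path): shmem_zero_setup() is what
 * backs a shared anonymous mapping, i.e. the object behind
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap_pgoff() calls here while setting up such a vma.
 */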
4199
4200 /**
4201  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4202  * @mapping:    the page's address_space
4203  * @index:      the page index
4204  * @gfp:        the page allocator flags to use if allocating
4205  *
4206  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4207  * with any new page allocations done using the specified allocation flags.
4208  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4209  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4210  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4211  *
4212  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4213  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4214  */
4215 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4216                                          pgoff_t index, gfp_t gfp)
4217 {
4218 #ifdef CONFIG_SHMEM
4219         struct inode *inode = mapping->host;
4220         struct page *page;
4221         int error;
4222
4223         BUG_ON(mapping->a_ops != &shmem_aops);
4224         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4225                                   gfp, NULL, NULL);
4226         if (error)
4227                 page = ERR_PTR(error);
4228         else
4229                 unlock_page(page);
4230         return page;
4231 #else
4232         /*
4233          * The tiny !SHMEM case uses ramfs without swap
4234          */
4235         return read_cache_page_gfp(mapping, index, gfp);
4236 #endif
4237 }
4238 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
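
/*
 * Caller sketch (modelled on the i915/ttm usage mentioned above; treat
 * the exact gfp mix as an assumption):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the page, then put_page() when finished...
 */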