/* fs/proc/task_mmu.c (cascardo/linux.git, commit fa95ab2d36740803e06da898a4851db317b86a9e) */
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        anon = get_mm_counter(mm, MM_ANONPAGES);
        file = get_mm_counter(mm, MM_FILEPAGES);
        shmem = get_mm_counter(mm, MM_SHMEMPAGES);

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = anon + file + shmem;
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
        ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
        pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
        seq_printf(m,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmPin:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "RssAnon:\t%8lu kB\n"
                "RssFile:\t%8lu kB\n"
                "RssShmem:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
                "VmPMD:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                total_vm << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                mm->pinned_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                anon << (PAGE_SHIFT-10),
                file << (PAGE_SHIFT-10),
                shmem << (PAGE_SHIFT-10),
                mm->data_vm << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                ptes >> 10,
                pmds >> 10,
                swap << (PAGE_SHIFT-10));
        hugetlb_report_usage(m, mm);
}
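
/*
 * Usage sketch (illustrative, not part of this file's interface):
 * task_mem() backs the VmPeak/VmSize/.../VmSwap lines of
 * /proc/[pid]/status, so a userspace reader can pull a single field
 * out with ordinary stdio.  The field chosen below is just an example.
 *
 *      FILE *f = fopen("/proc/self/status", "r");
 *      char line[256];
 *      unsigned long rss_kb = 0;
 *
 *      while (f && fgets(line, sizeof(line), f)) {
 *              if (sscanf(line, "VmRSS: %lu kB", &rss_kb) == 1)
 *                      break;
 *      }
 *      if (f)
 *              fclose(f);
 */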

unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        *shared = get_mm_counter(mm, MM_FILEPAGES) +
                        get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->data_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
}
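
/*
 * For reference, the task_statm() values surface in /proc/[pid]/statm
 * as "size resident shared text lib data dt", all counted in pages
 * rather than kB.  A hedged conversion sketch for the first field:
 *
 *      unsigned long size_pages;
 *      long page_kb = sysconf(_SC_PAGESIZE) / 1024;
 *      FILE *f = fopen("/proc/self/statm", "r");
 *
 *      if (f && fscanf(f, "%lu", &size_pages) == 1)
 *              printf("VmSize is about %lu kB\n", size_pages * page_kb);
 *      if (f)
 *              fclose(f);
 */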

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
        struct task_struct *task = priv->task;

        task_lock(task);
        priv->task_mempolicy = get_task_policy(task);
        mpol_get(priv->task_mempolicy);
        task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
        mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
        struct mm_struct *mm = priv->mm;

        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
        mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma == priv->tail_vma)
                return NULL;
        return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        if (m->count < m->size) /* vma is copied successfully */
                m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned int pos = *ppos;

        /* See m_cache_vma(). Zero at the start or after lseek. */
        if (last_addr == -1UL)
                return NULL;

        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !atomic_inc_not_zero(&mm->mm_users))
                return NULL;

        down_read(&mm->mmap_sem);
        hold_task_mempolicy(priv);
        priv->tail_vma = get_gate_vma(mm);

        if (last_addr) {
                vma = find_vma(mm, last_addr);
                if (vma && (vma = m_next_vma(priv, vma)))
                        return vma;
        }

        m->version = 0;
        if (pos < mm->map_count) {
                for (vma = mm->mmap; pos; pos--) {
                        m->version = vma->vm_start;
                        vma = vma->vm_next;
                }
                return vma;
        }

        /* we do not bother to update m->version in this case */
        if (pos == mm->map_count && priv->tail_vma)
                return priv->tail_vma;

        vma_stop(priv);
        return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *next;

        (*pos)++;
        next = m_next_vma(priv, v);
        if (!next)
                vma_stop(priv);
        return next;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(v))
                vma_stop(priv);
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

static int proc_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops, int psize)
{
        struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        return proc_maps_open(inode, file, ops,
                                sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
                    struct vm_area_struct *vma, int is_pid)
{
        int stack = 0;

        if (is_pid) {
                stack = vma->vm_start <= vma->vm_mm->start_stack &&
                        vma->vm_end >= vma->vm_mm->start_stack;
        } else {
                struct inode *inode = priv->inode;
                struct task_struct *task;

                rcu_read_lock();
                task = pid_task(proc_pid(inode), PIDTYPE_PID);
                if (task)
                        stack = vma_is_stack_for_task(vma, task);
                rcu_read_unlock();
        }
        return stack;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        struct proc_maps_private *priv = m->private;
        vm_flags_t flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start, end;
        dev_t dev = 0;
        const char *name = NULL;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }

        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (stack_guard_page_start(vma, start))
                start += PAGE_SIZE;
        end = vma->vm_end;
        if (stack_guard_page_end(vma, end))
                end -= PAGE_SIZE;

        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                        start,
                        end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        pgoff,
                        MAJOR(dev), MINOR(dev), ino);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "\n");
                goto done;
        }

        if (vma->vm_ops && vma->vm_ops->name) {
                name = vma->vm_ops->name(vma);
                if (name)
                        goto done;
        }

        name = arch_vma_name(vma);
        if (!name) {
                if (!mm) {
                        name = "[vdso]";
                        goto done;
                }

                if (vma->vm_start <= mm->brk &&
                    vma->vm_end >= mm->start_brk) {
                        name = "[heap]";
                        goto done;
                }

                if (is_stack(priv, vma, is_pid))
                        name = "[stack]";
        }

done:
        if (name) {
                seq_pad(m, ' ');
                seq_puts(m, name);
        }
        seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
        show_map_vma(m, v, is_pid);
        m_cache_vma(m, v);
        return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
        return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
        return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
        .open           = tid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *      - 1M 3-user-pages add up to 8KB errors;
 *      - supports mapcount up to 2^24, or 16M;
 *      - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
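
/*
 * Worked example of the fixed-point scheme above (illustrative only,
 * assuming a 4K page size): one page shared by three processes adds
 *
 *      (4096 << PSS_SHIFT) / 3 = 5592405
 *
 * to pss.  For 3000 such pages the accumulated pss is 16777215000,
 * and 16777215000 >> PSS_SHIFT = 4095999 bytes, one byte short of the
 * exact 4096000.  Dividing down to whole bytes per page first
 * (4096 / 3 = 1365) would report 3000 * 1365 = 4095000 instead,
 * losing a full kilobyte to rounding.
 */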

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
        u64 pss;
        u64 swap_pss;
        bool check_shmem_swap;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty)
{
        int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;

        if (PageAnon(page))
                mss->anonymous += size;

        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
        if (young || page_is_young(page) || PageReferenced(page))
                mss->referenced += size;

        /*
         * page_count(page) == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page were mapped with a PTE, it
         * would elevate page_count().
         */
        if (page_count(page) == 1) {
                if (dirty || PageDirty(page))
                        mss->private_dirty += size;
                else
                        mss->private_clean += size;
                mss->pss += (u64)size << PSS_SHIFT;
                return;
        }

        for (i = 0; i < nr; i++, page++) {
                int mapcount = page_mapcount(page);

                if (mapcount >= 2) {
                        if (dirty || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
                        if (dirty || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += PAGE_SIZE << PSS_SHIFT;
                }
        }
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;

        mss->swap += shmem_partial_swap_usage(
                        walk->vma->vm_file->f_mapping, addr, end);

        return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (!non_swap_entry(swpent)) {
                        int mapcount;

                        mss->swap += PAGE_SIZE;
                        mapcount = swp_swapcount(swpent);
                        if (mapcount >= 2) {
                                u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

                                do_div(pss_delta, mapcount);
                                mss->swap_pss += pss_delta;
                        } else {
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                        }
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
        } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                                                        && pte_none(*pte))) {
                page = find_get_entry(vma->vm_file->f_mapping,
                                                linear_page_index(vma, addr));
                if (!page)
                        return;

                if (radix_tree_exceptional_entry(page))
                        mss->swap += PAGE_SIZE;
                else
                        page_cache_release(page);

                return;
        }

        if (!page)
                return;

        smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page;

        /* FOLL_DUMP will return -EFAULT on huge zero page */
        page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
        if (IS_ERR_OR_NULL(page))
                return;
        mss->anonymous_thp += HPAGE_PMD_SIZE;
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and prevents it from collapsing
         * things under us.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(pte, addr, walk);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
        /*
         * Don't forget to update Documentation/ on changes.
         */
        static const char mnemonics[BITS_PER_LONG][2] = {
                /*
                 * In case we meet a flag we don't know about.
                 */
                [0 ... (BITS_PER_LONG-1)] = "??",

                [ilog2(VM_READ)]        = "rd",
                [ilog2(VM_WRITE)]       = "wr",
                [ilog2(VM_EXEC)]        = "ex",
                [ilog2(VM_SHARED)]      = "sh",
                [ilog2(VM_MAYREAD)]     = "mr",
                [ilog2(VM_MAYWRITE)]    = "mw",
                [ilog2(VM_MAYEXEC)]     = "me",
                [ilog2(VM_MAYSHARE)]    = "ms",
                [ilog2(VM_GROWSDOWN)]   = "gd",
                [ilog2(VM_PFNMAP)]      = "pf",
                [ilog2(VM_DENYWRITE)]   = "dw",
#ifdef CONFIG_X86_INTEL_MPX
                [ilog2(VM_MPX)]         = "mp",
#endif
                [ilog2(VM_LOCKED)]      = "lo",
                [ilog2(VM_IO)]          = "io",
                [ilog2(VM_SEQ_READ)]    = "sr",
                [ilog2(VM_RAND_READ)]   = "rr",
                [ilog2(VM_DONTCOPY)]    = "dc",
                [ilog2(VM_DONTEXPAND)]  = "de",
                [ilog2(VM_ACCOUNT)]     = "ac",
                [ilog2(VM_NORESERVE)]   = "nr",
                [ilog2(VM_HUGETLB)]     = "ht",
                [ilog2(VM_ARCH_1)]      = "ar",
                [ilog2(VM_DONTDUMP)]    = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
                [ilog2(VM_SOFTDIRTY)]   = "sd",
#endif
                [ilog2(VM_MIXEDMAP)]    = "mm",
                [ilog2(VM_HUGEPAGE)]    = "hg",
                [ilog2(VM_NOHUGEPAGE)]  = "nh",
                [ilog2(VM_MERGEABLE)]   = "mg",
                [ilog2(VM_UFFD_MISSING)]= "um",
                [ilog2(VM_UFFD_WP)]     = "uw",
        };
        size_t i;

        seq_puts(m, "VmFlags: ");
        for (i = 0; i < BITS_PER_LONG; i++) {
                if (vma->vm_flags & (1UL << i)) {
                        seq_printf(m, "%c%c ",
                                   mnemonics[i][0], mnemonics[i][1]);
                }
        }
        seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
        }
        if (page) {
                int mapcount = page_mapcount(page);

                if (mapcount >= 2)
                        mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
                else
                        mss->private_hugetlb += huge_page_size(hstate_vma(vma));
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
                .hugetlb_entry = smaps_hugetlb_range,
#endif
                .mm = vma->vm_mm,
                .private = &mss,
        };

        memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
        if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
                /*
                 * For shared or readonly shmem mappings we know that all
                 * swapped out pages belong to the shmem object, and we can
                 * obtain the swap value much more efficiently. For private
                 * writable mappings, we might have COW pages that are
                 * not affected by the parent swapped out pages of the shmem
                 * object, so we have to distinguish them during the page
                 * walk, unless we know that the shmem object (or the part
                 * mapped by our VMA) has no swapped out pages at all.
                 */
                unsigned long shmem_swapped = shmem_swap_usage(vma);

                if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
                                        !(vma->vm_flags & VM_WRITE)) {
                        mss.swap = shmem_swapped;
                } else {
                        mss.check_shmem_swap = true;
                        smaps_walk.pte_hole = smaps_pte_hole;
                }
        }
#endif

        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);

        show_map_vma(m, vma, is_pid);

        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
                   "Pss:            %8lu kB\n"
                   "Shared_Clean:   %8lu kB\n"
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
                   "Shared_Hugetlb: %8lu kB\n"
                   "Private_Hugetlb: %7lu kB\n"
                   "Swap:           %8lu kB\n"
                   "SwapPss:        %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
                   "Locked:         %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean  >> 10,
                   mss.shared_dirty  >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
                   mss.anonymous_thp >> 10,
                   mss.shared_hugetlb >> 10,
                   mss.private_hugetlb >> 10,
                   mss.swap >> 10,
                   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10,
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

        show_smap_vma_flags(m, vma);
        m_cache_vma(m, vma);
        return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
        return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
        return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
        .open           = pid_smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
        .open           = tid_smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};
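
/*
 * Userspace sketch (hedged; the field name comes from the format
 * printed by show_smap() above): summing the Pss lines of
 * /proc/[pid]/smaps approximates a process's proportional footprint.
 *
 *      FILE *f = fopen("/proc/self/smaps", "r");
 *      char line[256];
 *      unsigned long kb, pss_total = 0;
 *
 *      while (f && fgets(line, sizeof(line), f)) {
 *              if (sscanf(line, "Pss: %lu kB", &kb) == 1)
 *                      pss_total += kb;
 *      }
 *      if (f)
 *              fclose(f);
 */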

enum clear_refs_types {
        CLEAR_REFS_ALL = 1,
        CLEAR_REFS_ANON,
        CLEAR_REFS_MAPPED,
        CLEAR_REFS_SOFT_DIRTY,
        CLEAR_REFS_MM_HIWATER_RSS,
        CLEAR_REFS_LAST,
};

struct clear_refs_private {
        enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
        /*
         * The soft-dirty tracker uses #PF-s to catch writes
         * to pages, so write-protect the pte as well. See
         * Documentation/vm/soft-dirty.txt for a full description
         * of how soft-dirty works.
         */
        pte_t ptent = *pte;

        if (pte_present(ptent)) {
                ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
                ptent = pte_wrprotect(ptent);
                ptent = pte_clear_soft_dirty(ptent);
                ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
                set_pte_at(vma->vm_mm, addr, pte, ptent);
        }
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);

        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);

        set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);
                        goto out;
                }

                page = pmd_page(*pmd);

                /* Clear accessed and referenced bits. */
                pmdp_test_and_clear_young(vma, addr, pmd);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
out:
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);
                        continue;
                }

                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;

        if (vma->vm_flags & VM_PFNMAP)
                return 1;

        /*
         * Writing 1 to /proc/pid/clear_refs affects all pages.
         * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
         * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
         * Writing 4 to /proc/pid/clear_refs affects all pages.
         */
        if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
                return 1;
        if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
                return 1;
        return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
        int itype;
        int rv;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        rv = kstrtoint(strstrip(buffer), 10, &itype);
        if (rv < 0)
                return rv;
        type = (enum clear_refs_types)itype;
        if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
                return -EINVAL;

        task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct clear_refs_private cp = {
                        .type = type,
                };
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .test_walk = clear_refs_test_walk,
                        .mm = mm,
                        .private = &cp,
                };

                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        down_write(&mm->mmap_sem);
                        reset_mm_hiwater_rss(mm);
                        up_write(&mm->mmap_sem);
                        goto out_mm;
                }

                down_read(&mm->mmap_sem);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
                                up_read(&mm->mmap_sem);
                                down_write(&mm->mmap_sem);
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
                                }
                                downgrade_write(&mm->mmap_sem);
                                break;
                        }
                        mmu_notifier_invalidate_range_start(mm, 0, -1);
                }
                walk_page_range(0, ~0UL, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
                flush_tlb_mm(mm);
                up_read(&mm->mmap_sem);
out_mm:
                mmput(mm);
        }
        put_task_struct(task);

        return count;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
};
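
/*
 * Hedged usage sketch for the soft-dirty tracker (see
 * Documentation/vm/soft-dirty.txt): clear the bits, let the task run,
 * then test bit 55 of the matching pagemap entry.  The pid, address
 * and page size below are illustrative and error handling is omitted.
 *
 *      int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *      write(fd, "4", 1);                  (CLEAR_REFS_SOFT_DIRTY)
 *      close(fd);
 *      ... let the task run for a while ...
 *      uint64_t ent;
 *      int pm = open("/proc/1234/pagemap", O_RDONLY);
 *      pread(pm, &ent, sizeof(ent), (vaddr / 4096) * sizeof(ent));
 *      close(pm);
 *      if (ent & (1ULL << 55))
 *              the page was written to since the clear;
 */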

typedef struct {
        u64 pme;
} pagemap_entry_t;

struct pagemapread {
        int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;
        bool show_pfn;
};

#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
#define PAGEMAP_WALK_MASK       (PMD_MASK)

#define PM_ENTRY_BYTES          sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS          55
#define PM_PFRAME_MASK          GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY           BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE       BIT_ULL(56)
#define PM_FILE                 BIT_ULL(61)
#define PM_SWAP                 BIT_ULL(62)
#define PM_PRESENT              BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
        return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
                          struct pagemapread *pm)
{
        pm->buffer[pm->pos++] = *pme;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr = start;
        int err = 0;

        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(0, 0);
                /* End of address space hole, which we mark as non-present. */
                unsigned long hole_end;

                if (vma)
                        hole_end = min(end, vma->vm_start);
                else
                        hole_end = end;

                for (; addr < hole_end; addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }

                if (!vma)
                        break;

                /* Addresses in the VMA. */
                if (vma->vm_flags & VM_SOFTDIRTY)
                        pme = make_pme(0, PM_SOFT_DIRTY);
                for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }
        }
out:
        return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        u64 frame = 0, flags = 0;
        struct page *page = NULL;

        if (pte_present(pte)) {
                if (pm->show_pfn)
                        frame = pte_pfn(pte);
                flags |= PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
                if (pte_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;
                if (pte_swp_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
                entry = pte_to_swp_entry(pte);
                frame = swp_type(entry) |
                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);
        }

        if (page && !PageAnon(page))
                flags |= PM_FILE;
        if (page && page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct pagemapread *pm = walk->private;
        spinlock_t *ptl;
        pte_t *pte, *orig_pte;
        int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmdp, vma);
        if (ptl) {
                u64 flags = 0, frame = 0;
                pmd_t pmd = *pmdp;

                if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
                        flags |= PM_SOFT_DIRTY;

                /*
                 * Currently the pmd for a thp is always present, because a
                 * thp cannot be swapped out, migrated, or HWPOISONed
                 * (it is split in such cases instead.)
                 * This if-check is just to prepare for future implementation.
                 */
                if (pmd_present(pmd)) {
                        struct page *page = pmd_page(pmd);

                        if (page_mapcount(page) == 1)
                                flags |= PM_MMAP_EXCLUSIVE;

                        flags |= PM_PRESENT;
                        if (pm->show_pfn)
                                frame = pmd_pfn(pmd) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
                }

                for (; addr != end; addr += PAGE_SIZE) {
                        pagemap_entry_t pme = make_pme(frame, flags);

                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                break;
                        if (pm->show_pfn && (flags & PM_PRESENT))
                                frame++;
                }
                spin_unlock(ptl);
                return err;
        }

        if (pmd_trans_unstable(pmdp))
                return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

        /*
         * We can assume that @vma is always valid and that @end never
         * goes beyond vma->vm_end.
         */
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                pagemap_entry_t pme;

                pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        break;
        }
        pte_unmap_unlock(orig_pte, ptl);

        cond_resched();

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        struct vm_area_struct *vma = walk->vma;
        u64 flags = 0, frame = 0;
        int err = 0;
        pte_t pte;

        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        pte = huge_ptep_get(ptep);
        if (pte_present(pte)) {
                struct page *page = pte_page(pte);

                if (!PageAnon(page))
                        flags |= PM_FILE;

                if (page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                flags |= PM_PRESENT;
                if (pm->show_pfn)
                        frame = pte_pfn(pte) +
                                ((addr & ~hmask) >> PAGE_SHIFT);
        }

        for (; addr != end; addr += PAGE_SIZE) {
                pagemap_entry_t pme = make_pme(frame, flags);

                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        return err;
                if (pm->show_pfn && (flags & PM_PRESENT))
                        frame++;
        }

        cond_resched();

        return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct mm_struct *mm = file->private_data;
        struct pagemapread pm;
        struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
        int ret = 0, copied = 0;

        if (!mm || !atomic_inc_not_zero(&mm->mm_users))
                goto out;

        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_mm;

        ret = 0;
        if (!count)
                goto out_mm;

        /* do not disclose physical addresses: attack vector */
        pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

        pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_mm;

        pagemap_walk.pmd_entry = pagemap_pmd_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = mm->task_size;

        /* watch out for wraparound */
        if (svpfn > mm->task_size >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        ret = 0;
        while (count && (start_vaddr < end_vaddr)) {
                int len;
                unsigned long end;

                pm.pos = 0;
                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
                down_read(&mm->mmap_sem);
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);
                start_vaddr = end;

                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {
                        ret = -EFAULT;
                        goto out_free;
                }
                copied += len;
                buf += len;
                count -= len;
        }
        *ppos += copied;
        if (!ret || ret == PM_END_OF_BUFFER)
                ret = copied;

out_free:
        kfree(pm.buffer);
out_mm:
        mmput(mm);
out:
        return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
        struct mm_struct *mm;

        mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(mm))
                return PTR_ERR(mm);
        file->private_data = mm;
        return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
        struct mm_struct *mm = file->private_data;

        if (mm)
                mmdrop(mm);
        return 0;
}

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
        .open           = pagemap_open,
        .release        = pagemap_release,
};
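
/*
 * A hedged userspace decoding sketch for one pagemap entry, following
 * the bit layout documented above pagemap_read() (bits 0-54 PFN or
 * swap encoding, 55 soft-dirty, 56 exclusive, 61 file, 62 swap,
 * 63 present):
 *
 *      uint64_t ent;                       read via pread() at offset
 *                                          (vaddr >> 12) * 8 for 4K pages
 *      int present    = !!(ent & (1ULL << 63));
 *      int swapped    = !!(ent & (1ULL << 62));
 *      int file_page  = !!(ent & (1ULL << 61));
 *      int exclusive  = !!(ent & (1ULL << 56));
 *      int soft_dirty = !!(ent & (1ULL << 55));
 *      uint64_t pfn   = ent & ((1ULL << 55) - 1);   valid if present
 *      unsigned type  = pfn & 0x1f;                 swap type if swapped
 *      uint64_t off   = pfn >> 5;                   swap offset if swapped
 */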
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
        unsigned long pages;
        unsigned long anon;
        unsigned long active;
        unsigned long writeback;
        unsigned long mapcount_max;
        unsigned long dirty;
        unsigned long swapcache;
        unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
        struct proc_maps_private proc_maps;
        struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
                        unsigned long nr_pages)
{
        int count = page_mapcount(page);

        md->pages += nr_pages;
        if (pte_dirty || PageDirty(page))
                md->dirty += nr_pages;

        if (PageSwapCache(page))
                md->swapcache += nr_pages;

        if (PageActive(page) || PageUnevictable(page))
                md->active += nr_pages;

        if (PageWriteback(page))
                md->writeback += nr_pages;

        if (PageAnon(page))
                md->anon += nr_pages;

        if (count > md->mapcount_max)
                md->mapcount_max = count;

        md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
                unsigned long addr)
{
        struct page *page;
        int nid;

        if (!pte_present(pte))
                return NULL;

        page = vm_normal_page(vma, addr, pte);
        if (!page)
                return NULL;

        if (PageReserved(page))
                return NULL;

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))
                return NULL;

        return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
{
        struct numa_maps *md = walk->private;
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *orig_pte;
        pte_t *pte;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                pte_t huge_pte = *(pte_t *)pmd;
                struct page *page;

                page = can_gather_numa_stats(huge_pte, vma, addr);
                if (page)
                        gather_stats(page, md, pte_dirty(huge_pte),
                                     HPAGE_PMD_SIZE/PAGE_SIZE);
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        do {
                struct page *page = can_gather_numa_stats(*pte, vma, addr);
                if (!page)
                        continue;
                gather_stats(page, md, pte_dirty(*pte), 1);

        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
{
        pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;
        struct page *page;

        if (!pte_present(huge_pte))
                return 0;

        page = pte_page(huge_pte);
        if (!page)
                return 0;

        md = walk->private;
        gather_stats(page, md, pte_dirty(huge_pte), 1);
        return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
{
        return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
        struct numa_maps_private *numa_priv = m->private;
        struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
        struct vm_area_struct *vma = v;
        struct numa_maps *md = &numa_priv->md;
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        struct mm_walk walk = {
                .hugetlb_entry = gather_hugetlb_stats,
                .pmd_entry = gather_pte_stats,
                .private = md,
                .mm = mm,
        };
        struct mempolicy *pol;
        char buffer[64];
        int nid;

        if (!mm)
                return 0;

        /* Ensure we start with an empty set of numa_maps statistics. */
        memset(md, 0, sizeof(*md));

        pol = __get_vma_policy(vma, vma->vm_start);
        if (pol) {
                mpol_to_str(buffer, sizeof(buffer), pol);
                mpol_cond_put(pol);
        } else {
                mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
        }

        seq_printf(m, "%08lx %s", vma->vm_start, buffer);

        if (file) {
                seq_puts(m, " file=");
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
        } else if (is_stack(proc_priv, vma, is_pid)) {
                seq_puts(m, " stack");
        }

        if (is_vm_hugetlb_page(vma))
                seq_puts(m, " huge");

        /* mmap_sem is held by m_start */
        walk_page_vma(vma, &walk);

        if (!md->pages)
                goto out;

        if (md->anon)
                seq_printf(m, " anon=%lu", md->anon);

        if (md->dirty)
                seq_printf(m, " dirty=%lu", md->dirty);

        if (md->pages != md->anon && md->pages != md->dirty)
                seq_printf(m, " mapped=%lu", md->pages);

        if (md->mapcount_max > 1)
                seq_printf(m, " mapmax=%lu", md->mapcount_max);

        if (md->swapcache)
                seq_printf(m, " swapcache=%lu", md->swapcache);

        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
                seq_printf(m, " active=%lu", md->active);

        if (md->writeback)
                seq_printf(m, " writeback=%lu", md->writeback);

        for_each_node_state(nid, N_MEMORY)
                if (md->node[nid])
                        seq_printf(m, " N%d=%lu", nid, md->node[nid]);

        seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
        seq_putc(m, '\n');
        m_cache_vma(m, vma);
        return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
        return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
        return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
                          const struct seq_operations *ops)
{
        return proc_maps_open(inode, file, ops,
                                sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
        return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
        return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
        .open           = pid_numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
        .open           = tid_numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};
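
/*
 * Sample /proc/[pid]/numa_maps line as emitted by show_numa_map()
 * above (all values illustrative):
 *
 *      7f2a00000000 default file=/usr/lib/libc.so anon=10 dirty=10
 *      mapped=150 mapmax=4 active=120 N0=100 N1=50 kernelpagesize_kB=4
 *
 * i.e. the start address and memory policy, then only the counters
 * that are non-zero, ending with per-node page counts.
 */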
#endif /* CONFIG_NUMA */