Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso...
[cascardo/linux.git] / arch / s390 / mm / page-states.c
1 /*
2  * Copyright IBM Corp. 2008
3  *
4  * Guest page hinting for unused pages.
5  *
6  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/gfp.h>
14 #include <linux/init.h>
15
/*
 * ESSA (Extract and Set Storage Attributes) operation codes used below
 * to communicate page usage state to the hypervisor.
 */
#define ESSA_SET_STABLE         1
#define ESSA_SET_UNUSED         2

/*
 * Guest page hinting is enabled by default; the "cmma=" kernel parameter
 * can override this, and cmma_init() clears it if ESSA is unavailable.
 */
static int cmma_flag = 1;
20
21 static int __init cmma(char *str)
22 {
23         char *parm;
24
25         parm = strstrip(str);
26         if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
27                 cmma_flag = 1;
28                 return 1;
29         }
30         cmma_flag = 0;
31         if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
32                 return 1;
33         return 0;
34 }
35 __setup("cmma=", cmma);
36
/*
 * Probe whether the ESSA instruction is available.
 *
 * Returns 0 if ESSA executed successfully, -EOPNOTSUPP if it faulted.
 * The operands must live in fixed registers because the instruction is
 * hand-encoded via .insn below.
 */
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	/*
	 * If ESSA is not implemented it raises a program check. NOTE(review):
	 * on s390 the program-check PSW points past the faulting instruction,
	 * i.e. at label 0, so EX_TABLE(0b,1b) catches a fault in the preceding
	 * ESSA and skips the "la" that would clear rc — rc then keeps
	 * -EOPNOTSUPP. On success the "la" sets rc to 0.
	 */
	asm volatile(
		"       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:     la      %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	return rc;
}
50
51 void __init cmma_init(void)
52 {
53         if (!cmma_flag)
54                 return;
55         if (cmma_test_essa())
56                 cmma_flag = 0;
57 }
58
/*
 * Mark every page of a 2^order block as unused via ESSA, hinting to the
 * hypervisor that the backing can be discarded. The ESSA command must be
 * an immediate ("i" constraint), hence the separate stable/unstable
 * helpers instead of one parameterized function.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
69
70 void arch_free_page(struct page *page, int order)
71 {
72         if (!cmma_flag)
73                 return;
74         set_page_unstable(page, order);
75 }
76
/*
 * Mark every page of a 2^order block as stable (in use) via ESSA, so the
 * hypervisor must preserve its contents. Mirror of set_page_unstable();
 * duplicated because the ESSA command is an immediate operand.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
87
88 void arch_alloc_page(struct page *page, int order)
89 {
90         if (!cmma_flag)
91                 return;
92         set_page_stable(page, order);
93 }
94
95 void arch_set_page_states(int make_stable)
96 {
97         unsigned long flags, order, t;
98         struct list_head *l;
99         struct page *page;
100         struct zone *zone;
101
102         if (!cmma_flag)
103                 return;
104         if (make_stable)
105                 drain_local_pages(NULL);
106         for_each_populated_zone(zone) {
107                 spin_lock_irqsave(&zone->lock, flags);
108                 for_each_migratetype_order(order, t) {
109                         list_for_each(l, &zone->free_area[order].free_list[t]) {
110                                 page = list_entry(l, struct page, lru);
111                                 if (make_stable)
112                                         set_page_stable(page, order);
113                                 else
114                                         set_page_unstable(page, order);
115                         }
116                 }
117                 spin_unlock_irqrestore(&zone->lock, flags);
118         }
119 }