x86/power/64: Fix kernel text mapping corruption during image restoration
arch/x86/power/hibernate_64.c (cascardo/linux.git)
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

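/* Root of the temporary page tables used while the image is being restored. */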
pgd_t *temp_level4_pgt __visible;

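/*
 * Address of the safe page holding the relocated copy of core_restore_code
 * (set up by relocate_restore_code() below).
 */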
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(void)
{
        pmd_t *pmd;
        pud_t *pud;

        /*
         * The new mapping only has to cover the page containing the image
         * kernel's entry point (jump_address_phys), because the switch over to
         * it is carried out by relocated code running from a page allocated
         * specifically for this purpose and covered by the identity mapping, so
         * the temporary kernel text mapping is only needed for the final jump.
         * Moreover, in that mapping the virtual address of the image kernel's
         * entry point must be the same as its virtual address in the image
         * kernel (restore_jump_address), so the image kernel's
         * restore_registers() code doesn't find itself in a different area of
         * the virtual address space after switching over to the original page
         * tables used by the image kernel.
         */
        pud = (pud_t *)get_safe_page(GFP_ATOMIC);
        if (!pud)
                return -ENOMEM;

        pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd)
                return -ENOMEM;

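        /*
         * Map the 2M page containing jump_address_phys at restore_jump_address
         * as executable kernel text, then link the new PMD and PUD into the
         * temporary PGD at the slots covering restore_jump_address.
         */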
        set_pmd(pmd + pmd_index(restore_jump_address),
                __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
        set_pud(pud + pud_index(restore_jump_address),
                __pud(__pa(pmd) | _KERNPG_TABLE));
        set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
                __pgd(__pa(pud) | _KERNPG_TABLE));

        return 0;
}

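/*
 * Page table allocation callback for kernel_ident_mapping_init(); it has to
 * return "safe" pages, i.e. pages that will not be overwritten by image data
 * during the restore.
 */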
static void *alloc_pgt_page(void *context)
{
        return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
                .kernel_mapping = true,
        };
        unsigned long mstart, mend;
        int result;
        int i;

        temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!temp_level4_pgt)
                return -ENOMEM;

        /* Prepare a temporary mapping for the kernel text */
        result = set_up_temporary_text_mapping();
        if (result)
                return result;

        /*
         * Set up the direct mapping from scratch.  The relocated restore code
         * will run with these page tables and access the image pages through
         * this identity mapping, so it has to cover all of the ranges in the
         * kernel's original direct mapping.
         */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend   = pfn_mapped[i].end << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info, temp_level4_pgt,
                                                   mstart, mend);

                if (result)
                        return result;
        }

        return 0;
}

static int relocate_restore_code(void)
{
        pgd_t *pgd;
        pud_t *pud;

        relocated_restore_code = get_safe_page(GFP_ATOMIC);
        if (!relocated_restore_code)
                return -ENOMEM;

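        /*
         * core_restore_code has to run from a safe page, i.e. one that is not
         * going to be overwritten while the image pages are being copied into
         * place, so copy it out of the kernel text.
         */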
        memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

        /* Make the page containing the relocated code executable */
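        /*
         * The page may be mapped at any of the three levels, i.e. by a 1G PUD,
         * a 2M PMD or a 4K PTE, so walk the current page tables by hand and
         * clear _PAGE_NX in whichever entry actually maps it.
         */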
        pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
        pud = pud_offset(pgd, relocated_restore_code);
        if (pud_large(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
        } else {
                pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

                if (pmd_large(*pmd)) {
                        set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                } else {
                        pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

                        set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
                }
        }
        __flush_tlb_all();

        return 0;
}

int swsusp_arch_resume(void)
{
        int error;

        /* We have enough memory and from now on we cannot recover. */
        error = set_up_temporary_mappings();
        if (error)
                return error;

        error = relocate_restore_code();
        if (error)
                return error;

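        /*
         * restore_image() switches over to the temporary page tables, copies
         * the image pages into place and jumps to the image kernel's entry
         * point, so on success it does not return here.
         */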
        restore_image();
        return 0;
}

/*
 *      pfn_is_nosave - check if given pfn is in the 'nosave' section,
 *              which holds data that is neither saved to nor restored from
 *              the hibernation image
 */
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
        unsigned long jump_address;
        unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
};

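/*
 * Whenever the layout of struct restore_data_record changes, this magic value
 * should be changed too, so that an image header written by a mismatched
 * kernel is rejected by arch_hibernation_header_restore().
 */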
#define RESTORE_MAGIC   0x123456789ABCDEF0UL

/**
 *      arch_hibernation_header_save - populate the architecture specific part
 *              of a hibernation image header
 *      @addr: address to save the data at
 *      @max_size: maximum size of the data that can be stored at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct restore_data_record *rdr = addr;

        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
        rdr->jump_address = (unsigned long)&restore_registers;
        rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
}

/**
 *      arch_hibernation_header_restore - read the architecture specific data
 *              from the hibernation image header
 *      @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
        struct restore_data_record *rdr = addr;

        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}