arch/arm64/kernel/machine_kexec.c (cascardo/linux.git)
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>

#include "cpu-reset.h"

/*
 * Global symbols for the arm64_relocate_new_kernel routine, defined in
 * relocate_kernel.S.
 */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;

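/* Entry point of the new image, recorded at kexec load time. */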
static unsigned long kimage_start;

/**
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
        const struct kimage *kimage)
{
        unsigned long i;

        pr_debug("%s:%d:\n", func, line);
        pr_debug("  kexec kimage info:\n");
        pr_debug("    type:        %d\n", kimage->type);
        pr_debug("    start:       %lx\n", kimage->start);
        pr_debug("    head:        %lx\n", kimage->head);
        pr_debug("    nr_segments: %lu\n", kimage->nr_segments);

        for (i = 0; i < kimage->nr_segments; i++) {
                pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
                        i,
                        kimage->segment[i].mem,
                        kimage->segment[i].mem + kimage->segment[i].memsz,
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz / PAGE_SIZE);
        }
}

void machine_kexec_cleanup(struct kimage *kimage)
{
        /* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging CPUs, or if
 * CPUs are stuck in the kernel. This avoids a panic once we hit
 * machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
        kimage_start = kimage->start;

        kexec_image_info(kimage);

        if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
                pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
                return -EBUSY;
        }

        return 0;
}

/**
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
        kimage_entry_t *entry;

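        /*
         * Each entry in the list is a physical address with an IND_* flag
         * in its low bits: IND_DESTINATION starts a new destination range,
         * IND_SOURCE marks a page to be copied there, IND_INDIRECTION
         * points at the next page of entries, and IND_DONE ends the list.
         */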
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
                void *addr;

                /* flush the list entries. */
                __flush_dcache_area(entry, sizeof(kimage_entry_t));

                flag = *entry & IND_FLAGS;
                if (flag == IND_DONE)
                        break;

                addr = phys_to_virt(*entry & PAGE_MASK);

                switch (flag) {
                case IND_INDIRECTION:
                        /*
                         * Back up one entry so the loop's entry++ lands on
                         * the first entry of the new list page.
                         */
                        entry = (kimage_entry_t *)addr - 1;
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
                        __flush_dcache_area(addr, PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
                default:
                        BUG();
                }
        }
}

/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
        unsigned long i;

        pr_debug("%s:\n", __func__);

        for (i = 0; i < kimage->nr_segments; i++) {
                pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
                        i,
                        kimage->segment[i].mem,
                        kimage->segment[i].mem + kimage->segment[i].memsz,
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz / PAGE_SIZE);

                __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
                        kimage->segment[i].memsz);
        }
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
        phys_addr_t reboot_code_buffer_phys;
        void *reboot_code_buffer;

        /*
         * New CPUs may have become stuck in the kernel after we loaded
         * the image.
         */
        BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));

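        /*
         * The control code page was set aside for us by the core kexec
         * code at image load time; use it to hold the copy of the
         * relocation stub made below.
         */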
        reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
        reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

        kexec_image_info(kimage);

        pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
                kimage->control_code_page);
        pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
                &reboot_code_buffer_phys);
        pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
                reboot_code_buffer);
        pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
                arm64_relocate_new_kernel);
        pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
                __func__, __LINE__, arm64_relocate_new_kernel_size,
                arm64_relocate_new_kernel_size);

        /*
         * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
         * after the kernel is shut down.
         */
        memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
                arm64_relocate_new_kernel_size);

        /*
         * Flush the reboot_code_buffer in preparation for its execution;
         * it will run with the MMU and caches disabled, so the copy must
         * be clean to the PoC and stale instructions invalidated.
         */
        __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
        flush_icache_range((uintptr_t)reboot_code_buffer,
                (uintptr_t)reboot_code_buffer + arm64_relocate_new_kernel_size);

        /* Flush the kimage list and its buffers. */
        kexec_list_flush(kimage);

        /*
         * Flush the new image if it is already in place, i.e. the head
         * entry carries IND_DONE and there is nothing to relocate.
         */
        if (kimage->head & IND_DONE)
                kexec_segment_flush(kimage);

        pr_info("Bye!\n");

        /* Disable all DAIF exceptions. */
        asm volatile ("msr daifset, #0xf" : : : "memory");
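        /*
         * DAIF covers the Debug, SError (abort), IRQ, and FIQ masks; with
         * all four set, nothing can interrupt this CPU between here and
         * the jump into the relocation stub.
         */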

        /*
         * cpu_soft_restart will shut down the MMU, disable data caches, then
         * transfer control to the reboot_code_buffer which contains a copy of
         * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
         * uses physical addressing to relocate the new image to its final
         * position and transfers control to the image entry point when the
         * relocation is complete.
         */

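        /*
         * The leading 1 requests the EL2 reset path when it is available;
         * the remaining arguments are passed through to the relocation
         * code (the kimage list head, the new image's entry point, and an
         * unused third argument).
         */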
        cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
                kimage_start, 0);

        BUG(); /* Should never get here. */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
        /* Empty routine needed to avoid build errors. */
}