/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *  Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/firmware.h>
/*
 * One entry in a __ftr_fixup-style section, emitted by the
 * BEGIN/END_FTR_SECTION assembler macros.  All code offsets are stored
 * relative to the entry itself so the section is position independent
 * (needed for the VDSO).
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry tests */
	unsigned long	value;		/* value required under the mask */
	long		start_off;	/* start of region to patch */
	long		end_off;	/* end of region to patch */
	long		alt_start_off;	/* start of alternative instructions */
	long		alt_end_off;	/* end of alternative instructions */
};

/* Convert an entry-relative offset back into an absolute address. */
static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}
/*
 * Copy one instruction from an alternative section to its final
 * location at @dest, re-targeting any relative branch whose target lies
 * outside the alternative region (branches within the region keep their
 * relative displacement and need no adjustment).
 *
 * Returns 0 on success, 1 if a relative branch could not be translated
 * (target out of range for the branch encoding at the new location).
 */
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target >= alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}
68 static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
70 unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
72 start = calc_addr(fcur, fcur->start_off);
73 end = calc_addr(fcur, fcur->end_off);
74 alt_start = calc_addr(fcur, fcur->alt_start_off);
75 alt_end = calc_addr(fcur, fcur->alt_end_off);
77 if ((alt_end - alt_start) > (end - start))
80 if ((value & fcur->mask) == fcur->value)
86 for (; src < alt_end; src++, dest++) {
87 if (patch_alt_instruction(src, dest, alt_start, alt_end))
91 for (; dest < end; dest++)
92 patch_instruction(dest, PPC_INST_NOP);
97 void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
99 struct fixup_entry *fcur, *fend;
104 for (; fcur < fend; fcur++) {
105 if (patch_feature_section(value, fcur)) {
107 printk("Unable to patch feature section at %p - %p" \
109 calc_addr(fcur, fcur->start_off),
110 calc_addr(fcur, fcur->end_off),
111 calc_addr(fcur, fcur->alt_start_off),
112 calc_addr(fcur, fcur->alt_end_off));
117 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
122 if (!(value & CPU_FTR_LWSYNC))
128 for (; start < end; start++) {
129 dest = (void *)start + *start;
130 patch_instruction(dest, PPC_INST_LWSYNC);
/*
 * On relocatable ppc64 kernels booted away from address zero, copy the
 * (now fully patched) low-memory exception vectors from where the
 * kernel is actually running back down to their architectural home at
 * KERNELBASE, instruction by instruction.  No-op everywhere else.
 */
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}
155 void apply_feature_fixups(void)
157 struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
160 * Apply the CPU-specific and firmware specific fixups to kernel text
161 * (nop out sections not relevant to this CPU or this firmware).
163 do_feature_fixups(spec->cpu_features,
164 PTRRELOC(&__start___ftr_fixup),
165 PTRRELOC(&__stop___ftr_fixup));
167 do_feature_fixups(spec->mmu_features,
168 PTRRELOC(&__start___mmu_ftr_fixup),
169 PTRRELOC(&__stop___mmu_ftr_fixup));
171 do_lwsync_fixups(spec->cpu_features,
172 PTRRELOC(&__start___lwsync_fixup),
173 PTRRELOC(&__stop___lwsync_fixup));
176 do_feature_fixups(powerpc_firmware_features,
177 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
182 #ifdef CONFIG_FTR_FIXUP_SELFTEST
185 if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
187 /* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
188 static struct fixup_entry fixup;
/*
 * Byte offset of @p from the start of @entry — the inverse of
 * calc_addr(), used to build fixup_entry records for the self-tests.
 */
static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}
195 static void test_basic_patching(void)
197 extern unsigned int ftr_fixup_test1;
198 extern unsigned int end_ftr_fixup_test1;
199 extern unsigned int ftr_fixup_test1_orig;
200 extern unsigned int ftr_fixup_test1_expected;
201 int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
203 fixup.value = fixup.mask = 8;
204 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
205 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
206 fixup.alt_start_off = fixup.alt_end_off = 0;
209 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
211 /* Check we don't patch if the value matches */
212 patch_feature_section(8, &fixup);
213 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
215 /* Check we do patch if the value doesn't match */
216 patch_feature_section(0, &fixup);
217 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
219 /* Check we do patch if the mask doesn't match */
220 memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
221 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
222 patch_feature_section(~8, &fixup);
223 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
226 static void test_alternative_patching(void)
228 extern unsigned int ftr_fixup_test2;
229 extern unsigned int end_ftr_fixup_test2;
230 extern unsigned int ftr_fixup_test2_orig;
231 extern unsigned int ftr_fixup_test2_alt;
232 extern unsigned int ftr_fixup_test2_expected;
233 int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
235 fixup.value = fixup.mask = 0xF;
236 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
237 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
238 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
239 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
242 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
244 /* Check we don't patch if the value matches */
245 patch_feature_section(0xF, &fixup);
246 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
248 /* Check we do patch if the value doesn't match */
249 patch_feature_section(0, &fixup);
250 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
252 /* Check we do patch if the mask doesn't match */
253 memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
254 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
255 patch_feature_section(~0xF, &fixup);
256 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
259 static void test_alternative_case_too_big(void)
261 extern unsigned int ftr_fixup_test3;
262 extern unsigned int end_ftr_fixup_test3;
263 extern unsigned int ftr_fixup_test3_orig;
264 extern unsigned int ftr_fixup_test3_alt;
265 int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
267 fixup.value = fixup.mask = 0xC;
268 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
269 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
270 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
271 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
274 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
276 /* Expect nothing to be patched, and the error returned to us */
277 check(patch_feature_section(0xF, &fixup) == 1);
278 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
279 check(patch_feature_section(0, &fixup) == 1);
280 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
281 check(patch_feature_section(~0xF, &fixup) == 1);
282 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
285 static void test_alternative_case_too_small(void)
287 extern unsigned int ftr_fixup_test4;
288 extern unsigned int end_ftr_fixup_test4;
289 extern unsigned int ftr_fixup_test4_orig;
290 extern unsigned int ftr_fixup_test4_alt;
291 extern unsigned int ftr_fixup_test4_expected;
292 int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
295 /* Check a high-bit flag */
296 flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
297 fixup.value = fixup.mask = flag;
298 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
299 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
300 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
301 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
304 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
306 /* Check we don't patch if the value matches */
307 patch_feature_section(flag, &fixup);
308 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
310 /* Check we do patch if the value doesn't match */
311 patch_feature_section(0, &fixup);
312 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
314 /* Check we do patch if the mask doesn't match */
315 memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
316 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
317 patch_feature_section(~flag, &fixup);
318 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
/*
 * Self-test: relative branches inside the alternative section were
 * handled during the boot-time fixup pass; just compare the result.
 */
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	/* Pointer difference counts instructions; memcmp wants bytes */
	int size = (&end_ftr_fixup_test5 - &ftr_fixup_test5) *
			sizeof(unsigned int);

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}
/*
 * Self-test: a branch in the alternative section whose target lies
 * outside the section was re-targeted during the boot-time fixup pass;
 * just compare the result.
 */
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	/* Pointer difference counts instructions; memcmp wants bytes */
	int size = (&end_ftr_fixup_test6 - &ftr_fixup_test6) *
			sizeof(unsigned int);

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}
341 static void test_cpu_macros(void)
343 extern u8 ftr_fixup_test_FTR_macros;
344 extern u8 ftr_fixup_test_FTR_macros_expected;
345 unsigned long size = &ftr_fixup_test_FTR_macros_expected -
346 &ftr_fixup_test_FTR_macros;
348 /* The fixups have already been done for us during boot */
349 check(memcmp(&ftr_fixup_test_FTR_macros,
350 &ftr_fixup_test_FTR_macros_expected, size) == 0);
/*
 * Self-test: verify the firmware-feature macro variants (ppc64 only)
 * produced the expected code after the boot-time fixups ran.
 */
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
367 static void test_lwsync_macros(void)
369 extern u8 lwsync_fixup_test;
370 extern u8 end_lwsync_fixup_test;
371 extern u8 lwsync_fixup_test_expected_LWSYNC;
372 extern u8 lwsync_fixup_test_expected_SYNC;
373 unsigned long size = &end_lwsync_fixup_test -
376 /* The fixups have already been done for us during boot */
377 if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
378 check(memcmp(&lwsync_fixup_test,
379 &lwsync_fixup_test_expected_LWSYNC, size) == 0);
381 check(memcmp(&lwsync_fixup_test,
382 &lwsync_fixup_test_expected_SYNC, size) == 0);
386 static int __init test_feature_fixups(void)
388 printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
390 test_basic_patching();
391 test_alternative_patching();
392 test_alternative_case_too_big();
393 test_alternative_case_too_small();
394 test_alternative_case_with_branch();
395 test_alternative_case_with_external_branch();
398 test_lwsync_macros();
402 late_initcall(test_feature_fixups);
#endif /* CONFIG_FTR_FIXUP_SELFTEST */