/*
 * x86/efi: Remove unused efi_call* macros
 * [cascardo/linux.git] / arch / x86 / include / asm / efi.h
 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/i387.h>
/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1

/* Loader signature strings identifying a 32-bit vs. 64-bit EFI loader. */
#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"
#ifdef CONFIG_X86_32


/*
 * 32-bit entry point for EFI calls made while physical-mode mappings
 * are in effect (asmlinkage: all arguments passed on the stack).
 */
extern unsigned long asmlinkage efi_call_phys(void *, ...);

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
/*
 * Use this macro if your virtual call returns a non-void value.
 *
 * The regparm(0) cast forces all arguments onto the stack (see the
 * comment above), and kernel_fpu_begin()/kernel_fpu_end() bracket the
 * firmware call so its FPU usage cannot corrupt task FPU state.
 */
#define efi_call_virt(f, args...) \
({									\
	efi_status_t __s;						\
	kernel_fpu_begin();						\
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
	__s;								\
})
45
/*
 * Use this macro if your virtual call does not return any value.
 * Same as efi_call_virt() above, minus the efi_status_t capture.
 */
#define __efi_call_virt(f, args...) \
({									\
	kernel_fpu_begin();						\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
})
54
55 #define efi_ioremap(addr, size, type, attr)     ioremap_cache(addr, size)
56
#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

/*
 * 64-bit varargs trampoline used for both physical- and virtual-mode
 * EFI service calls (asmlinkage: arguments forced onto the stack).
 */
extern u64 asmlinkage efi_call(void *fp, ...);

/* Physical-mode calls are dispatched through the same trampoline. */
#define efi_call_phys(f, args...)		efi_call((f), args)
/*
 * Call runtime service @f in virtual mode and yield its efi_status_t.
 *
 * Before the call: resync the EFI page tables with the kernel's
 * (efi_sync_low_kernel_mappings()), disable preemption, and save FPU
 * state; everything is undone in reverse order after the call returns.
 */
#define efi_call_virt(f, ...)						\
({									\
	efi_status_t __s;						\
									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	__kernel_fpu_end();						\
	preempt_enable();						\
	__s;								\
})
77
/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)

/*
 * 64-bit version takes the region type and attributes into account
 * (unlike the 32-bit macro, which ignores them); defined out of line.
 */
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
				 u32 type, u64 attribute);

#endif /* CONFIG_X86_32 */
88
#define efi_in_nmi()	in_nmi()

/*
 * Arch-side EFI helpers: memmap reservation, region mapping, page-table
 * setup/teardown and quirk handling.
 * NOTE(review): grouping inferred from names — see the out-of-line
 * definitions for exact semantics.
 */
extern int add_efi_memmap;
extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
111
/*
 * EFI state handed over from a previous kernel (consumed via
 * parse_efi_setup()/efi_reuse_config() declared above).
 * NOTE(review): the role of each field is inferred from its name —
 * confirm against the code that populates/consumes efi_setup.
 */
struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

/* Address of the efi_setup_data block passed in, parsed by parse_efi_setup(). */
extern u64 efi_setup;
121
#ifdef CONFIG_EFI

/*
 * True when the firmware's word size matches the kernel's: 64-bit
 * firmware on a 64-bit kernel, or 32-bit firmware on a 32-bit kernel.
 */
static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}
128
129 static inline bool efi_runtime_supported(void)
130 {
131         if (efi_is_native())
132                 return true;
133
134         if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
135                 return true;
136
137         return false;
138 }
139
extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

#ifdef CONFIG_EFI_MIXED
/* Mixed mode (64-bit kernel, 32-bit firmware): thunking entry points. */
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
/* !CONFIG_EFI_MIXED: no thunking required — provide no-op stubs. */
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	/* Nothing to remap without mixed mode; report success. */
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */
163
164
/* arch specific definitions used by the stub code */

/*
 * Firmware entry points recorded for the EFI boot stub.  All handles
 * and function pointers are stored as u64 so the (packed) layout is the
 * same regardless of firmware word size; is64 records which one it is,
 * and calls are dispatched through the ->call() thunk — see
 * efi_call_early() below.
 */
struct efi_config {
	u64 image_handle;
	u64 table;
	u64 allocate_pool;
	u64 allocate_pages;
	u64 get_memory_map;
	u64 free_pool;
	u64 free_pages;
	u64 locate_handle;
	u64 handle_protocol;
	u64 exit_boot_services;
	u64 text_output;
	efi_status_t (*call)(unsigned long, ...);
	bool is64;
} __packed;

extern struct efi_config *efi_early;
184
/*
 * Invoke one of the boot-services entry points recorded in efi_early
 * through its calling-convention thunk.
 *
 * Fix: drop the stray trailing semicolon from the expansion.  It turned
 * every call site into "expr;;" (an extra empty statement) and broke use
 * of the macro in expression context, e.g. as an if-body followed by
 * else, or on the right-hand side of an assignment.
 */
#define efi_call_early(f, ...)						\
	efi_early->call(efi_early->f, __VA_ARGS__)
187
extern bool efi_reboot_required(void);

#else
/* CONFIG_EFI=n: stub out the interfaces used by generic code. */
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */