/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm
/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm
	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1			// clear MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1			// set MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
9990:	enable_dbg
	.endm
/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm
/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm
#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
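/*
 * Usage sketch (illustrative, not part of the original file): mark a
 * user-space access so that a fault on the tagged instruction branches
 * to a local fixup label through the exception table:
 *
 *	USER(9f, ldr x0, [x1])		// x1 holds a user pointer
 *	...
 * 9:					// hypothetical fixup label
 */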
/*
 * Register aliases.
 */
lr	.req	x30		// link register
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif
/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
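/*
 * Usage sketch (illustrative): emit an instruction for one endianness
 * only, e.g. byte-swap loaded data so the following code can assume a
 * little-endian layout regardless of the kernel's configuration:
 *
 * CPU_BE(	rev	w0, w0	)	// assembles to nothing on LE
 */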
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
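/*
 * Usage sketch (illustrative): combine a 64-bit value passed in two
 * registers; the register holding the low bits must have its upper
 * 32 bits clear:
 *
 *	regs_to_64 x0, x1, x2		// on LE: x0 = (x2 << 32) | x1
 */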
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional scratch register to be used if <dst> == sp, which
	 *       is not allowed in an adrp instruction
	 */
	.macro	adr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.else
	adrp	\tmp, \sym
	add	\dst, \tmp, :lo12:\sym
	.endif
	.endm
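	/*
	 * Usage sketch (illustrative; 'some_symbol' and 'some_stack' are
	 * stand-in names):
	 *
	 *	adr_l	x0, some_symbol		// x0 = &some_symbol
	 *	adr_l	sp, some_stack, x8	// tmp needed since dst == sp
	 */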
	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm
	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
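	/*
	 * Usage sketch (illustrative; symbol names are stand-ins):
	 *
	 *	ldr_l	x0, some_u64		// x0 = some_u64
	 *	ldr_l	w1, some_u32, x2	// 32-bit dst needs a 64-bit tmp
	 *	str_l	x0, some_u64, x3	// some_u64 = x0, x3 is scratch
	 */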
	/*
	 * @sym: The name of the per-cpu variable
	 * @reg: Result of per_cpu(sym, smp_processor_id())
	 * @tmp: scratch register
	 */
	.macro this_cpu_ptr, sym, reg, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el1		// per-cpu offset for this CPU
	add	\reg, \reg, \tmp
	.endm
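	/*
	 * Usage sketch (illustrative; 'some_percpu_var' is a stand-in):
	 *
	 *	this_cpu_ptr some_percpu_var, x25, x26
	 *	// x25 = this CPU's &some_percpu_var, x26 is scratch
	 */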
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm
/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0		// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm
/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
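/*
 * Usage sketch (illustrative): these macros typically provide the
 * stride for a cache maintenance loop:
 *
 *	dcache_line_size x2, x3		// x2 = safe D-cache line size
 *	icache_line_size x2, x3		// x2 = safe I-cache line size
 */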
/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2		// align kaddr down to a line
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr			// promote clean to clean+invalidate
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
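/*
 * Usage sketch (illustrative): clean and invalidate [x0, x0 + x1) to
 * the point of coherency, clobbering x0-x3:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */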
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
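/*
 * Usage sketch (illustrative): copy one page from x1 to x0; both
 * pointers advance by PAGE_SIZE and x2-x9 are clobbered:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */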
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type 	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)
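/*
 * Usage sketch (illustrative): close a routine with ENDPIPROC instead
 * of ENDPROC to additionally emit a __pi_ prefixed alias:
 *
 * ENTRY(memcmp)
 *	...
 * ENDPIPROC(memcmp)		// also defines __pi_memcmp
 */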
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
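/*
 * Usage sketch (illustrative): load a 64-bit assembly-time constant
 * without a literal pool entry; the shortest movz/movk sequence is
 * selected for the value:
 *
 *	mov_q	x0, 0xffff0000ffff0000	// hypothetical constant
 */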
#endif	/* __ASM_ASSEMBLER_H */