/* arch/arm64/kernel/insn.c */
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT     BIT(31)
#define AARCH64_INSN_N_BIT      BIT(22)

/* Encoding class table, indexed by bits [28:25] of the instruction */
static int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
        return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
        if (!aarch64_insn_is_hint(insn))
                return false;

        switch (insn & 0xFE0) {
        case AARCH64_INSN_HINT_YIELD:
        case AARCH64_INSN_HINT_WFE:
        case AARCH64_INSN_HINT_WFI:
        case AARCH64_INSN_HINT_SEV:
        case AARCH64_INSN_HINT_SEVL:
                return false;
        default:
                return true;
        }
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
        return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
                aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
                aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
        unsigned long uintaddr = (uintptr_t) addr;
        bool module = !core_kernel_text(uintaddr);
        struct page *page;

        if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
                page = vmalloc_to_page(addr);
        else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
                page = pfn_to_page(PHYS_PFN(__pa(addr)));
        else
                return addr;

        BUG_ON(!page);
        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                        (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        u32 val;

        ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}
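
/*
 * Usage sketch (illustrative, not part of the build): read an
 * instruction and classify it. 'addr' is a hypothetical code address.
 *
 *	u32 insn;
 *
 *	if (!aarch64_insn_read(addr, &insn) &&
 *	    aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS)
 *		pr_info("branch/system instruction at %p\n", addr);
 */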

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
        void *waddr = addr;
        unsigned long flags = 0;
        int ret;

        raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

        patch_unmap(FIX_TEXT_POKE0);
        raw_spin_unlock_irqrestore(&patch_lock, flags);

        return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        insn = cpu_to_le32(insn);
        return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
                return false;

        return  aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_svc(insn) ||
                aarch64_insn_is_hvc(insn) ||
                aarch64_insn_is_smc(insn) ||
                aarch64_insn_is_brk(insn) ||
                aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where each of the instruction before
 * modification and the instruction after modification is a B, BL, NOP,
 * BKPT, SVC, HVC, or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
        return __aarch64_insn_hotpatch_safe(old_insn) &&
               __aarch64_insn_hotpatch_safe(new_insn);
}
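
/*
 * Usage sketch (illustrative): deciding whether a live patch can take
 * the cheap path. 'addr', 'old' and 'new' are hypothetical; 'old' is
 * the instruction currently at 'addr'.
 *
 *	if (aarch64_insn_hotpatch_safe(old, new))
 *		ret = aarch64_insn_patch_text_nosync(addr, new);
 *	else
 *		ret = aarch64_insn_patch_text_sync(&addr, &new, 1);
 */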

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                flush_icache_range((uintptr_t)tp,
                                   (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}
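
/*
 * Usage sketch (illustrative): NOP out one instruction. This flushes
 * the I-cache but does not serialize other CPUs; callers such as
 * aarch64_insn_patch_text() below follow up with an IPI when needed.
 *
 *	int err = aarch64_insn_patch_text_nosync(addr,
 *						 aarch64_insn_gen_nop());
 */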

struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The first CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == 1) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /*
                 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
                 * which ends with a "dsb; isb" pair guaranteeing global
                 * visibility.
                 */
                /* Notify other processors with an additional increment. */
                atomic_inc(&pp->cpu_count);
        } else {
                while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                        cpu_relax();
                isb();
        }

        return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine(aarch64_insn_patch_text_cb, &patch,
                            cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        int ret;
        u32 insn;

        /* Unsafe to patch multiple instructions without synchronization */
        if (cnt == 1) {
                ret = aarch64_insn_read(addrs[0], &insn);
                if (ret)
                        return ret;

                if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
                        /*
                         * The ARMv8 architecture doesn't guarantee that all
                         * CPUs see the new instruction once
                         * aarch64_insn_patch_text_nosync() returns, so send
                         * IPIs to all other CPUs to achieve instruction
                         * synchronization.
                         */
                        ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
                        kick_all_cpus_sync();
                        return ret;
                }
        }

        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
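
/*
 * Usage sketch (illustrative): retargeting a single branch. With
 * cnt == 1 and both old and new instructions hotpatch-safe this takes
 * the IPI fast path above; everything else goes through stop_machine().
 * 'site' and 'target' are hypothetical addresses.
 *
 *	void *addrs[] = { (void *)site };
 *	u32 insns[] = { aarch64_insn_gen_branch_imm(site, target,
 *						    AARCH64_INSN_BRANCH_NOLINK) };
 *	int err = aarch64_insn_patch_text(addrs, insns, 1);
 */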

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
                                                u32 *maskp, int *shiftp)
{
        u32 mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case AARCH64_INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        case AARCH64_INSN_IMM_7:
                mask = BIT(7) - 1;
                shift = 15;
                break;
        case AARCH64_INSN_IMM_6:
        case AARCH64_INSN_IMM_S:
                mask = BIT(6) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_R:
                mask = BIT(6) - 1;
                shift = 16;
                break;
        default:
                return -EINVAL;
        }

        *maskp = mask;
        *shiftp = shift;

        return 0;
}

#define ADR_IMM_HILOSPLIT       2
#define ADR_IMM_SIZE            SZ_2M
#define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT         29
#define ADR_IMM_HISHIFT         5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
        u32 immlo, immhi, mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
                immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
                insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
                mask = ADR_IMM_SIZE - 1;
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
                               type);
                        return 0;
                }
        }

        return (insn >> shift) & mask;
}
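
/*
 * Worked example: ADR splits its 21-bit byte offset into immlo
 * (bits [30:29]) and immhi (bits [23:5]). For 0x10000041, i.e.
 * "adr x1, #8": immlo = 0 and immhi = 2, so the code above reassembles
 * (2 << 2) | 0 and aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR,
 * 0x10000041) returns 8.
 */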

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
                                            u32 insn, u64 imm)
{
        u32 immlo, immhi, mask;
        int shift;

        if (insn == AARCH64_BREAK_FAULT)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
                imm >>= ADR_IMM_HILOSPLIT;
                immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
                imm = immlo | immhi;
                mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
                        (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
                               type);
                        return AARCH64_BREAK_FAULT;
                }
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}
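
/*
 * Sketch (illustrative, hypothetical values): patching only the
 * immediate field of an existing instruction, e.g. pointing a CBZ at a
 * new target. Branch immediates are encoded in words, hence the >> 2.
 *
 *	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 *					     (new_target - pc) >> 2);
 */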

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
                                        u32 insn,
                                        enum aarch64_insn_register reg)
{
        int shift;

        if (insn == AARCH64_BREAK_FAULT)
                return AARCH64_BREAK_FAULT;

        if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
                pr_err("%s: unknown register encoding %d\n", __func__, reg);
                return AARCH64_BREAK_FAULT;
        }

        switch (type) {
        case AARCH64_INSN_REGTYPE_RT:
        case AARCH64_INSN_REGTYPE_RD:
                shift = 0;
                break;
        case AARCH64_INSN_REGTYPE_RN:
                shift = 5;
                break;
        case AARCH64_INSN_REGTYPE_RT2:
        case AARCH64_INSN_REGTYPE_RA:
                shift = 10;
                break;
        case AARCH64_INSN_REGTYPE_RM:
                shift = 16;
                break;
        default:
                pr_err("%s: unknown register type encoding %d\n", __func__,
                       type);
                return AARCH64_BREAK_FAULT;
        }

        insn &= ~(GENMASK(4, 0) << shift);
        insn |= reg << shift;

        return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
                                         u32 insn)
{
        u32 size;

        switch (type) {
        case AARCH64_INSN_SIZE_8:
                size = 0;
                break;
        case AARCH64_INSN_SIZE_16:
                size = 1;
                break;
        case AARCH64_INSN_SIZE_32:
                size = 2;
                break;
        case AARCH64_INSN_SIZE_64:
                size = 3;
                break;
        default:
                pr_err("%s: unknown size encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        insn &= ~GENMASK(31, 30);
        insn |= size << 30;

        return insn;
}

/*
 * Returns the byte offset from pc to addr, or 'range' itself on error;
 * since a valid offset is always below 'range', callers detect failure
 * by comparing the result against it.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
                                     long range)
{
        long offset;

        if ((pc & 0x3) || (addr & 0x3)) {
                pr_err("%s: A64 instructions must be word aligned\n", __func__);
                return range;
        }

        offset = ((long)addr - (long)pc);

        if (offset < -range || offset >= range) {
                pr_err("%s: offset out of range\n", __func__);
                return range;
        }

        return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
                                          enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        /*
         * B/BL support a [-128M, 128M) offset; the arm64 virtual address
         * layout guarantees all kernel and module text is within +/-128M.
         */
        offset = branch_imm_common(pc, addr, SZ_128M);
        if (offset >= SZ_128M)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_BRANCH_LINK:
                insn = aarch64_insn_get_bl_value();
                break;
        case AARCH64_INSN_BRANCH_NOLINK:
                insn = aarch64_insn_get_b_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                             offset >> 2);
}
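
/*
 * Usage sketch (illustrative): this helper is the building block for
 * ftrace-style call-site patching. 'callsite' and 'tracer' are
 * hypothetical addresses.
 *
 *	u32 bl = aarch64_insn_gen_branch_imm(callsite, tracer,
 *					     AARCH64_INSN_BRANCH_LINK);
 *
 *	if (bl != AARCH64_BREAK_FAULT)
 *		err = aarch64_insn_patch_text_nosync((void *)callsite, bl);
 */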

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
                                     enum aarch64_insn_register reg,
                                     enum aarch64_insn_variant variant,
                                     enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        offset = branch_imm_common(pc, addr, SZ_1M);
        if (offset >= SZ_1M)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_BRANCH_COMP_ZERO:
                insn = aarch64_insn_get_cbz_value();
                break;
        case AARCH64_INSN_BRANCH_COMP_NONZERO:
                insn = aarch64_insn_get_cbnz_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                             offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
                                     enum aarch64_insn_condition cond)
{
        u32 insn;
        long offset;

        offset = branch_imm_common(pc, addr, SZ_1M);
        /* branch_imm_common() signals failure by returning the range */
        if (offset >= SZ_1M)
                return AARCH64_BREAK_FAULT;

        insn = aarch64_insn_get_bcond_value();

        if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
                pr_err("%s: unknown condition encoding %d\n", __func__, cond);
                return AARCH64_BREAK_FAULT;
        }
        insn |= cond;

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                             offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
        return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
        return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
                                enum aarch64_insn_branch_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_BRANCH_NOLINK:
                insn = aarch64_insn_get_br_value();
                break;
        case AARCH64_INSN_BRANCH_LINK:
                insn = aarch64_insn_get_blr_value();
                break;
        case AARCH64_INSN_BRANCH_RETURN:
                insn = aarch64_insn_get_ret_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
                                    enum aarch64_insn_register base,
                                    enum aarch64_insn_register offset,
                                    enum aarch64_insn_size_type size,
                                    enum aarch64_insn_ldst_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
                insn = aarch64_insn_get_ldr_reg_value();
                break;
        case AARCH64_INSN_LDST_STORE_REG_OFFSET:
                insn = aarch64_insn_get_str_reg_value();
                break;
        default:
                pr_err("%s: unknown load/store encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_ldst_size(size, insn);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            base);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
                                            offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
                                     enum aarch64_insn_register reg2,
                                     enum aarch64_insn_register base,
                                     int offset,
                                     enum aarch64_insn_variant variant,
                                     enum aarch64_insn_ldst_type type)
{
        u32 insn;
        int shift;

        switch (type) {
        case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
                insn = aarch64_insn_get_ldp_pre_value();
                break;
        case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
                insn = aarch64_insn_get_stp_pre_value();
                break;
        case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
                insn = aarch64_insn_get_ldp_post_value();
                break;
        case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
                insn = aarch64_insn_get_stp_post_value();
                break;
        default:
                pr_err("%s: unknown load/store encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
                        pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
                               __func__, offset);
                        return AARCH64_BREAK_FAULT;
                }
                shift = 2;
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
                        pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
                               __func__, offset);
                        return AARCH64_BREAK_FAULT;
                }
                shift = 3;
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
                                            reg1);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
                                            reg2);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            base);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
                                             offset >> shift);
}
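
/*
 * Usage sketch (illustrative): emitting the prologue push
 * "stp x29, x30, [sp, #-16]!" the way a JIT (e.g. the arm64 eBPF JIT)
 * does with this helper.
 *
 *	u32 stp = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *				AARCH64_INSN_REG_LR,
 *				AARCH64_INSN_REG_SP,
 *				-16,
 *				AARCH64_INSN_VARIANT_64BIT,
 *				AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */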

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
                                 enum aarch64_insn_register src,
                                 int imm, enum aarch64_insn_variant variant,
                                 enum aarch64_insn_adsb_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_ADSB_ADD:
                insn = aarch64_insn_get_add_imm_value();
                break;
        case AARCH64_INSN_ADSB_SUB:
                insn = aarch64_insn_get_sub_imm_value();
                break;
        case AARCH64_INSN_ADSB_ADD_SETFLAGS:
                insn = aarch64_insn_get_adds_imm_value();
                break;
        case AARCH64_INSN_ADSB_SUB_SETFLAGS:
                insn = aarch64_insn_get_subs_imm_value();
                break;
        default:
                pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        if (imm & ~(SZ_4K - 1)) {
                pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
                              enum aarch64_insn_register src,
                              int immr, int imms,
                              enum aarch64_insn_variant variant,
                              enum aarch64_insn_bitfield_type type)
{
        u32 insn;
        u32 mask;

        switch (type) {
        case AARCH64_INSN_BITFIELD_MOVE:
                insn = aarch64_insn_get_bfm_value();
                break;
        case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
                insn = aarch64_insn_get_ubfm_value();
                break;
        case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
                insn = aarch64_insn_get_sbfm_value();
                break;
        default:
                pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                mask = GENMASK(4, 0);
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
                mask = GENMASK(5, 0);
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        if (immr & ~mask) {
                pr_err("%s: invalid immr encoding %d\n", __func__, immr);
                return AARCH64_BREAK_FAULT;
        }
        if (imms & ~mask) {
                pr_err("%s: invalid imms encoding %d\n", __func__, imms);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
                              int imm, int shift,
                              enum aarch64_insn_variant variant,
                              enum aarch64_insn_movewide_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_MOVEWIDE_ZERO:
                insn = aarch64_insn_get_movz_value();
                break;
        case AARCH64_INSN_MOVEWIDE_KEEP:
                insn = aarch64_insn_get_movk_value();
                break;
        case AARCH64_INSN_MOVEWIDE_INVERSE:
                insn = aarch64_insn_get_movn_value();
                break;
        default:
                pr_err("%s: unknown movewide encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        if (imm & ~(SZ_64K - 1)) {
                pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift != 0 && shift != 16) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn |= (shift >> 4) << 21;

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
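
/*
 * Usage sketch (illustrative): materializing a 64-bit constant in x0
 * with one MOVZ followed by three MOVKs, 16 bits per instruction.
 *
 *	u32 insn[4];
 *	u64 imm = 0x1234567890abcdefUL;
 *	int i;
 *
 *	insn[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					    imm & 0xffff, 0,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_ZERO);
 *	for (i = 1; i < 4; i++)
 *		insn[i] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					(imm >> (16 * i)) & 0xffff, 16 * i,
 *					AARCH64_INSN_VARIANT_64BIT,
 *					AARCH64_INSN_MOVEWIDE_KEEP);
 */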

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
                                         enum aarch64_insn_register src,
                                         enum aarch64_insn_register reg,
                                         int shift,
                                         enum aarch64_insn_variant variant,
                                         enum aarch64_insn_adsb_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_ADSB_ADD:
                insn = aarch64_insn_get_add_value();
                break;
        case AARCH64_INSN_ADSB_SUB:
                insn = aarch64_insn_get_sub_value();
                break;
        case AARCH64_INSN_ADSB_ADD_SETFLAGS:
                insn = aarch64_insn_get_adds_value();
                break;
        case AARCH64_INSN_ADSB_SUB_SETFLAGS:
                insn = aarch64_insn_get_subs_value();
                break;
        default:
                pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift & ~(SZ_32 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift & ~(SZ_64 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data1_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA1_REVERSE_16:
                insn = aarch64_insn_get_rev16_value();
                break;
        case AARCH64_INSN_DATA1_REVERSE_32:
                insn = aarch64_insn_get_rev32_value();
                break;
        case AARCH64_INSN_DATA1_REVERSE_64:
                if (variant != AARCH64_INSN_VARIANT_64BIT) {
                        pr_err("%s: invalid variant for reverse64 %d\n",
                               __func__, variant);
                        return AARCH64_BREAK_FAULT;
                }
                insn = aarch64_insn_get_rev64_value();
                break;
        default:
                pr_err("%s: unknown data1 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_register reg,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data2_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA2_UDIV:
                insn = aarch64_insn_get_udiv_value();
                break;
        case AARCH64_INSN_DATA2_SDIV:
                insn = aarch64_insn_get_sdiv_value();
                break;
        case AARCH64_INSN_DATA2_LSLV:
                insn = aarch64_insn_get_lslv_value();
                break;
        case AARCH64_INSN_DATA2_LSRV:
                insn = aarch64_insn_get_lsrv_value();
                break;
        case AARCH64_INSN_DATA2_ASRV:
                insn = aarch64_insn_get_asrv_value();
                break;
        case AARCH64_INSN_DATA2_RORV:
                insn = aarch64_insn_get_rorv_value();
                break;
        default:
                pr_err("%s: unknown data2 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_register reg1,
                           enum aarch64_insn_register reg2,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data3_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA3_MADD:
                insn = aarch64_insn_get_madd_value();
                break;
        case AARCH64_INSN_DATA3_MSUB:
                insn = aarch64_insn_get_msub_value();
                break;
        default:
                pr_err("%s: unknown data3 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            reg1);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
                                            reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
                                         enum aarch64_insn_register src,
                                         enum aarch64_insn_register reg,
                                         int shift,
                                         enum aarch64_insn_variant variant,
                                         enum aarch64_insn_logic_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_LOGIC_AND:
                insn = aarch64_insn_get_and_value();
                break;
        case AARCH64_INSN_LOGIC_BIC:
                insn = aarch64_insn_get_bic_value();
                break;
        case AARCH64_INSN_LOGIC_ORR:
                insn = aarch64_insn_get_orr_value();
                break;
        case AARCH64_INSN_LOGIC_ORN:
                insn = aarch64_insn_get_orn_value();
                break;
        case AARCH64_INSN_LOGIC_EOR:
                insn = aarch64_insn_get_eor_value();
                break;
        case AARCH64_INSN_LOGIC_EON:
                insn = aarch64_insn_get_eon_value();
                break;
        case AARCH64_INSN_LOGIC_AND_SETFLAGS:
                insn = aarch64_insn_get_ands_value();
                break;
        case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
                insn = aarch64_insn_get_bics_value();
                break;
        default:
                pr_err("%s: unknown logical encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift & ~(SZ_32 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift & ~(SZ_64 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
        s32 imm;

        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
                return (imm << 6) >> 4;
        }

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
                return (imm << 13) >> 11;
        }

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
                return (imm << 18) >> 16;
        }

        /* Unhandled instruction */
        BUG();
}
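
/*
 * Worked example for the shifts above: for B/BL the 26-bit word offset
 * occupies bits [25:0], so "(imm << 6) >> 4" moves the field's sign bit
 * up to bit 31, arithmetic-shifts back to sign-extend, and nets a << 2
 * that converts words to bytes. imm = 0x3ffffff (-1 words) yields -4.
 */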

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
                                                     offset >> 2);

        /* Unhandled instruction */
        BUG();
}
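
/*
 * Usage sketch (illustrative): moving an existing branch to a new
 * target without re-deriving its type. 'pc' and 'new_target' are
 * hypothetical; the new offset must fit the branch's immediate field.
 *
 *	u32 insn;
 *
 *	if (!aarch64_insn_read((void *)pc, &insn) &&
 *	    aarch64_insn_is_branch_imm(insn)) {
 *		insn = aarch64_set_branch_offset(insn, new_target - pc);
 *		err = aarch64_insn_patch_text_nosync((void *)pc, insn);
 *	}
 */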

bool aarch32_insn_is_wide(u32 insn)
{
        return insn >= 0xe800;
}

/*
 * Extract the 4-bit register number from the field starting at 'offset'
 * in a 32-bit AArch32 instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
        return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK       0x7
#define OPC2_OFFSET     5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
        return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK        0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
        return insn & CRM_MASK;
}