/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
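
/*
 * Example (illustrative, not part of the original source): bits [28:25]
 * select the encoding class, so a load such as 0xf9400260 ("ldr x0, [x19]")
 * indexes entry 12 of the table above:
 *
 *	aarch64_get_insn_class(0xf9400260);	// AARCH64_INSN_CLS_LDST
 */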
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
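
/*
 * Usage sketch (hypothetical caller, not from the original file): replace
 * the instruction at @addr with a NOP via the helpers above, so the write
 * goes through the fixmap mapping rather than the (possibly read-only)
 * kernel text mapping. Note that cache maintenance is still the caller's
 * problem; aarch64_insn_patch_text_nosync() below adds it.
 *
 *	u32 old;
 *
 *	if (!aarch64_insn_read(addr, &old))
 *		aarch64_insn_write(addr, aarch64_insn_gen_nop());
 */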
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
		__aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
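
/*
 * Example (hypothetical caller): patch a single hotpatch-safe instruction
 * and flush it out of the local instruction stream. Cross-CPU instruction
 * synchronization still needs kick_all_cpus_sync() or the stop_machine()
 * path further below.
 *
 *	aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
 */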
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
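
/*
 * Example (hypothetical caller, illustrative addresses): patch a batch of
 * sites. With cnt > 1, or with an instruction that is not hotpatch-safe,
 * this falls back to the stop_machine() based
 * aarch64_insn_patch_text_sync() path above.
 *
 *	void *addrs[2] = { site0, site1 };
 *	u32 insns[2] = { aarch64_insn_gen_nop(), aarch64_insn_gen_nop() };
 *
 *	aarch64_insn_patch_text(addrs, insns, 2);
 */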
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
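
/*
 * Example (illustrative): pull the raw imm26 field out of a B instruction.
 * 0x14000010 is "b .+0x40", so the field value is 0x10, i.e. the byte
 * offset divided by 4:
 *
 *	aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, 0x14000010); // 0x10
 */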
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
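
/*
 * Example (hypothetical addresses): generate "bl target" for a direct call,
 * e.g. when redirecting a call site, then patch it in with the helper above:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)callsite,
 *					       (unsigned long)target,
 *					       AARCH64_INSN_BRANCH_LINK);
 *
 *	aarch64_insn_patch_text_nosync(callsite, insn);
 */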
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
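
/*
 * Example: the generated NOP is the HINT #0 encoding, i.e.
 * aarch64_insn_gen_nop() == 0xd503201f.
 */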
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
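
/*
 * Example (illustrative): build "add x0, x1, #16":
 *
 *	u32 insn = aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0,
 *						AARCH64_INSN_REG_1, 16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_ADSB_ADD);
 */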
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
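
/*
 * Example (illustrative): load the 32-bit constant 0x12345678 into w0 with
 * a MOVZ/MOVK pair, the usual way wide immediates are materialized:
 *
 *	u32 movz = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x5678, 0,
 *					     AARCH64_INSN_VARIANT_32BIT,
 *					     AARCH64_INSN_MOVEWIDE_ZERO);
 *	u32 movk = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x1234, 16,
 *					     AARCH64_INSN_VARIANT_32BIT,
 *					     AARCH64_INSN_MOVEWIDE_KEEP);
 */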
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
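
/*
 * Example (illustrative): "mov x0, x1" is the ORR alias with the zero
 * register as the first source:
 *
 *	u32 insn = aarch64_insn_gen_logical_shifted_reg(AARCH64_INSN_REG_0,
 *							AARCH64_INSN_REG_ZR,
 *							AARCH64_INSN_REG_1, 0,
 *							AARCH64_INSN_VARIANT_64BIT,
 *							AARCH64_INSN_LOGIC_ORR);
 */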
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
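
/*
 * Example (hypothetical names): retarget an existing branch at @pc to
 * @new_target without regenerating it from scratch:
 *
 *	s32 old_offset = aarch64_get_branch_offset(insn);
 *	u32 new_insn = aarch64_set_branch_offset(insn, new_target - pc);
 */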
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}
/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}
#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}
#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};