/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT     BIT(31)
#define AARCH64_INSN_N_BIT      BIT(22)

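/*
 * Top-level A64 encoding classes, indexed by bits [28:25] of the
 * instruction word (see aarch64_get_insn_class() below).
 */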
static int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
        return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
        if (!aarch64_insn_is_hint(insn))
                return false;

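        /*
         * The 7-bit hint immediate (CRm:op2, bits [11:5]) selects the
         * HINT alias; the hints listed below have side effects, so
         * everything else is treated as a NOP here.
         */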
        switch (insn & 0xFE0) {
        case AARCH64_INSN_HINT_YIELD:
        case AARCH64_INSN_HINT_WFE:
        case AARCH64_INSN_HINT_WFI:
        case AARCH64_INSN_HINT_SEV:
        case AARCH64_INSN_HINT_SEVL:
                return false;
        default:
                return true;
        }
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
        return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
                aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
                aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

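/*
 * Map the page containing @addr into the text-poke fixmap slot so that
 * read-only kernel/module text can be written through a temporary alias.
 */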
static void __kprobes *patch_map(void *addr, int fixmap)
{
        unsigned long uintaddr = (uintptr_t) addr;
        bool module = !core_kernel_text(uintaddr);
        struct page *page;

        if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else if (!module)
                page = phys_to_page(__pa_symbol(addr));
        else
                return addr;

        BUG_ON(!page);
        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                        (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        u32 val;

        ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
        void *waddr = addr;
        unsigned long flags = 0;
        int ret;

        raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);

        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

        patch_unmap(FIX_TEXT_POKE0);
        raw_spin_unlock_irqrestore(&patch_lock, flags);

        return ret;
}

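/* Instructions are stored little-endian; convert before writing to memory. */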
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        insn = cpu_to_le32(insn);
        return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
                return false;

        return  aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_svc(insn) ||
                aarch64_insn_is_hvc(insn) ||
                aarch64_insn_is_smc(insn) ||
                aarch64_insn_is_brk(insn) ||
                aarch64_insn_is_nop(insn);
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
        /* ldr/ldrsw (literal), prfm */

        return aarch64_insn_is_ldr_lit(insn) ||
                aarch64_insn_is_ldrsw_lit(insn) ||
                aarch64_insn_is_adr_adrp(insn) ||
                aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
        /* b, bl, cb*, tb*, b.cond, br, blr */

        return aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_cbz(insn) ||
                aarch64_insn_is_cbnz(insn) ||
                aarch64_insn_is_tbz(insn) ||
                aarch64_insn_is_tbnz(insn) ||
                aarch64_insn_is_ret(insn) ||
                aarch64_insn_is_br(insn) ||
                aarch64_insn_is_blr(insn) ||
                aarch64_insn_is_bcond(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where each of the instruction before
 * modification and the instruction after modification is a B, BL, NOP,
 * BKPT, SVC, HVC, or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
        return __aarch64_insn_hotpatch_safe(old_insn) &&
               __aarch64_insn_hotpatch_safe(new_insn);
}

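/*
 * Example (illustrative only; pc and target are hypothetical addresses):
 * replacing a NOP with a direct branch is hotpatch-safe, since both the
 * old and the new instruction are in the safe set above:
 *
 *      u32 old = aarch64_insn_gen_nop();
 *      u32 new = aarch64_insn_gen_branch_imm(pc, target,
 *                                            AARCH64_INSN_BRANCH_NOLINK);
 *
 *      if (aarch64_insn_hotpatch_safe(old, new))
 *              aarch64_insn_patch_text_nosync((void *)pc, new);
 */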
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                flush_icache_range((uintptr_t)tp,
                                   (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}

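/* Patch descriptor handed to the stop_machine() callback below. */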
struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The first CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == 1) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /*
                 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
                 * which ends with a "dsb; isb" pair guaranteeing global
                 * visibility.
                 */
                /* Notify other processors with an additional increment. */
                atomic_inc(&pp->cpu_count);
        } else {
                while (atomic_read(&pp->cpu_count) <= num_online_cpus())
                        cpu_relax();
                isb();
        }

        return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine(aarch64_insn_patch_text_cb, &patch,
                            cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        int ret;
        u32 insn;

        /* Unsafe to patch multiple instructions without synchronization */
        if (cnt == 1) {
                ret = aarch64_insn_read(addrs[0], &insn);
                if (ret)
                        return ret;

                if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
                        /*
                         * The ARMv8 architecture doesn't guarantee that all
                         * CPUs see the new instruction after returning from
                         * aarch64_insn_patch_text_nosync(), so send IPIs to
                         * all other CPUs to achieve instruction
                         * synchronization.
                         */
                        ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
                        kick_all_cpus_sync();
                        return ret;
                }
        }

        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
                                                u32 *maskp, int *shiftp)
{
        u32 mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case AARCH64_INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        case AARCH64_INSN_IMM_7:
                mask = BIT(7) - 1;
                shift = 15;
                break;
        case AARCH64_INSN_IMM_6:
        case AARCH64_INSN_IMM_S:
                mask = BIT(6) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_R:
                mask = BIT(6) - 1;
                shift = 16;
                break;
        default:
                return -EINVAL;
        }

        *maskp = mask;
        *shiftp = shift;

        return 0;
}

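/*
 * ADR/ADRP split their 21-bit immediate across the instruction word:
 * immlo occupies bits [30:29] and immhi occupies bits [23:5].  The macros
 * below describe that split for the decode/encode helpers.
 */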
#define ADR_IMM_HILOSPLIT       2
#define ADR_IMM_SIZE            SZ_2M
#define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT         29
#define ADR_IMM_HISHIFT         5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
        u32 immlo, immhi, mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
                immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
                insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
                mask = ADR_IMM_SIZE - 1;
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
                               type);
                        return 0;
                }
        }

        return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
                                            u32 insn, u64 imm)
{
        u32 immlo, immhi, mask;
        int shift;

        if (insn == AARCH64_BREAK_FAULT)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
                imm >>= ADR_IMM_HILOSPLIT;
                immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
                imm = immlo | immhi;
                mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
                        (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
                               type);
                        return AARCH64_BREAK_FAULT;
                }
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
                                 u32 insn)
{
        int shift;

        switch (type) {
        case AARCH64_INSN_REGTYPE_RT:
        case AARCH64_INSN_REGTYPE_RD:
                shift = 0;
                break;
        case AARCH64_INSN_REGTYPE_RN:
                shift = 5;
                break;
        case AARCH64_INSN_REGTYPE_RT2:
        case AARCH64_INSN_REGTYPE_RA:
                shift = 10;
                break;
        case AARCH64_INSN_REGTYPE_RM:
                shift = 16;
                break;
        default:
                pr_err("%s: unknown register type encoding %d\n", __func__,
                       type);
                return 0;
        }

        return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
                                        u32 insn,
                                        enum aarch64_insn_register reg)
{
        int shift;

        if (insn == AARCH64_BREAK_FAULT)
                return AARCH64_BREAK_FAULT;

        if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
                pr_err("%s: unknown register encoding %d\n", __func__, reg);
                return AARCH64_BREAK_FAULT;
        }

        switch (type) {
        case AARCH64_INSN_REGTYPE_RT:
        case AARCH64_INSN_REGTYPE_RD:
                shift = 0;
                break;
        case AARCH64_INSN_REGTYPE_RN:
                shift = 5;
                break;
        case AARCH64_INSN_REGTYPE_RT2:
        case AARCH64_INSN_REGTYPE_RA:
                shift = 10;
                break;
        case AARCH64_INSN_REGTYPE_RM:
                shift = 16;
                break;
        default:
                pr_err("%s: unknown register type encoding %d\n", __func__,
                       type);
                return AARCH64_BREAK_FAULT;
        }

        insn &= ~(GENMASK(4, 0) << shift);
        insn |= reg << shift;

        return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
                                         u32 insn)
{
        u32 size;

        switch (type) {
        case AARCH64_INSN_SIZE_8:
                size = 0;
                break;
        case AARCH64_INSN_SIZE_16:
                size = 1;
                break;
        case AARCH64_INSN_SIZE_32:
                size = 2;
                break;
        case AARCH64_INSN_SIZE_64:
                size = 3;
                break;
        default:
                pr_err("%s: unknown size encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        insn &= ~GENMASK(31, 30);
        insn |= size << 30;

        return insn;
}

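/*
 * Validate a PC-relative branch target and return the byte offset.  On
 * failure (misaligned PC/target, or offset outside [-range, range)) the
 * out-of-range sentinel @range itself is returned, which callers detect
 * with "offset >= range".
 */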
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
                                     long range)
{
        long offset;

        if ((pc & 0x3) || (addr & 0x3)) {
                pr_err("%s: A64 instructions must be word aligned\n", __func__);
                return range;
        }

        offset = ((long)addr - (long)pc);

        if (offset < -range || offset >= range) {
                pr_err("%s: offset out of range\n", __func__);
                return range;
        }

        return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
                                          enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        /*
         * B/BL support a [-128M, 128M) offset; the arm64 virtual address
         * layout guarantees that all kernel and module text is within
         * +/-128M.
         */
        offset = branch_imm_common(pc, addr, SZ_128M);
        if (offset >= SZ_128M)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_BRANCH_LINK:
                insn = aarch64_insn_get_bl_value();
                break;
        case AARCH64_INSN_BRANCH_NOLINK:
                insn = aarch64_insn_get_b_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                             offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
                                     enum aarch64_insn_register reg,
                                     enum aarch64_insn_variant variant,
                                     enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        offset = branch_imm_common(pc, addr, SZ_1M);
        if (offset >= SZ_1M)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_BRANCH_COMP_ZERO:
                insn = aarch64_insn_get_cbz_value();
                break;
        case AARCH64_INSN_BRANCH_COMP_NONZERO:
                insn = aarch64_insn_get_cbnz_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                             offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
                                     enum aarch64_insn_condition cond)
{
        u32 insn;
        long offset;

        offset = branch_imm_common(pc, addr, SZ_1M);
        if (offset >= SZ_1M)
                return AARCH64_BREAK_FAULT;

        insn = aarch64_insn_get_bcond_value();

        if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
                pr_err("%s: unknown condition encoding %d\n", __func__, cond);
                return AARCH64_BREAK_FAULT;
        }
        insn |= cond;

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                             offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
        return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
        return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
                                enum aarch64_insn_branch_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_BRANCH_NOLINK:
                insn = aarch64_insn_get_br_value();
                break;
        case AARCH64_INSN_BRANCH_LINK:
                insn = aarch64_insn_get_blr_value();
                break;
        case AARCH64_INSN_BRANCH_RETURN:
                insn = aarch64_insn_get_ret_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
                                    enum aarch64_insn_register base,
                                    enum aarch64_insn_register offset,
                                    enum aarch64_insn_size_type size,
                                    enum aarch64_insn_ldst_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
                insn = aarch64_insn_get_ldr_reg_value();
                break;
        case AARCH64_INSN_LDST_STORE_REG_OFFSET:
                insn = aarch64_insn_get_str_reg_value();
                break;
        default:
                pr_err("%s: unknown load/store encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_ldst_size(size, insn);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            base);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
                                            offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
                                     enum aarch64_insn_register reg2,
                                     enum aarch64_insn_register base,
                                     int offset,
                                     enum aarch64_insn_variant variant,
                                     enum aarch64_insn_ldst_type type)
{
        u32 insn;
        int shift;

        switch (type) {
        case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
                insn = aarch64_insn_get_ldp_pre_value();
                break;
        case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
                insn = aarch64_insn_get_stp_pre_value();
                break;
        case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
                insn = aarch64_insn_get_ldp_post_value();
                break;
        case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
                insn = aarch64_insn_get_stp_post_value();
                break;
        default:
                pr_err("%s: unknown load/store encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
                        pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
                               __func__, offset);
                        return AARCH64_BREAK_FAULT;
                }
                shift = 2;
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
                        pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
                               __func__, offset);
                        return AARCH64_BREAK_FAULT;
                }
                shift = 3;
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
                                            reg1);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
                                            reg2);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            base);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
                                             offset >> shift);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
                                 enum aarch64_insn_register src,
                                 int imm, enum aarch64_insn_variant variant,
                                 enum aarch64_insn_adsb_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_ADSB_ADD:
                insn = aarch64_insn_get_add_imm_value();
                break;
        case AARCH64_INSN_ADSB_SUB:
                insn = aarch64_insn_get_sub_imm_value();
                break;
        case AARCH64_INSN_ADSB_ADD_SETFLAGS:
                insn = aarch64_insn_get_adds_imm_value();
                break;
        case AARCH64_INSN_ADSB_SUB_SETFLAGS:
                insn = aarch64_insn_get_subs_imm_value();
                break;
        default:
                pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        if (imm & ~(SZ_4K - 1)) {
                pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
                              enum aarch64_insn_register src,
                              int immr, int imms,
                              enum aarch64_insn_variant variant,
                              enum aarch64_insn_bitfield_type type)
{
        u32 insn;
        u32 mask;

        switch (type) {
        case AARCH64_INSN_BITFIELD_MOVE:
                insn = aarch64_insn_get_bfm_value();
                break;
        case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
                insn = aarch64_insn_get_ubfm_value();
                break;
        case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
                insn = aarch64_insn_get_sbfm_value();
                break;
        default:
                pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                mask = GENMASK(4, 0);
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
                mask = GENMASK(5, 0);
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        if (immr & ~mask) {
                pr_err("%s: invalid immr encoding %d\n", __func__, immr);
                return AARCH64_BREAK_FAULT;
        }
        if (imms & ~mask) {
                pr_err("%s: invalid imms encoding %d\n", __func__, imms);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
                              int imm, int shift,
                              enum aarch64_insn_variant variant,
                              enum aarch64_insn_movewide_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_MOVEWIDE_ZERO:
                insn = aarch64_insn_get_movz_value();
                break;
        case AARCH64_INSN_MOVEWIDE_KEEP:
                insn = aarch64_insn_get_movk_value();
                break;
        case AARCH64_INSN_MOVEWIDE_INVERSE:
                insn = aarch64_insn_get_movn_value();
                break;
        default:
                pr_err("%s: unknown movewide encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        if (imm & ~(SZ_64K - 1)) {
                pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift != 0 && shift != 16) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

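        /* The hw field (bits [22:21]) holds the shift amount divided by 16. */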
        insn |= (shift >> 4) << 21;

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
                                         enum aarch64_insn_register src,
                                         enum aarch64_insn_register reg,
                                         int shift,
                                         enum aarch64_insn_variant variant,
                                         enum aarch64_insn_adsb_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_ADSB_ADD:
                insn = aarch64_insn_get_add_value();
                break;
        case AARCH64_INSN_ADSB_SUB:
                insn = aarch64_insn_get_sub_value();
                break;
        case AARCH64_INSN_ADSB_ADD_SETFLAGS:
                insn = aarch64_insn_get_adds_value();
                break;
        case AARCH64_INSN_ADSB_SUB_SETFLAGS:
                insn = aarch64_insn_get_subs_value();
                break;
        default:
                pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift & ~(SZ_32 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift & ~(SZ_64 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data1_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA1_REVERSE_16:
                insn = aarch64_insn_get_rev16_value();
                break;
        case AARCH64_INSN_DATA1_REVERSE_32:
                insn = aarch64_insn_get_rev32_value();
                break;
        case AARCH64_INSN_DATA1_REVERSE_64:
                if (variant != AARCH64_INSN_VARIANT_64BIT) {
                        pr_err("%s: invalid variant for reverse64 %d\n",
                               __func__, variant);
                        return AARCH64_BREAK_FAULT;
                }
                insn = aarch64_insn_get_rev64_value();
                break;
        default:
                pr_err("%s: unknown data1 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_register reg,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data2_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA2_UDIV:
                insn = aarch64_insn_get_udiv_value();
                break;
        case AARCH64_INSN_DATA2_SDIV:
                insn = aarch64_insn_get_sdiv_value();
                break;
        case AARCH64_INSN_DATA2_LSLV:
                insn = aarch64_insn_get_lslv_value();
                break;
        case AARCH64_INSN_DATA2_LSRV:
                insn = aarch64_insn_get_lsrv_value();
                break;
        case AARCH64_INSN_DATA2_ASRV:
                insn = aarch64_insn_get_asrv_value();
                break;
        case AARCH64_INSN_DATA2_RORV:
                insn = aarch64_insn_get_rorv_value();
                break;
        default:
                pr_err("%s: unknown data2 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
                           enum aarch64_insn_register src,
                           enum aarch64_insn_register reg1,
                           enum aarch64_insn_register reg2,
                           enum aarch64_insn_variant variant,
                           enum aarch64_insn_data3_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_DATA3_MADD:
                insn = aarch64_insn_get_madd_value();
                break;
        case AARCH64_INSN_DATA3_MSUB:
                insn = aarch64_insn_get_msub_value();
                break;
        default:
                pr_err("%s: unknown data3 encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
                                            reg1);

        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
                                            reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
                                         enum aarch64_insn_register src,
                                         enum aarch64_insn_register reg,
                                         int shift,
                                         enum aarch64_insn_variant variant,
                                         enum aarch64_insn_logic_type type)
{
        u32 insn;

        switch (type) {
        case AARCH64_INSN_LOGIC_AND:
                insn = aarch64_insn_get_and_value();
                break;
        case AARCH64_INSN_LOGIC_BIC:
                insn = aarch64_insn_get_bic_value();
                break;
        case AARCH64_INSN_LOGIC_ORR:
                insn = aarch64_insn_get_orr_value();
                break;
        case AARCH64_INSN_LOGIC_ORN:
                insn = aarch64_insn_get_orn_value();
                break;
        case AARCH64_INSN_LOGIC_EOR:
                insn = aarch64_insn_get_eor_value();
                break;
        case AARCH64_INSN_LOGIC_EON:
                insn = aarch64_insn_get_eon_value();
                break;
        case AARCH64_INSN_LOGIC_AND_SETFLAGS:
                insn = aarch64_insn_get_ands_value();
                break;
        case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
                insn = aarch64_insn_get_bics_value();
                break;
        default:
                pr_err("%s: unknown logical encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        switch (variant) {
        case AARCH64_INSN_VARIANT_32BIT:
                if (shift & ~(SZ_32 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        case AARCH64_INSN_VARIANT_64BIT:
                insn |= AARCH64_INSN_SF_BIT;
                if (shift & ~(SZ_64 - 1)) {
                        pr_err("%s: invalid shift encoding %d\n", __func__,
                               shift);
                        return AARCH64_BREAK_FAULT;
                }
                break;
        default:
                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
                return AARCH64_BREAK_FAULT;
        }

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
        s32 imm;

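        /*
         * Each decoded immediate is a word offset: shift it up so its sign
         * bit lands in bit 31, then arithmetic-shift back two places fewer,
         * sign-extending and multiplying by 4 in a single step.
         */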
        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
                return (imm << 6) >> 4;
        }

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
                return (imm << 13) >> 11;
        }

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
                return (imm << 18) >> 16;
        }

        /* Unhandled instruction */
        BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
                                                     offset >> 2);

        /* Unhandled instruction */
        BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
        BUG_ON(!aarch64_insn_is_adrp(insn));
        return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
        BUG_ON(!aarch64_insn_is_adrp(insn));
        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
                                             offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
        return (insn & 0x1FFFE0) >> 5;
}

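/*
 * In Thumb-2, a first halfword whose top five bits are 0b11101, 0b11110
 * or 0b11111 (i.e. >= 0xe800) introduces a 32-bit instruction; anything
 * below that is a 16-bit instruction.
 */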
bool aarch32_insn_is_wide(u32 insn)
{
        return insn >= 0xe800;
}

/*
 * Extract the 4-bit register number at the given bit offset in an
 * AArch32 instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
        return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK       0x7
#define OPC2_OFFSET     5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
        return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK        0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
        return insn & CRM_MASK;
}

static bool __kprobes __check_eq(unsigned long pstate)
{
        return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
        return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
        return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
        return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
        return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
        return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
        return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
        return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
        pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
        return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
        pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
        return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
        pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
        return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
        pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
        return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
        /* PSR_N_BIT ^= PSR_V_BIT */
        unsigned long temp = pstate ^ (pstate << 3);

        temp |= (pstate << 1);  /* PSR_N_BIT |= PSR_Z_BIT */
        return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
        /* PSR_N_BIT ^= PSR_V_BIT */
        unsigned long temp = pstate ^ (pstate << 3);

        temp |= (pstate << 1);  /* PSR_N_BIT |= PSR_Z_BIT */
        return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
        return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
        __check_eq, __check_ne, __check_cs, __check_cc,
        __check_mi, __check_pl, __check_vs, __check_vc,
        __check_hi, __check_ls, __check_ge, __check_lt,
        __check_gt, __check_le, __check_al, __check_al
};