# AArch32 VFP instruction descriptions (conditional insns)
#
# Copyright (c) 2019 Linaro, Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

#
# This file is processed by scripts/decodetree.py
#
# Encodings for the conditional VFP instructions are here:
# generally anything matching A32
#  cccc 11.. .... .... .... 101. .... ....
# and T32
#  1110 110. .... .... .... 101. .... ....
#  1110 1110 .... .... .... 101. .... ....
# (but those patterns might also cover some Neon instructions,
# which do not live in this file.)
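# (Reading aid, informal: in the patterns below, fixed '0'/'1' bits must
# match, '.' is a don't-care bit, "name:n" extracts an n-bit field, and
# "%name" refers to a multi-part field defined on a %name line; the parts
# of a %field are concatenated with the first-listed part most significant.
# See docs/devel/decodetree.rst for the authoritative description.)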

# VFP registers have an odd encoding with a four-bit field
# and a one-bit field which are assembled in different orders
# depending on whether the register is double or single precision.
# Each individual instruction function must do the checks for
# "double register selected but CPU does not have double support"
# and "double register number has bit 4 set but CPU does not
# support D16-D31" (which should UNDEF).
%vm_dp 5:1 0:4
%vm_sp 0:4 5:1
%vn_dp 7:1 16:4
%vn_sp 16:4 7:1
%vd_dp 22:1 12:4
%vd_sp 12:4 22:1
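# (Illustrative example, not from the original file: with insn[5] (M) = 1
# and insn[3:0] (Vm) = 0b0110, %vm_sp assembles Vm:M = 0b01101, i.e. s13,
# while %vm_dp assembles M:Vm = 0b10110, i.e. d22.)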

%vmov_idx_b 21:1 5:2
%vmov_idx_h 21:1 6:1
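# (Reading aid: per the fields above, the byte-element index is
# insn[21]:insn[6:5] and the halfword-element index is insn[21]:insn[6].)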

# VMOV scalar to general-purpose register; note that this does
# include some Neon cases.
VMOV_to_gp   ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_to_gp   ---- 1110 u:1 0. 1 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_to_gp   ---- 1110 0 0 index:1 1 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2 u=0

VMOV_from_gp ---- 1110 0 1. 0 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_from_gp ---- 1110 0 0. 0 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2

VDUP         ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
             vn=%vn_dp

VMSR_VMRS   ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
            vn=%vn_sp

VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
           vm=%vm_sp
VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
           vm=%vm_dp

# Note that the half-precision variants of VLDR and VSTR are
# not part of this decodetree at all because they have bits [9:8] == 0b01
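# (the VLDR/VSTR patterns below fix bits [11:8] to 1010 or 1011, i.e.
# bits [9:8] to 0b10 or 0b11, so the half-precision encodings cannot match)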
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp

# We split the load/store multiple up into two patterns to avoid
# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
# grouping:
#   P=0 U=0 W=0 is 64-bit VMOV
#   P=1 W=0 is VLDR/VSTR
#   P=U W=1 is UNDEF
# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple.
# These include FSTM/FLDM.
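# (For reference: in this encoding group P, U, W and L are insn bits [24],
# [23], [21] and [20]; the fixed bits in the patterns below select the
# P/U/W combinations described above.)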
VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=0 u=1
VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=0 u=1

VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=1 u=0 w=1
VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=1 u=0 w=1

# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
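# (Summary, read off the fixed bits of the patterns that follow:
#   [23,21:20,6] = 0 00 0  VMLA     0 00 1  VMLS
#                  0 01 0  VNMLS    0 01 1  VNMLA
#                  0 10 0  VMUL     0 10 1  VNMUL
#                  0 11 0  VADD)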
VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... \
        vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... \
        vm=%vm_dp vn=%vn_dp vd=%vd_dp

VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... \
        vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... \
        vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMLS_sp ---- 1110 0.01 .... .... 1010 .0.0 .... \
         vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMLS_dp ---- 1110 0.01 .... .... 1011 .0.0 .... \
         vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMLA_sp ---- 1110 0.01 .... .... 1010 .1.0 .... \
         vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMLA_dp ---- 1110 0.01 .... .... 1011 .1.0 .... \
         vm=%vm_dp vn=%vn_dp vd=%vd_dp

VMUL_sp ---- 1110 0.10 .... .... 1010 .0.0 .... \
        vm=%vm_sp vn=%vn_sp vd=%vd_sp
VMUL_dp ---- 1110 0.10 .... .... 1011 .0.0 .... \
        vm=%vm_dp vn=%vn_dp vd=%vd_dp

VNMUL_sp ---- 1110 0.10 .... .... 1010 .1.0 .... \
         vm=%vm_sp vn=%vn_sp vd=%vd_sp
VNMUL_dp ---- 1110 0.10 .... .... 1011 .1.0 .... \
         vm=%vm_dp vn=%vn_dp vd=%vd_dp

VADD_sp ---- 1110 0.11 .... .... 1010 .0.0 .... \
        vm=%vm_sp vn=%vn_sp vd=%vd_sp
VADD_dp ---- 1110 0.11 .... .... 1011 .0.0 .... \
        vm=%vm_dp vn=%vn_dp vd=%vd_dp