/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

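/*
 * If the vcpu's sysreg state is currently loaded on the physical CPU, put
 * it back into memory (with preemption disabled) so that the injection code
 * below operates on up-to-date values. Returns true if the state was loaded
 * and must be restored by post_fault_synchronize().
 */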
static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (kvm_arm_vcpu_loaded(vcpu)) {
		kvm_arch_vcpu_put(vcpu);
		return true;
	}

	preempt_enable();
	return false;
}

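/*
 * Counterpart to pre_fault_synchronize(): reload the vcpu state onto this
 * CPU and re-enable preemption if the state had been put above.
 */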
static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
{
	if (loaded) {
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
		preempt_enable();
	}
}

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

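/*
 * Switch the vcpu to the exception handler: compute the new CPSR, save the
 * old CPSR into the (banked) SPSR, set the banked LR to the preferred
 * return address and point the PC at the exception vector.
 */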
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

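/* Inject an Undefined Instruction exception (Undef vector, offset 0x4). */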
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	bool loaded = pre_fault_synchronize(vcpu);

	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
	post_fault_synchronize(vcpu, loaded);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;
	bool loaded;

	loaded = pre_fault_synchronize(vcpu);

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !iabt */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae) {
		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		*fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	post_fault_synchronize(vcpu, loaded);
}

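/* Inject a Data Abort for @addr; the address is reported in DFAR. */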
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

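/* Inject a Prefetch Abort for @addr; the address is reported in IFAR. */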
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}