/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces.  The basic
 * idea is that even though the VGIC is not functional or not requested from
 * user space, the critical path of the run loop can still call VGIC functions
 * that just won't do anything, without them having to check additional
 * initialization flags to ensure they don't look at uninitialized data
 * structures.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu specific data
 *   structures. Can be executed lazily for GICv2.
 *
 * CPU Interface:
 *
 * - kvm_vgic_vcpu_init(): initialization of static data that
 *   doesn't depend on any sizing information or emulation type. No
 *   allocation is allowed there.
 */
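
/*
 * Rough lifecycle of these functions, pieced together from this file and its
 * comments (the actual call sites live in the generic KVM and arch code, so
 * treat this as an orientation aid rather than a specification):
 *
 *   kvm_vgic_early_init(kvm)      - static distributor state, at VM creation
 *   kvm_vgic_create(kvm, type)    - KVM_CREATE_DEVICE / KVM_CREATE_IRQCHIP
 *   kvm_vgic_vcpu_init(vcpu)      - per-VCPU static state, as VCPUs are created
 *   vgic_init(kvm)                - explicit for GICv3, lazy for GICv2
 *   kvm_vgic_map_resources(kvm)   - MMIO mapping on the first VCPU run
 *   kvm_vgic_destroy(kvm)         - teardown at VM destruction
 */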

/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC distributor data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init(), which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
	raw_spin_lock_init(&dist->lpi_list_lock);
}

/* CREATION */

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		!kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}
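
/*
 * For illustration only: a minimal userspace sketch of how this function is
 * reached through the KVM UAPI (assumes an open VM fd and <linux/kvm.h>;
 * error handling omitted, vm_fd/vgic_fd are hypothetical variable names):
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V3,	/* or ..._V2 */
 *		.flags = 0,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	vgic_fd = cd.fd;	/* device fd, used for KVM_SET_DEVICE_ATTR later */
 *
 * The legacy GICv2-only path is a plain ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0),
 * which ends up here as well.
 */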

/* INIT/DESTROY */

/**
 * kvm_vgic_dist_init: initialize the dist data structures
 * @kvm: kvm struct pointer
 * @nr_spis: number of spis, frozen by caller
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * In the following code we do not take the irq struct lock since
	 * no other action on irq structs can happen while the VGIC is
	 * not initialized yet:
	 * If someone wants to inject an interrupt or does an MMIO access, we
	 * require prior initialization in case of a virtual GICv3 or trigger
	 * initialization when using a virtual GICv2.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 *	structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	spin_lock_init(&vgic_cpu->ap_list_lock);

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}
	}

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}

static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_enable(vcpu);
	else
		vgic_v3_enable(vcpu);
}

/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by userspace through the KVM DEVICE API. If not, nr_spis defaults to
 * VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS.
 * vgic_initialized() returns true when this function has succeeded.
 * Must be called with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i, idx;

	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/* Initialize groups on CPUs created before the VGIC type was known */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			switch (dist->vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				irq->group = 1;
				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
				break;
			case KVM_DEV_TYPE_ARM_VGIC_V2:
				irq->group = 0;
				irq->targets = 1U << idx;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (vgic_has_its(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	dist->implementation_rev = 2;
	dist->initialized = true;

out:
	return ret;
}
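
/*
 * The "explicitly set" case above goes through the KVM device attribute
 * interface. A minimal userspace sketch (illustrative only; vgic_fd is the fd
 * returned by KVM_CREATE_DEVICE, and the payload is a __u32 holding the total
 * number of IRQs, private IRQs included):
 *
 *	__u32 nr_irqs = 128;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.attr  = 0,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * With nr_irqs = 128, dist->nr_spis ends up as 128 - VGIC_NR_PRIVATE_IRQS.
 */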

static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;

	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
			list_del(&rdreg->list);
			kfree(rdreg);
		}
		INIT_LIST_HEAD(&dist->rd_regions);
	}

	if (vgic_supports_direct_msis(kvm))
		vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
}

/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	vgic_debug_destroy(kvm);

	kvm_vgic_dist_destroy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */
int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}
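
/*
 * The explicit initialization mentioned above is requested from userspace
 * through the same device attribute interface, roughly (illustrative sketch,
 * error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which calls vgic_init() above with kvm->lock held.
 */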

/* RESOURCE MAPPING */

/**
 * Map the MMIO regions depending on the VGIC model exposed to the guest,
 * called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2/v3 derivatives call vgic_init if not already done.
 * vgic_ready() returns true if this function has succeeded.
 * @kvm: kvm struct pointer
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	if (ret)
		__kvm_vgic_destroy(kvm);

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
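
/*
 * The base addresses consumed by the map_resources path are provided by
 * userspace beforehand via KVM_DEV_ARM_VGIC_GRP_ADDR, e.g. for the GICv3
 * distributor (sketch; the address is an example only, and a GICv3 also
 * needs a redistributor region via KVM_VGIC_V3_ADDR_TYPE_REDIST):
 *
 *	__u64 dist_base = 0x08000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
 *		.addr  = (__u64)(unsigned long)&dist_base,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */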

/* GENERIC PROBE */

static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}


static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 */
	return IRQ_HANDLED;
}

/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC VE hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}

/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by userspace later on.
 */
int kvm_vgic_hyp_init(void)
{
	const struct gic_kvm_info *gic_kvm_info;
	int ret;

	gic_kvm_info = gic_get_kvm_info();
	if (!gic_kvm_info)
		return -ENODEV;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	default:
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"kvm/arm/vgic:starting",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}