/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

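/*
 * Sanity-check a guest physical address handed in from userspace: it must
 * fit in the guest physical address space, be suitably aligned, and not
 * have been assigned already.
 */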
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
		      phys_addr_t addr, phys_addr_t alignment)
{
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (!IS_ALIGNED(addr, alignment))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;

	return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
	if (kvm->arch.vgic.vgic_model != type_needed)
		return -ENODEV;
	else
		return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr: pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment;
	u64 undef_value = VGIC_ADDR_UNDEF;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
			goto out;
		}
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

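		/*
		 * The 64-bit attribute value packs four fields: the region
		 * index, the flags, the region base address and the
		 * redistributor count, extracted with the
		 * KVM_VGIC_V3_RDIST_* masks and shifts below.
		 */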
		index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
					>> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
			u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
					>> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;

			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		*addr = index;
		*addr |= rdreg->base;
		*addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
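
/*
 * Illustrative sketch (not part of this file): kvm_vgic_addr() is reached
 * from userspace through the KVM device-attribute API. The fd name and
 * guest address below are assumptions; the attribute encoding is not:
 *
 *	__u64 dist_base = 0x08000000;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr	= KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr	= (__u64)&dist_base,
 *	};
 *	ioctl(vgic_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */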

static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);

		if (ret)
			break;
		ret = kvm_vgic_register_its_device();
		break;
	}

	return ret;
}

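/*
 * vgic_v2_parse_attr - parse a v2 register attribute
 *
 * The upper bits of attr->attr select the target vCPU (via the
 * KVM_DEV_ARM_VGIC_CPUID_* mask and shift); the lower bits carry the
 * register offset.
 */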
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	int cpuid;

	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
		return -EINVAL;

	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @reg: address the value is read from or written to
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u32 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v2_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};

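/*
 * vgic_v3_parse_attr - parse a v3 register attribute
 *
 * Except for the distributor register group, the upper bits of attr->attr
 * carry a compressed MPIDR (see the KVM_DEV_ARM_VGIC_V3_MPIDR_* mask and
 * shift) that identifies the target vCPU.
 */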
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	unsigned long vgic_mpidr, mpidr_reg;

	/*
	 * For the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
	 * attr might not hold an MPIDR. Hence assume vcpu0.
	 */
	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
	} else {
		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
	}

	if (!reg_attr->vcpu)
		return -EINVAL;

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}

/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @reg: address the value is read from or written to
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u64 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 tmp32;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

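	/*
	 * Distributor and redistributor registers are 32 bits wide, so the
	 * value is bounced through tmp32; system registers and line-level
	 * info use the full 64 bits of *reg.
	 */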
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 regid;

		regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
						  regid, reg);
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, reg);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 tmp32;
		u64 reg;

		if (get_user(tmp32, uaddr))
			return -EFAULT;

		reg = tmp32;
		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		if (get_user(tmp32, uaddr))
			return -EFAULT;

		reg = tmp32;
		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		int ret;

		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}
			ret = vgic_v3_save_pending_tables(dev->kvm);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return ret;
		}
		break;
	}
	}
	return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	}
	return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		return vgic_v3_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
		      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
			VGIC_LEVEL_INFO_LINE_LEVEL)
			return 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
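
/*
 * Illustrative sketch (not part of this file): the ops tables above come
 * into play once userspace instantiates the device. vm_fd is an assumption:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V3,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 * cd.fd then accepts the KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls handled by
 * vgic_v3_{set,get,has}_attr().
 */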