// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr, len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
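	/* reject ranges where addr + len wraps around */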
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* last is the index of the first free entry; check that it does
	 * not collide with the first used entry.  One entry is always
	 * left unused so that a full ring can be told apart from an
	 * empty one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

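	/*
	 * If the access is outside this zone, or the ring is full, report
	 * -EOPNOTSUPP so the access is handled as an ordinary MMIO exit to
	 * userspace instead of being coalesced.
	 */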
	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	insert = READ_ONCE(ring->last);
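	/*
	 * ring->last lives in a page that userspace can write to; use the
	 * value read once above and reject anything out of bounds rather
	 * than trusting it.
	 */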
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
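	/*
	 * Make the new entry visible before publishing the updated
	 * ring->last index; userspace reads the ring without taking
	 * ring_lock.
	 */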
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

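/*
 * Only writes are coalesced; there is no .read handler, so reads in a
 * registered zone still take the normal MMIO path.
 */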
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);
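	/*
	 * This page is shared with userspace: it can be mmap()ed through a
	 * vcpu fd at KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, which is
	 * how userspace reads and drains the ring.
	 */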

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

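	/* slots_lock serializes registration with unregistration and
	 * protects the coalesced_zones list.
	 */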
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
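			/* drops dev from coalesced_zones and frees it */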
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}