/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __KVM_ARM_VGIC_MMIO_H__
#define __KVM_ARM_VGIC_MMIO_H__

struct vgic_register_region {
	unsigned int reg_offset;
	unsigned int len;
	unsigned int bits_per_irq;
	unsigned int access_flags;
	union {
		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
					  gpa_t addr, unsigned int len);
	};
	union {
		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
			      unsigned int len, unsigned long val);
		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
				  gpa_t addr, unsigned int len,
				  unsigned long val);
	};
	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
	union {
		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
				     unsigned int len, unsigned long val);
		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
					 gpa_t addr, unsigned int len,
					 unsigned long val);
	};
};

extern struct kvm_io_device_ops kvm_io_gic_ops;

#define VGIC_ACCESS_8bit	1
#define VGIC_ACCESS_32bit	2
#define VGIC_ACCESS_64bit	4

/*
 * Generate a mask that covers the number of bytes required to address
 * up to 1024 interrupts, each represented by <bits> bits. This assumes
 * that <bits> is a power of two.
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)

/*
 * (addr & mask) gives us the _byte_ offset for the INT ID.
 * We multiply this by 8 to get the _bit_ offset, then divide this by
 * the number of bits to learn the actual INT ID.
 * But instead of a division (which requires a "long long div" implementation),
 * we shift by the binary logarithm of <bits>.
 * This assumes that <bits> is a power of two.
 */
#define VGIC_ADDR_TO_INTID(addr, bits)	(((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
					8 >> ilog2(bits))
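
/*
 * Worked example (illustrative only, not referenced by the code): for a
 * register range with 1 bit per IRQ, VGIC_ADDR_IRQ_MASK(1) is 0x7f, so a
 * byte offset of 0x4 maps to (0x4 * 8) >> ilog2(1) = INTID 32, the first
 * SPI. For a range with 8 bits per IRQ the mask is 0x3ff and the same
 * offset maps to (0x4 * 8) >> ilog2(8) = INTID 4.
 */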

/*
 * Some VGIC registers store per-IRQ information, with a different number
 * of bits per IRQ. For those registers this macro is used.
 * The _WITH_LENGTH version instantiates registers with a fixed length
 * and is mutually exclusive with the _PER_IRQ version.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = bpi * 1024 / 8,					\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}

#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
	}

#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = urd,					\
		.uaccess_write = uwr,					\
	}
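
/*
 * Usage sketch (hypothetical offsets and handler pairing, for illustration
 * only): the GIC model specific MMIO code builds arrays of struct
 * vgic_register_region entries from these macros, roughly like:
 *
 *	static const struct vgic_register_region example_registers[] = {
 *		REGISTER_DESC_WITH_LENGTH(0x0000,
 *			vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
 *			VGIC_ACCESS_32bit),
 *		REGISTER_DESC_WITH_BITS_PER_IRQ(0x0100,
 *			vgic_mmio_read_enable, vgic_mmio_write_senable,
 *			NULL, NULL, 1, VGIC_ACCESS_32bit),
 *	};
 *
 * The real tables and their dispatch live in the vgic-mmio-v2/v3 code.
 */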

int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
				  struct vgic_register_region *reg_desc,
				  struct vgic_io_device *region,
				  int nr_irqs, bool offset_private);

unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data);

unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num);

u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val);

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val);

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val);

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
				   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val);

unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val);

int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val);

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val);

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64));

/* Find the proper register handler entry given a certain address offset */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset);

#endif