Lines matching +full:reg +full:-addr in KVM's vgic-kvm-device.c (hits grouped by enclosing function)
1 // SPDX-License-Identifier: GPL-2.0-only
In vgic_check_ioaddr():
18  phys_addr_t addr, phys_addr_t alignment)
20  if (addr & ~kvm_phys_mask(kvm))
21  return -E2BIG;
23  if (!IS_ALIGNED(addr, alignment))
24  return -EINVAL;
27  return -EEXIST;
In vgic_check_type():
34  if (kvm->arch.vgic.vgic_model != type_needed)
35  return -ENODEV;
In kvm_vgic_addr():
41  * kvm_vgic_addr - set or get vgic VM base addresses
43  * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
44  * @addr: pointer to address value
56  int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
59  struct vgic_dist *vgic = &kvm->arch.vgic;
63  mutex_lock(&kvm->lock);
67  addr_ptr = &vgic->vgic_dist_base;
72  addr_ptr = &vgic->vgic_cpu_base;
77  addr_ptr = &vgic->vgic_dist_base;
87  r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
90  rdreg = list_first_entry_or_null(&vgic->rd_regions,
95  addr_ptr = &rdreg->base;
107  index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
110  gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
111  u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
113  u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
117  r = -EINVAL;
126  r = -ENOENT;
130  *addr = index;
131  *addr |= rdreg->base;
132  *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
136  r = -ENODEV;
143  r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
145  *addr_ptr = *addr;
147  *addr = *addr_ptr;
151  mutex_unlock(&kvm->lock);
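kvm_vgic_addr() is what backs the KVM_DEV_ARM_VGIC_GRP_ADDR attribute group of the VGIC KVM device, so userspace reaches it through KVM_SET_DEVICE_ATTR / KVM_GET_DEVICE_ATTR on the device fd. A minimal sketch of the write path is shown below; the vgic_fd and the chosen base value are illustrative assumptions, only the struct and constants come from the UAPI headers.

/* Userspace sketch (not part of the file above): program the GICv3
 * distributor base through KVM_DEV_ARM_VGIC_GRP_ADDR, which lands in
 * kvm_vgic_addr() with write == true. vgic_fd and the 64K-aligned base
 * are assumptions for illustration.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_vgic_v3_dist_base(int vgic_fd, uint64_t base)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
		.addr  = (uint64_t)&base,   /* kernel copies a u64 guest-physical address from here */
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Reading the same attribute back with KVM_GET_DEVICE_ATTR takes the write == false branch and fills the u64 instead.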
In vgic_set_common_attr():
160  switch (attr->group) {
162  u64 __user *uaddr = (u64 __user *)(long)attr->addr;
163  u64 addr;
164  unsigned long type = (unsigned long)attr->attr;
166  if (copy_from_user(&addr, uaddr, sizeof(addr)))
167  return -EFAULT;
169  r = kvm_vgic_addr(dev->kvm, type, &addr, true);
170  return (r == -ENODEV) ? -ENXIO : r;
173  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
178  return -EFAULT;
182  * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
183  * - at most 1024 interrupts
184  * - a multiple of 32 interrupts
189  return -EINVAL;
191  mutex_lock(&dev->kvm->lock);
193  if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
194  ret = -EBUSY;
196  dev->kvm->arch.vgic.nr_spis =
197  val - VGIC_NR_PRIVATE_IRQS;
199  mutex_unlock(&dev->kvm->lock);
204  switch (attr->attr) {
206  mutex_lock(&dev->kvm->lock);
207  r = vgic_init(dev->kvm);
208  mutex_unlock(&dev->kvm->lock);
215  return -ENXIO;
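The constraints quoted at lines 182-184 (at least 32 SPIs on top of the 32 private interrupts, at most 1024 lines, a multiple of 32) are what userspace has to respect when it programs KVM_DEV_ARM_VGIC_GRP_NR_IRQS before the VGIC is initialised. A minimal sketch, with vgic_fd and the chosen count as illustrative assumptions:

/* Userspace sketch: set the total number of interrupt lines the VGIC models.
 * The kernel reads a u32 from attr.addr and stores (val - 32) as nr_spis,
 * so the value must be >= 64, <= 1024 and a multiple of 32; nr_irqs is
 * caller-chosen, e.g. 128.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_vgic_nr_irqs(int vgic_fd, uint32_t nr_irqs)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
		.addr  = (uint64_t)&nr_irqs,   /* kernel get_user()s a u32 from here */
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

As lines 193-194 show, KVM rejects this with -EBUSY once the VGIC is ready or the count has already been set.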
In vgic_get_common_attr():
221  int r = -ENXIO;
223  switch (attr->group) {
225  u64 __user *uaddr = (u64 __user *)(long)attr->addr;
226  u64 addr;
227  unsigned long type = (unsigned long)attr->attr;
229  if (copy_from_user(&addr, uaddr, sizeof(addr)))
230  return -EFAULT;
232  r = kvm_vgic_addr(dev->kvm, type, &addr, false);
234  return (r == -ENODEV) ? -ENXIO : r;
236  if (copy_to_user(uaddr, &addr, sizeof(addr)))
237  return -EFAULT;
241  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
243  r = put_user(dev->kvm->arch.vgic.nr_spis +
In vgic_create():
254  return kvm_vgic_create(dev->kvm, type);

In kvm_register_vgic_device():
264  int ret = -ENODEV;
In vgic_v2_parse_attr():
289  cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
292  if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
293  return -EINVAL;
295  reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
296  reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
In unlock_vcpus():
306  for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
308  mutex_unlock(&tmp_vcpu->mutex);

In unlock_all_vcpus():
314  unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
In lock_all_vcpus():
325  * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
330  if (!mutex_trylock(&tmp_vcpu->mutex)) {
331  unlock_vcpus(kvm, c - 1);
In vgic_v2_attr_regs_access():
340  * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
344  * @reg: address the value is read or written
349  u32 *reg, bool is_write)
352  gpa_t addr;
361  addr = reg_attr.addr;
363  mutex_lock(&dev->kvm->lock);
365  ret = vgic_init(dev->kvm);
369  if (!lock_all_vcpus(dev->kvm)) {
370  ret = -EBUSY;
374  switch (attr->group) {
376  ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
379  ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
382  ret = -EINVAL;
386  unlock_all_vcpus(dev->kvm);
388  mutex_unlock(&dev->kvm->lock);
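From userspace, vgic_v2_attr_regs_access() is reached through the KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS groups, with the target vCPU id and the MMIO offset packed into attr as decoded by vgic_v2_parse_attr() above. A minimal sketch of a 32-bit distributor-register read follows; vgic_fd, cpuid and offset are illustrative.

/* Userspace sketch: read one 32-bit GICv2 distributor register for a given
 * vCPU. attr.attr carries the vCPU id in the KVM_DEV_ARM_VGIC_CPUID_MASK
 * field and the register offset in the low 32 bits, matching
 * vgic_v2_parse_attr(); e.g. offset 0 would read GICD_CTLR.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int vgic_v2_read_dist_reg(int vgic_fd, uint32_t cpuid,
				 uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)val,   /* kernel put_user()s the u32 here */
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}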
In vgic_v2_set_attr():
398  if (ret != -ENXIO)
401  switch (attr->group) {
404  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
405  u32 reg;
407  if (get_user(reg, uaddr))
408  return -EFAULT;
410  return vgic_v2_attr_regs_access(dev, attr, &reg, true);
414  return -ENXIO;
In vgic_v2_get_attr():
423  if (ret != -ENXIO)
426  switch (attr->group) {
429  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
430  u32 reg = 0;
432  ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
435  return put_user(reg, uaddr);
439  return -ENXIO;
In vgic_v2_has_attr():
445  switch (attr->group) {
447  switch (attr->attr) {
459  switch (attr->attr) {
464  return -ENXIO;

In kvm_arm_vgic_v2_ops (struct kvm_device_ops):
468  .name = "kvm-arm-vgic-v2",
In vgic_v3_parse_attr():
485  if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
486  vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
490  reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
492  reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
495  if (!reg_attr->vcpu)
496  return -EINVAL;
498  reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
In vgic_v3_attr_regs_access():
504  * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
508  * @reg: address the value is read or written
513  u64 *reg, bool is_write)
516  gpa_t addr;
526  addr = reg_attr.addr;
528  mutex_lock(&dev->kvm->lock);
530  if (unlikely(!vgic_initialized(dev->kvm))) {
531  ret = -EBUSY;
535  if (!lock_all_vcpus(dev->kvm)) {
536  ret = -EBUSY;
540  switch (attr->group) {
543  tmp32 = *reg;
545  ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
547  *reg = tmp32;
551  tmp32 = *reg;
553  ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
555  *reg = tmp32;
560  regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
562  regid, reg);
568  info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
571  intid = attr->attr &
574  intid, reg);
576  ret = -EINVAL;
581  ret = -EINVAL;
585  unlock_all_vcpus(dev->kvm);
587  mutex_unlock(&dev->kvm->lock);
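The GICv3 path accepts several attribute groups: distributor and redistributor registers move a u32 (the tmp32 dance above), while KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS moves a full u64. For groups other than the distributor one, the target vCPU is selected by the MPIDR affinity encoded in the upper bits of attr, as vgic_v3_parse_attr() shows; the distributor group simply acts on vCPU 0. A minimal sketch of a distributor read, with vgic_fd and offset as illustrative assumptions:

/* Userspace sketch: read a 32-bit GICv3 distributor register. Only the low
 * 32 bits of attr.attr (the offset field) matter for this group; e.g.
 * offset 0 would read GICD_CTLR.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int vgic_v3_read_dist_reg(int vgic_fd, uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = offset & KVM_DEV_ARM_VGIC_OFFSET_MASK,
		.addr  = (uint64_t)val,   /* kernel writes the u32 result here */
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}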
In vgic_v3_set_attr():
597  if (ret != -ENXIO)
600  switch (attr->group) {
603  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
605  u64 reg;
608  return -EFAULT;
610  reg = tmp32;
611  return vgic_v3_attr_regs_access(dev, attr, &reg, true);
614  u64 __user *uaddr = (u64 __user *)(long)attr->addr;
615  u64 reg;
617  if (get_user(reg, uaddr))
618  return -EFAULT;
620  return vgic_v3_attr_regs_access(dev, attr, &reg, true);
623  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
624  u64 reg;
628  return -EFAULT;
630  reg = tmp32;
631  return vgic_v3_attr_regs_access(dev, attr, &reg, true);
636  switch (attr->attr) {
638  mutex_lock(&dev->kvm->lock);
640  if (!lock_all_vcpus(dev->kvm)) {
641  mutex_unlock(&dev->kvm->lock);
642  return -EBUSY;
644  ret = vgic_v3_save_pending_tables(dev->kvm);
645  unlock_all_vcpus(dev->kvm);
646  mutex_unlock(&dev->kvm->lock);
652  return -ENXIO;
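The KVM_DEV_ARM_VGIC_GRP_CTRL group handled at the end of vgic_v3_set_attr() carries commands rather than register data: KVM_DEV_ARM_VGIC_CTRL_INIT requests VGIC initialisation, and KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES flushes pending-interrupt state to guest memory via vgic_v3_save_pending_tables() above. A minimal sketch, assuming an illustrative vgic_fd:

/* Userspace sketch: issue a VGIC control command. No payload is transferred
 * for this group, so attr.addr can stay 0.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int vgic_v3_ctrl(int vgic_fd, uint64_t cmd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = cmd,   /* e.g. KVM_DEV_ARM_VGIC_CTRL_INIT or
				 * KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES */
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

As lines 638-646 show, the save path takes the kvm lock and every vCPU mutex, so it fails with -EBUSY if a vCPU cannot be locked.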
In vgic_v3_get_attr():
661  if (ret != -ENXIO)
664  switch (attr->group) {
667  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
668  u64 reg;
671  ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
674  tmp32 = reg;
678  u64 __user *uaddr = (u64 __user *)(long)attr->addr;
679  u64 reg;
681  ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
684  return put_user(reg, uaddr);
687  u32 __user *uaddr = (u32 __user *)(long)attr->addr;
688  u64 reg;
691  ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
694  tmp32 = reg;
698  return -ENXIO;
In vgic_v3_has_attr():
704  switch (attr->group) {
706  switch (attr->attr) {
720  if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
727  switch (attr->attr) {
734  return -ENXIO;

In kvm_arm_vgic_v3_ops (struct kvm_device_ops):
738  .name = "kvm-arm-vgic-v3",