Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts   |  2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts  |  2
-rw-r--r--  arch/arm64/configs/defconfig                        |  2
-rw-r--r--  arch/arm64/hyperv/mshyperv.c                        | 53
-rw-r--r--  arch/arm64/include/asm/kvm_host.h                   |  3
-rw-r--r--  arch/arm64/include/asm/sysreg.h                     |  1
-rw-r--r--  arch/arm64/kvm/arch_timer.c                         |  4
-rw-r--r--  arch/arm64/kvm/arm.c                                | 69
-rw-r--r--  arch/arm64/kvm/hypercalls.c                         | 10
-rw-r--r--  arch/arm64/kvm/nested.c                             |  6
-rw-r--r--  arch/arm64/kvm/vgic/vgic-debug.c                    |  5
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c                     | 31
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c                      | 56
-rw-r--r--  arch/arm64/kvm/vgic/vgic-kvm-device.c               | 12
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c                       | 92
15 files changed, 204 insertions(+), 144 deletions(-)
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
index 712f29fbe85e..b9a0f7ac4d9c 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause
 /*
  * Copyright (c) 2023, Luca Weiss <luca.weiss@fairphone.com>
- * Copyright (c) 2024, Caleb Connolly <caleb@postmarketos.org>
+ * Copyright (c) 2024, Casey Connolly <casey.connolly@linaro.org>
  */
 
 /dts-v1/;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
index e5da58d11064..2cf7b5e1243c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2022, Alexander Martinz <amartinz@shiftphones.com>
- * Copyright (c) 2022, Caleb Connolly <caleb@connolly.tech>
+ * Copyright (c) 2022, Casey Connolly <casey.connolly@linaro.org>
  * Copyright (c) 2022, Dylan Van Assche <me@dylanvanassche.be>
  */
 
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 1e99db100607..897fc686e6a9 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -774,7 +774,7 @@ CONFIG_MFD_MT6397=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK8XX_I2C=y
 CONFIG_MFD_RK8XX_SPI=y
-CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SEC_I2C=y
 CONFIG_MFD_SL28CPLD=y
 CONFIG_RZ_MTU3=y
 CONFIG_MFD_TI_AM335X_TSCADC=m
diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c
index 4e27cc29c79e..4fdc26ade1d7 100644
--- a/arch/arm64/hyperv/mshyperv.c
+++ b/arch/arm64/hyperv/mshyperv.c
@@ -28,6 +28,48 @@ int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
 }
 EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);
 
+#ifdef CONFIG_ACPI
+
+static bool __init hyperv_detect_via_acpi(void)
+{
+        if (acpi_disabled)
+                return false;
+        /*
+         * Hypervisor ID is only available in ACPI v6+, and the
+         * structure layout was extended in v6 to accommodate that
+         * new field.
+         *
+         * At the very minimum, this check makes sure not to read
+         * past the FADT structure.
+         *
+         * It is also needed to catch running in some unknown
+         * non-Hyper-V environment that has ACPI 5.x or less.
+         * In such a case, it can't be Hyper-V.
+         */
+        if (acpi_gbl_FADT.header.revision < 6)
+                return false;
+        return strncmp((char *)&acpi_gbl_FADT.hypervisor_id, "MsHyperV", 8) == 0;
+}
+
+#else
+
+static bool __init hyperv_detect_via_acpi(void)
+{
+        return false;
+}
+
+#endif
+
+static bool __init hyperv_detect_via_smccc(void)
+{
+        uuid_t hyperv_uuid = UUID_INIT(
+                0x58ba324d, 0x6447, 0x24cd,
+                0x75, 0x6c, 0xef, 0x8e,
+                0x24, 0x70, 0x59, 0x16);
+
+        return arm_smccc_hypervisor_has_uuid(&hyperv_uuid);
+}
+
 static int __init hyperv_init(void)
 {
         struct hv_get_vp_registers_output result;
@@ -36,13 +78,11 @@ static int __init hyperv_init(void)
 
         /*
          * Allow for a kernel built with CONFIG_HYPERV to be running in
-         * a non-Hyper-V environment, including on DT instead of ACPI.
+         * a non-Hyper-V environment.
+         *
          * In such cases, do nothing and return success.
          */
-        if (acpi_disabled)
-                return 0;
-
-        if (strncmp((char *)&acpi_gbl_FADT.hypervisor_id, "MsHyperV", 8))
+        if (!hyperv_detect_via_acpi() && !hyperv_detect_via_smccc())
                 return 0;
 
         /* Setup the guest ID */
@@ -77,6 +117,9 @@ static int __init hyperv_init(void)
         if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
                 hv_get_partition_id();
 
+        ms_hyperv.vtl = get_vtl();
+        if (ms_hyperv.vtl > 0) /* non default VTL */
+                pr_info("Linux runs in Hyper-V Virtual Trust Level %d\n", ms_hyperv.vtl);
 
         ms_hyperv_late_init();
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d941abc6b5ee..6ce2c5173482 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1320,9 +1320,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
                                   unsigned int idx);
 int __init populate_nv_trap_config(void);
 
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
 void kvm_calculate_traps(struct kvm_vcpu *vcpu);
 
 /* MMIO helpers */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index cd853801a8f7..f1bb0d10c39a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -12,6 +12,7 @@
 #include <linux/bits.h>
 #include <linux/stringify.h>
 #include <linux/kasan-tags.h>
+#include <linux/kconfig.h>
 
 #include <asm/gpr-num.h>
 
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 5133dcbfe9f7..fdbc8beec930 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
 
         mutex_lock(&kvm->lock);
 
-        if (lock_all_vcpus(kvm)) {
+        if (!kvm_trylock_all_vcpus(kvm)) {
                 set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
 
                 /*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
                 kvm->arch.timer_data.voffset = offset->counter_offset;
                 kvm->arch.timer_data.poffset = offset->counter_offset;
 
-                unlock_all_vcpus(kvm);
+                kvm_unlock_all_vcpus(kvm);
         } else {
                 ret = -EBUSY;
         }
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 36cfcffb40d8..de2b4e9c9f9f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1924,49 +1924,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
         }
 }
 
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-        struct kvm_vcpu *tmp_vcpu;
-
-        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-                mutex_unlock(&tmp_vcpu->mutex);
-        }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-        lockdep_assert_held(&kvm->lock);
-
-        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-        struct kvm_vcpu *tmp_vcpu;
-        unsigned long c;
-
-        lockdep_assert_held(&kvm->lock);
-
-        /*
-         * Any time a vcpu is in an ioctl (including running), the
-         * core KVM code tries to grab the vcpu->mutex.
-         *
-         * By grabbing the vcpu->mutex of all VCPUs we ensure that no
-         * other VCPUs can fiddle with the state while we access it.
-         */
-        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-                if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                        unlock_vcpus(kvm, c - 1);
-                        return false;
-                }
-        }
-
-        return true;
-}
-
 static unsigned long nvhe_percpu_size(void)
 {
         return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
@@ -2790,6 +2747,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
         return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
                                           &irqfd->irq_entry);
 }
+
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                       struct irq_bypass_producer *prod)
 {
@@ -2800,8 +2758,29 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
         if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
                 return;
 
-        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
-                                     &irqfd->irq_entry);
+        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
+}
+
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
+                                  struct kvm_kernel_irq_routing_entry *new)
+{
+        if (new->type != KVM_IRQ_ROUTING_MSI)
+                return true;
+
+        return memcmp(&old->msi, &new->msi, sizeof(new->msi));
+}
+
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+                                  uint32_t guest_irq, bool set)
+{
+        /*
+         * Remapping the vLPI requires taking the its_lock mutex to resolve
+         * the new translation. We're in spinlock land at this point, so no
+         * chance of resolving the translation.
+         *
+         * Unmap the vLPI and fall back to software LPI injection.
+         */
+        return kvm_vgic_v4_unset_forwarding(kvm, host_irq);
 }
 
 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 569941eeb3fe..58c5fe7d7572 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -270,6 +270,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
         u32 feature;
         u8 action;
         gpa_t gpa;
+        uuid_t uuid;
 
         action = kvm_smccc_get_action(vcpu, func_id);
         switch (action) {
@@ -355,10 +356,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
                 val[0] = gpa;
                 break;
         case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
-                val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
-                val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
-                val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
-                val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
+                uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
+                val[0] = smccc_uuid_to_reg(&uuid, 0);
+                val[1] = smccc_uuid_to_reg(&uuid, 1);
+                val[2] = smccc_uuid_to_reg(&uuid, 2);
+                val[3] = smccc_uuid_to_reg(&uuid, 3);
                 break;
         case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
                 val[0] = smccc_feat->vendor_hyp_bmap;
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 291dbe38eb5c..4a53e4147fb0 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -918,6 +918,8 @@ static void invalidate_vncr_va(struct kvm *kvm,
         }
 }
 
+#define tlbi_va_s1_to_va(v)     (u64)sign_extend64((v) << 12, 48)
+
 static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
                                   struct s1e2_tlbi_scope *scope)
 {
@@ -964,7 +966,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
                 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
                 if (!scope->size)
                         scope->size = SZ_1G;
-                scope->va = (val << 12) & ~(scope->size - 1);
+                scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
                 scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
                 break;
         case OP_TLBI_ASIDE1:
@@ -992,7 +994,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
                 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
                 if (!scope->size)
                         scope->size = SZ_1G;
-                scope->va = (val << 12) & ~(scope->size - 1);
+                scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
                 break;
         case OP_TLBI_RVAE2:
         case OP_TLBI_RVAE2IS:
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index f8425f381de9..2684f273d9e1 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -490,6 +490,9 @@ static int vgic_its_debug_show(struct seq_file *s, void *v)
         struct its_device *dev = iter->dev;
         struct its_ite *ite = iter->ite;
 
+        if (!ite)
+                return 0;
+
         if (list_is_first(&ite->ite_list, &dev->itt_head)) {
                 seq_printf(s, "\n");
                 seq_printf(s, "Device ID: 0x%x, Event ID Range: [0 - %llu]\n",
@@ -498,7 +501,7 @@ static int vgic_its_debug_show(struct seq_file *s, void *v)
                 seq_printf(s, "-----------------------------------------------\n");
         }
 
-        if (ite && ite->irq && ite->collection) {
+        if (ite->irq && ite->collection) {
                 seq_printf(s, "%8u %8u %8u %8u %8u %2d\n",
                            ite->event_id, ite->irq->intid, ite->irq->hwintid,
                            ite->collection->target_addr,
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 1f33e71c2a73..eb1205654ac8 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -84,15 +84,40 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
             !kvm_vgic_global_state.can_emulate_gicv2)
                 return -ENODEV;
 
-        /* Must be held to avoid race with vCPU creation */
+        /*
+         * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
+         *
+         * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
+         *   kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
+         *   This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
+         *   the kvm->lock before completing the vCPU creation.
+         */
         lockdep_assert_held(&kvm->lock);
 
+        /*
+         * - Acquiring the vCPU mutex for every *online* vCPU to prevent
+         *   concurrent vCPU ioctls for vCPUs already visible to userspace.
+         */
         ret = -EBUSY;
-        if (!lock_all_vcpus(kvm))
+        if (kvm_trylock_all_vcpus(kvm))
                 return ret;
 
+        /*
+         * - Taking the config_lock which protects VGIC data structures such
+         *   as the per-vCPU arrays of private IRQs (SGIs, PPIs).
+         */
         mutex_lock(&kvm->arch.config_lock);
 
+        /*
+         * - Bailing on the entire thing if a vCPU is in the middle of creation,
+         *   dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
+         *
+         * The whole combination of this guarantees that no vCPU can get into
+         * KVM with a VGIC configuration inconsistent with the VM's VGIC.
+         */
+        if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
+                goto out_unlock;
+
         if (irqchip_in_kernel(kvm)) {
                 ret = -EEXIST;
                 goto out_unlock;
@@ -142,7 +167,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 
 out_unlock:
         mutex_unlock(&kvm->arch.config_lock);
-        unlock_all_vcpus(kvm);
+        kvm_unlock_all_vcpus(kvm);
         return ret;
 }
 
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 569f9da9049f..534049c7c94b 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -306,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                 }
         }
 
-        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
         if (irq->hw)
-                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+                ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-        return 0;
+        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+        return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-        int ret = 0;
-        unsigned long flags;
+        struct its_vlpi_map map;
+        int ret;
 
-        raw_spin_lock_irqsave(&irq->irq_lock, flags);
+        guard(raw_spinlock_irqsave)(&irq->irq_lock);
         irq->target_vcpu = vcpu;
-        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-        if (irq->hw) {
-                struct its_vlpi_map map;
-
-                ret = its_get_vlpi(irq->host_irq, &map);
-                if (ret)
-                        return ret;
+        if (!irq->hw)
+                return 0;
 
-                if (map.vpe)
-                        atomic_dec(&map.vpe->vlpi_count);
-                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-                atomic_inc(&map.vpe->vlpi_count);
+        ret = its_get_vlpi(irq->host_irq, &map);
+        if (ret)
+                return ret;
 
-                ret = its_map_vlpi(irq->host_irq, &map);
-        }
+        if (map.vpe)
+                atomic_dec(&map.vpe->vlpi_count);
 
-        return ret;
+        map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+        atomic_inc(&map.vpe->vlpi_count);
+        return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -756,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+        struct vgic_irq *irq = ite->irq;
         list_del(&ite->ite_list);
 
         /* This put matches the get in vgic_add_lpi. */
-        if (ite->irq) {
-                if (ite->irq->hw)
-                        WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+        if (irq) {
+                scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+                        if (irq->hw)
+                                WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+                        irq->hw = false;
+                }
 
                 vgic_put_irq(kvm, ite->irq);
         }
@@ -1971,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -2006,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
         }
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
         return ret;
 }
@@ -2676,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
         mutex_lock(&kvm->lock);
 
-        if (!lock_all_vcpus(kvm)) {
+        if (kvm_trylock_all_vcpus(kvm)) {
                 mutex_unlock(&kvm->lock);
                 return -EBUSY;
         }
@@ -2698,7 +2698,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
         mutex_unlock(&its->its_lock);
         mutex_unlock(&kvm->arch.config_lock);
-        unlock_all_vcpus(kvm);
+        kvm_unlock_all_vcpus(kvm);
         mutex_unlock(&kvm->lock);
         return ret;
 }
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 359094f68c23..f9ae790163fb 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -268,7 +268,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                         return -ENXIO;
                 mutex_lock(&dev->kvm->lock);
 
-                if (!lock_all_vcpus(dev->kvm)) {
+                if (kvm_trylock_all_vcpus(dev->kvm)) {
                         mutex_unlock(&dev->kvm->lock);
                         return -EBUSY;
                 }
@@ -276,7 +276,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                 mutex_lock(&dev->kvm->arch.config_lock);
                 r = vgic_v3_save_pending_tables(dev->kvm);
                 mutex_unlock(&dev->kvm->arch.config_lock);
-                unlock_all_vcpus(dev->kvm);
+                kvm_unlock_all_vcpus(dev->kvm);
                 mutex_unlock(&dev->kvm->lock);
                 return r;
         }
@@ -390,7 +390,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -415,7 +415,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
 
         if (!ret && !is_write)
@@ -554,7 +554,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -611,7 +611,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
 
         if (!ret && uaccess && !is_write) {
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c7de6154627c..193946108192 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -444,7 +444,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
         if (IS_ERR(its))
                 return 0;
 
-        mutex_lock(&its->its_lock);
+        guard(mutex)(&its->its_lock);
 
         /*
          * Perform the actual DevID/EventID -> LPI translation.
@@ -455,11 +455,13 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
          */
         if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                  irq_entry->msi.data, &irq))
-                goto out;
+                return 0;
+
+        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
         /* Silently exit if the vLPI is already mapped */
         if (irq->hw)
-                goto out;
+                goto out_unlock_irq;
 
         /*
          * Emit the mapping request. If it fails, the ITS probably
@@ -479,68 +481,74 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
         ret = its_map_vlpi(virq, &map);
         if (ret)
-                goto out;
+                goto out_unlock_irq;
 
         irq->hw = true;
         irq->host_irq = virq;
         atomic_inc(&map.vpe->vlpi_count);
 
         /* Transfer pending state */
-        raw_spin_lock_irqsave(&irq->irq_lock, flags);
-        if (irq->pending_latch) {
-                ret = irq_set_irqchip_state(irq->host_irq,
-                                            IRQCHIP_STATE_PENDING,
-                                            irq->pending_latch);
-                WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+        if (!irq->pending_latch)
+                goto out_unlock_irq;
 
-                /*
-                 * Clear pending_latch and communicate this state
-                 * change via vgic_queue_irq_unlock.
-                 */
-                irq->pending_latch = false;
-                vgic_queue_irq_unlock(kvm, irq, flags);
-        } else {
-                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-        }
+        ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+                                    irq->pending_latch);
+        WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
 
-out:
-        mutex_unlock(&its->its_lock);
+        /*
+         * Clear pending_latch and communicate this state
+         * change via vgic_queue_irq_unlock.
+         */
+        irq->pending_latch = false;
+        vgic_queue_irq_unlock(kvm, irq, flags);
+        return ret;
+
+out_unlock_irq:
+        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
         return ret;
 }
 
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
-                                 struct kvm_kernel_irq_routing_entry *irq_entry)
+static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
 {
-        struct vgic_its *its;
         struct vgic_irq *irq;
-        int ret;
+        unsigned long idx;
+
+        guard(rcu)();
+        xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) {
+                if (!irq->hw || irq->host_irq != host_irq)
+                        continue;
+
+                if (!vgic_try_get_irq_kref(irq))
+                        return NULL;
+
+                return irq;
+        }
+
+        return NULL;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
+{
+        struct vgic_irq *irq;
+        unsigned long flags;
+        int ret = 0;
 
         if (!vgic_supports_direct_msis(kvm))
                 return 0;
 
-        /*
-         * Get the ITS, and escape early on error (not a valid
-         * doorbell for any of our vITSs).
-         */
-        its = vgic_get_its(kvm, irq_entry);
-        if (IS_ERR(its))
+        irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
+        if (!irq)
                 return 0;
 
-        mutex_lock(&its->its_lock);
-
-        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
-                                   irq_entry->msi.data, &irq);
-        if (ret)
-                goto out;
-
-        WARN_ON(irq->hw && irq->host_irq != virq);
+        raw_spin_lock_irqsave(&irq->irq_lock, flags);
+        WARN_ON(irq->hw && irq->host_irq != host_irq);
         if (irq->hw) {
                 atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                 irq->hw = false;
-                ret = its_unmap_vlpi(virq);
+                ret = its_unmap_vlpi(host_irq);
         }
 
-out:
-        mutex_unlock(&its->its_lock);
+        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+        vgic_put_irq(kvm, irq);
         return ret;
 }