Diffstat (limited to 'arch/arm64/include')
28 files changed, 730 insertions, 300 deletions
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 81e4157f92b7..71493b760b83 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -44,6 +44,7 @@ struct cpuinfo_arm64 { u64 reg_dczid; u64 reg_midr; u64 reg_revidr; + u64 reg_aidr; u64 reg_gmid; u64 reg_smidr; u64 reg_mpamidr; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index dffff6763812..661735616787 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -134,6 +134,7 @@ #define HISI_CPU_PART_TSV110 0xD01 #define HISI_CPU_PART_HIP09 0xD02 +#define HISI_CPU_PART_HIP12 0xD06 #define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 @@ -222,6 +223,7 @@ #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09) +#define MIDR_HISI_HIP12 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP12) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index d40e427ddad9..1e7c7475e43f 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -38,7 +38,7 @@ orr x0, x0, #HCR_E2H .LnVHE_\@: - msr hcr_el2, x0 + msr_hcr_el2 x0 isb .endm @@ -204,26 +204,28 @@ orr x0, x0, #(1 << 62) .Lskip_spe_fgt_\@: + +.Lset_debug_fgt_\@: msr_s SYS_HDFGRTR_EL2, x0 msr_s SYS_HDFGWTR_EL2, x0 mov x0, xzr mrs x1, id_aa64pfr1_el1 ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4 - cbz x1, .Lskip_debug_fgt_\@ + cbz x1, .Lskip_sme_fgt_\@ /* Disable nVHE traps of TPIDR2 and SMPRI */ - orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK - orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK + orr x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK + orr x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK -.Lskip_debug_fgt_\@: +.Lskip_sme_fgt_\@: mrs_s x1, SYS_ID_AA64MMFR3_EL1 ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4 cbz x1, .Lskip_pie_fgt_\@ /* Disable trapping of PIR_EL1 / PIRE0_EL1 */ - orr x0, x0, #HFGxTR_EL2_nPIR_EL1 - orr x0, x0, #HFGxTR_EL2_nPIRE0_EL1 + orr x0, x0, #HFGRTR_EL2_nPIR_EL1 + orr x0, x0, #HFGRTR_EL2_nPIRE0_EL1 .Lskip_pie_fgt_\@: mrs_s x1, SYS_ID_AA64MMFR3_EL1 @@ -231,17 +233,19 @@ cbz x1, .Lskip_poe_fgt_\@ /* Disable trapping of POR_EL0 */ - orr x0, x0, #HFGxTR_EL2_nPOR_EL0 + orr x0, x0, #HFGRTR_EL2_nPOR_EL0 .Lskip_poe_fgt_\@: /* GCS depends on PIE so we don't check it if PIE is absent */ mrs_s x1, SYS_ID_AA64PFR1_EL1 ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4 - cbz x1, .Lset_fgt_\@ + cbz x1, .Lskip_gce_fgt_\@ /* Disable traps of access to GCS registers at EL0 and EL1 */ - orr x0, x0, #HFGxTR_EL2_nGCS_EL1_MASK - orr x0, x0, #HFGxTR_EL2_nGCS_EL0_MASK + orr x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK + orr x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK + +.Lskip_gce_fgt_\@: .Lset_fgt_\@: msr_s SYS_HFGRTR_EL2, x0 diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index e4f77757937e..e1deed824464 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -20,7 +20,8 @@ #define ESR_ELx_EC_FP_ASIMD UL(0x07) #define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */ #define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */ -/* Unallocated EC: 
0x0A - 0x0B */ +#define ESR_ELx_EC_OTHER UL(0x0A) +/* Unallocated EC: 0x0B */ #define ESR_ELx_EC_CP14_64 UL(0x0C) #define ESR_ELx_EC_BTI UL(0x0D) #define ESR_ELx_EC_ILL UL(0x0E) @@ -99,6 +100,8 @@ #define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT) /* Shared ISS field definitions for Data/Instruction aborts */ +#define ESR_ELx_VNCR_SHIFT (13) +#define ESR_ELx_VNCR (UL(1) << ESR_ELx_VNCR_SHIFT) #define ESR_ELx_SET_SHIFT (11) #define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT) #define ESR_ELx_FnV_SHIFT (10) @@ -181,6 +184,13 @@ #define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) #define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1) +/* ISS definitions for LD64B/ST64B/{T,P}SBCSYNC instructions */ +#define ESR_ELx_ISS_OTHER_ST64BV (0) +#define ESR_ELx_ISS_OTHER_ST64BV0 (1) +#define ESR_ELx_ISS_OTHER_LDST64B (2) +#define ESR_ELx_ISS_OTHER_TSBCSYNC (3) +#define ESR_ELx_ISS_OTHER_PSBCSYNC (4) + #define DISR_EL1_IDS (UL(1) << 24) /* * DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean @@ -378,12 +388,14 @@ /* * ISS values for SME traps */ +#define ESR_ELx_SME_ISS_SMTC_MASK GENMASK(2, 0) +#define ESR_ELx_SME_ISS_SMTC(esr) ((esr) & ESR_ELx_SME_ISS_SMTC_MASK) -#define ESR_ELx_SME_ISS_SME_DISABLED 0 -#define ESR_ELx_SME_ISS_ILL 1 -#define ESR_ELx_SME_ISS_SM_DISABLED 2 -#define ESR_ELx_SME_ISS_ZA_DISABLED 3 -#define ESR_ELx_SME_ISS_ZT_DISABLED 4 +#define ESR_ELx_SME_ISS_SMTC_SME_DISABLED 0 +#define ESR_ELx_SME_ISS_SMTC_ILL 1 +#define ESR_ELx_SME_ISS_SMTC_SM_DISABLED 2 +#define ESR_ELx_SME_ISS_SMTC_ZA_DISABLED 3 +#define ESR_ELx_SME_ISS_SMTC_ZT_DISABLED 4 /* ISS field definitions for MOPS exceptions */ #define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24) @@ -440,6 +452,11 @@ static inline bool esr_is_cfi_brk(unsigned long esr) (esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE; } +static inline bool esr_is_ubsan_brk(unsigned long esr) +{ + return (esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM; +} + static inline bool esr_fsc_is_translation_fault(unsigned long esr) { esr = esr & ESR_ELx_FSC; diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 87e307804b99..635a43c4ec85 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -48,6 +48,12 @@ enum fixed_addresses { FIX_EARLYCON_MEM_BASE, FIX_TEXT_POKE0, +#ifdef CONFIG_KVM + /* One slot per CPU, mapping the guest's VNCR page at EL2. 
*/ + FIX_VNCR_END, + FIX_VNCR = FIX_VNCR_END + NR_CPUS, +#endif + #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ FIX_APEI_GHES_IRQ, diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 564bc09b3e06..b8cf0ea43cc0 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -6,6 +6,7 @@ #define __ASM_FP_H #include <asm/errno.h> +#include <asm/percpu.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/sigcontext.h> @@ -76,7 +77,6 @@ extern void fpsimd_load_state(struct user_fpsimd_state *state); extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_flush_thread(void); -extern void fpsimd_signal_preserve_current_state(void); extern void fpsimd_preserve_current_state(void); extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); @@ -93,9 +93,12 @@ struct cpu_fp_state { enum fp_type to_save; }; +DECLARE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); + extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state); extern void fpsimd_flush_task_state(struct task_struct *target); +extern void fpsimd_save_and_flush_current_state(void); extern void fpsimd_save_and_flush_cpu_state(void); static inline bool thread_sm_enabled(struct thread_struct *thread) @@ -108,6 +111,8 @@ static inline bool thread_za_enabled(struct thread_struct *thread) return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK); } +extern void task_smstop_sm(struct task_struct *task); + /* Maximum VL that SVE/SME VL-agnostic software can transparently support */ #define VL_ARCH_MAX 0x100 @@ -195,10 +200,8 @@ struct vl_info { extern void sve_alloc(struct task_struct *task, bool flush); extern void fpsimd_release_task(struct task_struct *task); -extern void fpsimd_sync_to_sve(struct task_struct *task); -extern void fpsimd_force_sync_to_sve(struct task_struct *task); -extern void sve_sync_to_fpsimd(struct task_struct *task); -extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); +extern void fpsimd_sync_from_effective_state(struct task_struct *task); +extern void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task); extern int vec_set_vector_length(struct task_struct *task, enum vec_type type, unsigned long vl, unsigned long flags); @@ -292,14 +295,29 @@ static inline bool sve_vq_available(unsigned int vq) return vq_available(ARM64_VEC_SVE, vq); } -size_t sve_state_size(struct task_struct const *task); +static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl) +{ + unsigned int vl = max(sve_vl, sme_vl); + return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)); +} + +/* + * Return how many bytes of memory are required to store the full SVE + * state for task, given task's currently configured vector length. + */ +static inline size_t sve_state_size(struct task_struct const *task) +{ + unsigned int sve_vl = task_get_sve_vl(task); + unsigned int sme_vl = task_get_sme_vl(task); + return __sve_state_size(sve_vl, sme_vl); +} #else /* ! 
CONFIG_ARM64_SVE */ static inline void sve_alloc(struct task_struct *task, bool flush) { } static inline void fpsimd_release_task(struct task_struct *task) { } -static inline void sve_sync_to_fpsimd(struct task_struct *task) { } -static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } +static inline void fpsimd_sync_from_effective_state(struct task_struct *task) { } +static inline void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task) { } static inline int sve_max_virtualisable_vl(void) { @@ -333,6 +351,11 @@ static inline void vec_update_vq_map(enum vec_type t) { } static inline int vec_verify_vq_map(enum vec_type t) { return 0; } static inline void sve_setup(void) { } +static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl) +{ + return 0; +} + static inline size_t sve_state_size(struct task_struct const *task) { return 0; @@ -385,6 +408,16 @@ extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); extern void sme_suspend_exit(void); +static inline size_t __sme_state_size(unsigned int sme_vl) +{ + size_t size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(sme_vl)); + + if (system_supports_sme2()) + size += ZT_SIG_REG_SIZE; + + return size; +} + /* * Return how many bytes of memory are required to store the full SME * specific state for task, given task's currently configured vector @@ -392,15 +425,7 @@ extern void sme_suspend_exit(void); */ static inline size_t sme_state_size(struct task_struct const *task) { - unsigned int vl = task_get_sme_vl(task); - size_t size; - - size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); - - if (system_supports_sme2()) - size += ZT_SIG_REG_SIZE; - - return size; + return __sme_state_size(task_get_sme_vl(task)); } #else @@ -421,6 +446,11 @@ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } static inline int sme_get_current_vl(void) { return -EINVAL; } static inline void sme_suspend_exit(void) { } +static inline size_t __sme_state_size(unsigned int sme_vl) +{ + return 0; +} + static inline size_t sme_state_size(struct task_struct const *task) { return 0; diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index cbfa7b6f2e09..77d6b8c63d4e 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -41,7 +41,7 @@ do { \ \ ___hcr = read_sysreg(hcr_el2); \ if (!(___hcr & HCR_TGE)) { \ - write_sysreg(___hcr | HCR_TGE, hcr_el2); \ + write_sysreg_hcr(___hcr | HCR_TGE); \ isb(); \ } \ /* \ @@ -82,7 +82,7 @@ do { \ */ \ barrier(); \ if (!___ctx->cnt && !(___hcr & HCR_TGE)) \ - write_sysreg(___hcr, hcr_el2); \ + write_sysreg_hcr(___hcr); \ } while (0) static inline void ack_bad_irq(unsigned int irq) diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 07fbf5bf85a7..2a8155c4a882 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -69,29 +69,38 @@ extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #include <asm-generic/hugetlb.h> -#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE -static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, - unsigned long start, - unsigned long end) +static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + unsigned long stride, + bool last_level) { - unsigned long stride = huge_page_size(hstate_vma(vma)); - switch (stride) { #ifndef __PAGETABLE_PMD_FOLDED case PUD_SIZE: - __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1); + 
__flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1); break; #endif case CONT_PMD_SIZE: case PMD_SIZE: - __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2); + __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2); break; case CONT_PTE_SIZE: - __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3); + __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3); break; default: - __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN); + __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN); } } +#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end) +{ + unsigned long stride = huge_page_size(hstate_vma(vma)); + + __flush_hugetlb_tlb_range(vma, start, end, stride, false); +} + #endif /* __ASM_HUGETLB_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index e9c8a581e16f..1da290aeedce 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -12,67 +12,70 @@ #include <asm/sysreg.h> #include <asm/types.h> -/* Hyp Configuration Register (HCR) bits */ - -#define HCR_TID5 (UL(1) << 58) -#define HCR_DCT (UL(1) << 57) -#define HCR_ATA_SHIFT 56 -#define HCR_ATA (UL(1) << HCR_ATA_SHIFT) -#define HCR_TTLBOS (UL(1) << 55) -#define HCR_TTLBIS (UL(1) << 54) -#define HCR_ENSCXT (UL(1) << 53) -#define HCR_TOCU (UL(1) << 52) -#define HCR_AMVOFFEN (UL(1) << 51) -#define HCR_TICAB (UL(1) << 50) -#define HCR_TID4 (UL(1) << 49) -#define HCR_FIEN (UL(1) << 47) -#define HCR_FWB (UL(1) << 46) -#define HCR_NV2 (UL(1) << 45) -#define HCR_AT (UL(1) << 44) -#define HCR_NV1 (UL(1) << 43) -#define HCR_NV (UL(1) << 42) -#define HCR_API (UL(1) << 41) -#define HCR_APK (UL(1) << 40) -#define HCR_TEA (UL(1) << 37) -#define HCR_TERR (UL(1) << 36) -#define HCR_TLOR (UL(1) << 35) -#define HCR_E2H (UL(1) << 34) -#define HCR_ID (UL(1) << 33) -#define HCR_CD (UL(1) << 32) -#define HCR_RW_SHIFT 31 -#define HCR_RW (UL(1) << HCR_RW_SHIFT) -#define HCR_TRVM (UL(1) << 30) -#define HCR_HCD (UL(1) << 29) -#define HCR_TDZ (UL(1) << 28) -#define HCR_TGE (UL(1) << 27) -#define HCR_TVM (UL(1) << 26) -#define HCR_TTLB (UL(1) << 25) -#define HCR_TPU (UL(1) << 24) -#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */ -#define HCR_TSW (UL(1) << 22) -#define HCR_TACR (UL(1) << 21) -#define HCR_TIDCP (UL(1) << 20) -#define HCR_TSC (UL(1) << 19) -#define HCR_TID3 (UL(1) << 18) -#define HCR_TID2 (UL(1) << 17) -#define HCR_TID1 (UL(1) << 16) -#define HCR_TID0 (UL(1) << 15) -#define HCR_TWE (UL(1) << 14) -#define HCR_TWI (UL(1) << 13) -#define HCR_DC (UL(1) << 12) -#define HCR_BSU (3 << 10) -#define HCR_BSU_IS (UL(1) << 10) -#define HCR_FB (UL(1) << 9) -#define HCR_VSE (UL(1) << 8) -#define HCR_VI (UL(1) << 7) -#define HCR_VF (UL(1) << 6) -#define HCR_AMO (UL(1) << 5) -#define HCR_IMO (UL(1) << 4) -#define HCR_FMO (UL(1) << 3) -#define HCR_PTW (UL(1) << 2) -#define HCR_SWIO (UL(1) << 1) -#define HCR_VM (UL(1) << 0) -#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39)) +/* + * Because I'm terribly lazy and that repainting the whole of the KVM + * code with the proper names is a pain, use a helper to map the names + * inherited from AArch32 with the new fancy nomenclature. One day... 
+ */ +#define __HCR(x) HCR_EL2_##x + +#define HCR_TID5 __HCR(TID5) +#define HCR_DCT __HCR(DCT) +#define HCR_ATA_SHIFT __HCR(ATA_SHIFT) +#define HCR_ATA __HCR(ATA) +#define HCR_TTLBOS __HCR(TTLBOS) +#define HCR_TTLBIS __HCR(TTLBIS) +#define HCR_ENSCXT __HCR(EnSCXT) +#define HCR_TOCU __HCR(TOCU) +#define HCR_AMVOFFEN __HCR(AMVOFFEN) +#define HCR_TICAB __HCR(TICAB) +#define HCR_TID4 __HCR(TID4) +#define HCR_FIEN __HCR(FIEN) +#define HCR_FWB __HCR(FWB) +#define HCR_NV2 __HCR(NV2) +#define HCR_AT __HCR(AT) +#define HCR_NV1 __HCR(NV1) +#define HCR_NV __HCR(NV) +#define HCR_API __HCR(API) +#define HCR_APK __HCR(APK) +#define HCR_TEA __HCR(TEA) +#define HCR_TERR __HCR(TERR) +#define HCR_TLOR __HCR(TLOR) +#define HCR_E2H __HCR(E2H) +#define HCR_ID __HCR(ID) +#define HCR_CD __HCR(CD) +#define HCR_RW __HCR(RW) +#define HCR_TRVM __HCR(TRVM) +#define HCR_HCD __HCR(HCD) +#define HCR_TDZ __HCR(TDZ) +#define HCR_TGE __HCR(TGE) +#define HCR_TVM __HCR(TVM) +#define HCR_TTLB __HCR(TTLB) +#define HCR_TPU __HCR(TPU) +#define HCR_TPC __HCR(TPCP) +#define HCR_TSW __HCR(TSW) +#define HCR_TACR __HCR(TACR) +#define HCR_TIDCP __HCR(TIDCP) +#define HCR_TSC __HCR(TSC) +#define HCR_TID3 __HCR(TID3) +#define HCR_TID2 __HCR(TID2) +#define HCR_TID1 __HCR(TID1) +#define HCR_TID0 __HCR(TID0) +#define HCR_TWE __HCR(TWE) +#define HCR_TWI __HCR(TWI) +#define HCR_DC __HCR(DC) +#define HCR_BSU __HCR(BSU) +#define HCR_BSU_IS __HCR(BSU_IS) +#define HCR_FB __HCR(FB) +#define HCR_VSE __HCR(VSE) +#define HCR_VI __HCR(VI) +#define HCR_VF __HCR(VF) +#define HCR_AMO __HCR(AMO) +#define HCR_IMO __HCR(IMO) +#define HCR_FMO __HCR(FMO) +#define HCR_PTW __HCR(PTW) +#define HCR_SWIO __HCR(SWIO) +#define HCR_VM __HCR(VM) /* * The bits we set in HCR: @@ -312,56 +315,19 @@ GENMASK(15, 0)) /* - * FGT register definitions - * - * RES0 and polarity masks as of DDI0487J.a, to be updated as needed. - * We're not using the generated masks as they are usually ahead of - * the published ARM ARM, which we use as a reference. - * - * Once we get to a point where the two describe the same thing, we'll - * merge the definitions. One day. - */ -#define __HFGRTR_EL2_RES0 HFGxTR_EL2_RES0 -#define __HFGRTR_EL2_MASK GENMASK(49, 0) -#define __HFGRTR_EL2_nMASK ~(__HFGRTR_EL2_RES0 | __HFGRTR_EL2_MASK) - -/* - * The HFGWTR bits are a subset of HFGRTR bits. To ensure we don't miss any - * future additions, define __HFGWTR* macros relative to __HFGRTR* ones. + * Polarity masks for HCRX_EL2, limited to the bits that we know about + * at this point in time. It doesn't mean that we actually *handle* + * them, but that at least those that are not advertised to a guest + * will be RES0 for that guest. 
*/ -#define __HFGRTR_ONLY_MASK (BIT(46) | BIT(42) | BIT(40) | BIT(28) | \ - GENMASK(26, 25) | BIT(21) | BIT(18) | \ - GENMASK(15, 14) | GENMASK(10, 9) | BIT(2)) -#define __HFGWTR_EL2_RES0 (__HFGRTR_EL2_RES0 | __HFGRTR_ONLY_MASK) -#define __HFGWTR_EL2_MASK (__HFGRTR_EL2_MASK & ~__HFGRTR_ONLY_MASK) -#define __HFGWTR_EL2_nMASK ~(__HFGWTR_EL2_RES0 | __HFGWTR_EL2_MASK) - -#define __HFGITR_EL2_RES0 HFGITR_EL2_RES0 -#define __HFGITR_EL2_MASK (BIT(62) | BIT(60) | GENMASK(54, 0)) -#define __HFGITR_EL2_nMASK ~(__HFGITR_EL2_RES0 | __HFGITR_EL2_MASK) - -#define __HDFGRTR_EL2_RES0 HDFGRTR_EL2_RES0 -#define __HDFGRTR_EL2_MASK (BIT(63) | GENMASK(58, 50) | GENMASK(48, 43) | \ - GENMASK(41, 40) | GENMASK(37, 22) | \ - GENMASK(19, 9) | GENMASK(7, 0)) -#define __HDFGRTR_EL2_nMASK ~(__HDFGRTR_EL2_RES0 | __HDFGRTR_EL2_MASK) - -#define __HDFGWTR_EL2_RES0 HDFGWTR_EL2_RES0 -#define __HDFGWTR_EL2_MASK (GENMASK(57, 52) | GENMASK(50, 48) | \ - GENMASK(46, 44) | GENMASK(42, 41) | \ - GENMASK(37, 35) | GENMASK(33, 31) | \ - GENMASK(29, 23) | GENMASK(21, 10) | \ - GENMASK(8, 7) | GENMASK(5, 0)) -#define __HDFGWTR_EL2_nMASK ~(__HDFGWTR_EL2_RES0 | __HDFGWTR_EL2_MASK) - -#define __HAFGRTR_EL2_RES0 HAFGRTR_EL2_RES0 -#define __HAFGRTR_EL2_MASK (GENMASK(49, 17) | GENMASK(4, 0)) -#define __HAFGRTR_EL2_nMASK ~(__HAFGRTR_EL2_RES0 | __HAFGRTR_EL2_MASK) - -/* Similar definitions for HCRX_EL2 */ -#define __HCRX_EL2_RES0 HCRX_EL2_RES0 -#define __HCRX_EL2_MASK (BIT(6)) -#define __HCRX_EL2_nMASK ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK) +#define __HCRX_EL2_MASK (BIT_ULL(6)) +#define __HCRX_EL2_nMASK (GENMASK_ULL(24, 14) | \ + GENMASK_ULL(11, 7) | \ + GENMASK_ULL(5, 0)) +#define __HCRX_EL2_RES0 ~(__HCRX_EL2_nMASK | __HCRX_EL2_MASK) +#define __HCRX_EL2_RES1 ~(__HCRX_EL2_nMASK | \ + __HCRX_EL2_MASK | \ + __HCRX_EL2_RES0) /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 08ba91e6fb03..d941abc6b5ee 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -39,7 +39,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 7 +#define KVM_VCPU_MAX_FEATURES 9 #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1) #define KVM_REQ_SLEEP \ @@ -53,6 +53,7 @@ #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) #define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8) #define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9) +#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10) #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) @@ -273,11 +274,17 @@ struct kvm_sysreg_masks; enum fgt_group_id { __NO_FGT_GROUP__, - HFGxTR_GROUP, + HFGRTR_GROUP, + HFGWTR_GROUP = HFGRTR_GROUP, HDFGRTR_GROUP, HDFGWTR_GROUP = HDFGRTR_GROUP, HFGITR_GROUP, HAFGRTR_GROUP, + HFGRTR2_GROUP, + HFGWTR2_GROUP = HFGRTR2_GROUP, + HDFGRTR2_GROUP, + HDFGWTR2_GROUP = HDFGRTR2_GROUP, + HFGITR2_GROUP, /* Must be last */ __NR_FGT_GROUP_IDS__ @@ -359,8 +366,8 @@ struct kvm_arch { cpumask_var_t supported_cpus; - /* PMCR_EL0.N value for the guest */ - u8 pmcr_n; + /* Maximum number of counters for the guest */ + u8 nr_pmu_counters; /* Iterator for idreg debugfs */ u8 idreg_debugfs_iter; @@ -389,6 +396,9 @@ struct kvm_arch { /* Masks for VNCR-backed and general EL2 sysregs */ struct kvm_sysreg_masks *sysreg_masks; + /* Count the number of VNCR_EL2 currently mapped */ + atomic_t vncr_map_count; + /* * For an untrusted host VM, 'pkvm.handle' is used to lookup * the associated pKVM 
instance in the hypervisor. @@ -561,6 +571,13 @@ enum vcpu_sysreg { VNCR(HDFGRTR_EL2), VNCR(HDFGWTR_EL2), VNCR(HAFGRTR_EL2), + VNCR(HFGRTR2_EL2), + VNCR(HFGWTR2_EL2), + VNCR(HFGITR2_EL2), + VNCR(HDFGRTR2_EL2), + VNCR(HDFGWTR2_EL2), + + VNCR(VNCR_EL2), VNCR(CNTVOFF_EL2), VNCR(CNTV_CVAL_EL0), @@ -606,6 +623,37 @@ struct kvm_sysreg_masks { } mask[NR_SYS_REGS - __SANITISED_REG_START__]; }; +struct fgt_masks { + const char *str; + u64 mask; + u64 nmask; + u64 res0; +}; + +extern struct fgt_masks hfgrtr_masks; +extern struct fgt_masks hfgwtr_masks; +extern struct fgt_masks hfgitr_masks; +extern struct fgt_masks hdfgrtr_masks; +extern struct fgt_masks hdfgwtr_masks; +extern struct fgt_masks hafgrtr_masks; +extern struct fgt_masks hfgrtr2_masks; +extern struct fgt_masks hfgwtr2_masks; +extern struct fgt_masks hfgitr2_masks; +extern struct fgt_masks hdfgrtr2_masks; +extern struct fgt_masks hdfgwtr2_masks; + +extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks); +extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks); +extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks); +extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks); +extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks); +extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks); +extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks); +extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks); +extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks); +extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks); +extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks); + struct kvm_cpu_context { struct user_pt_regs regs; /* sp = sp_el0 */ @@ -654,6 +702,8 @@ struct kvm_host_data { #define KVM_HOST_DATA_FLAG_HAS_TRBE 1 #define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 +#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6 +#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7 unsigned long flags; struct kvm_cpu_context host_ctxt; @@ -730,6 +780,8 @@ struct vcpu_reset_state { bool reset; }; +struct vncr_tlb; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -824,6 +876,9 @@ struct kvm_vcpu_arch { /* Per-vcpu CCSIDR override or NULL */ u32 *ccsidr; + + /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */ + struct vncr_tlb *vncr_tlb; }; /* @@ -971,20 +1026,22 @@ struct kvm_vcpu_arch { #define vcpu_sve_zcr_elx(vcpu) \ (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1) -#define vcpu_sve_state_size(vcpu) ({ \ +#define sve_state_size_from_vl(sve_max_vl) ({ \ size_t __size_ret; \ - unsigned int __vcpu_vq; \ + unsigned int __vq; \ \ - if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ + if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \ __size_ret = 0; \ } else { \ - __vcpu_vq = vcpu_sve_max_vq(vcpu); \ - __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \ + __vq = sve_vq_from_vl(sve_max_vl); \ + __size_ret = SVE_SIG_REGS_SIZE(__vq); \ } \ \ __size_ret; \ }) +#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl) + #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | \ KVM_GUESTDBG_USE_HW | \ @@ -1550,12 +1607,16 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val); kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \ kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)) -#define kvm_has_feat(kvm, id, fld, limit) \ +#define __kvm_has_feat(kvm, id, fld, limit) \ kvm_cmp_feat(kvm, id, fld, >=, limit) -#define kvm_has_feat_enum(kvm, id, fld, val) \ +#define kvm_has_feat(kvm, ...) 
__kvm_has_feat(kvm, __VA_ARGS__) + +#define __kvm_has_feat_enum(kvm, id, fld, val) \ kvm_cmp_feat_unsigned(kvm, id, fld, ==, val) +#define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__) + #define kvm_has_feat_range(kvm, id, fld, min, max) \ (kvm_cmp_feat(kvm, id, fld, >=, min) && \ kvm_cmp_feat(kvm, id, fld, <=, max)) @@ -1593,4 +1654,9 @@ static inline bool kvm_arch_has_irq_bypass(void) return true; } +void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt); +void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1); +void check_feature_map(void); + + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index 692f403c1896..0bd07ea068a1 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -231,6 +231,38 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans) shift; \ }) +static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid) +{ + u64 base, tg, num, scale; + int shift; + + tg = FIELD_GET(GENMASK(47, 46), val); + + switch(tg) { + case 1: + shift = 12; + break; + case 2: + shift = 14; + break; + case 3: + default: /* IMPDEF: handle tg==0 as 64k */ + shift = 16; + break; + } + + base = (val & GENMASK(36, 0)) << shift; + + if (asid) + *asid = FIELD_GET(TLBIR_ASID_MASK, val); + + scale = FIELD_GET(GENMASK(45, 44), val); + num = FIELD_GET(GENMASK(43, 39), val); + *range = __TLBI_RANGE_PAGES(num, scale) << shift; + + return base; +} + static inline unsigned int ps_to_output_size(unsigned int ps) { switch (ps) { @@ -245,4 +277,72 @@ static inline unsigned int ps_to_output_size(unsigned int ps) } } +enum trans_regime { + TR_EL10, + TR_EL20, + TR_EL2, +}; + +struct s1_walk_info { + u64 baddr; + enum trans_regime regime; + unsigned int max_oa_bits; + unsigned int pgshift; + unsigned int txsz; + int sl; + bool as_el0; + bool hpd; + bool e0poe; + bool poe; + bool pan; + bool be; + bool s2; +}; + +struct s1_walk_result { + union { + struct { + u64 desc; + u64 pa; + s8 level; + u8 APTable; + bool nG; + u16 asid; + bool UXNTable; + bool PXNTable; + bool uwxn; + bool uov; + bool ur; + bool uw; + bool ux; + bool pwxn; + bool pov; + bool pr; + bool pw; + bool px; + }; + struct { + u8 fst; + bool ptw; + bool s2; + }; + }; + bool failed; +}; + +int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, + struct s1_walk_result *wr, u64 va); + +/* VNCR management */ +int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu); +int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu); +void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val); + +#define vncr_fixmap(c) \ + ({ \ + u32 __c = (c); \ + BUG_ON(__c >= NR_CPUS); \ + (FIX_VNCR - __c); \ + }) + #endif /* __ARM64_KVM_NESTED_H */ diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 6b9d274052c7..2888b5d03757 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -59,6 +59,11 @@ typedef u64 kvm_pte_t; #define KVM_PHYS_INVALID (-1ULL) +#define KVM_PTE_TYPE BIT(1) +#define KVM_PTE_TYPE_BLOCK 0 +#define KVM_PTE_TYPE_PAGE 1 +#define KVM_PTE_TYPE_TABLE 1 + #define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2) #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2) @@ -413,7 +418,7 @@ static inline bool kvm_pgtable_walk_lock_held(void) */ struct kvm_pgtable { union { - struct rb_root pkvm_mappings; + struct rb_root_cached pkvm_mappings; struct { u32 ia_bits; s8 start_level; diff --git 
a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index abd693ce5b93..ea58282f59bb 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -135,6 +135,12 @@ static inline unsigned long host_s2_pgtable_pages(void) return res; } +#ifdef CONFIG_NVHE_EL2_DEBUG +static inline unsigned long pkvm_selftest_pages(void) { return 32; } +#else +static inline unsigned long pkvm_selftest_pages(void) { return 0; } +#endif + #define KVM_FFA_MBOX_NR_PAGES 1 static inline unsigned long hyp_ffa_proxy_pages(void) @@ -167,6 +173,8 @@ struct pkvm_mapping { struct rb_node node; u64 gfn; u64 pfn; + u64 nr_pages; + u64 __subtree_last; /* Internal member for interval tree */ }; int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h index a2a1eeb36d4b..314b2b52025f 100644 --- a/arch/arm64/include/asm/mem_encrypt.h +++ b/arch/arm64/include/asm/mem_encrypt.h @@ -4,6 +4,8 @@ #include <asm/rsi.h> +struct device; + struct arm64_mem_crypt_ops { int (*encrypt)(unsigned long addr, int numpages); int (*decrypt)(unsigned long addr, int numpages); diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h index 6d6d4065b0cb..265e8301d7ba 100644 --- a/arch/arm64/include/asm/pgtable-types.h +++ b/arch/arm64/include/asm/pgtable-types.h @@ -11,11 +11,19 @@ #include <asm/types.h> -typedef u64 pteval_t; -typedef u64 pmdval_t; -typedef u64 pudval_t; -typedef u64 p4dval_t; -typedef u64 pgdval_t; +/* + * Page Table Descriptor + * + * Generic page table descriptor format from which + * all level specific descriptors can be derived. + */ +typedef u64 ptdesc_t; + +typedef ptdesc_t pteval_t; +typedef ptdesc_t pmdval_t; +typedef ptdesc_t pudval_t; +typedef ptdesc_t p4dval_t; +typedef ptdesc_t pgdval_t; /* * These are used to make use of C type-checking.. @@ -46,7 +54,7 @@ typedef struct { pgdval_t pgd; } pgd_t; #define pgd_val(x) ((x).pgd) #define __pgd(x) ((pgd_t) { (x) } ) -typedef struct { pteval_t pgprot; } pgprot_t; +typedef struct { ptdesc_t pgprot; } pgprot_t; #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index d3b538be1500..88db8a0c0b37 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -40,6 +40,85 @@ #include <linux/sched.h> #include <linux/page_table_check.h> +static inline void emit_pte_barriers(void) +{ + /* + * These barriers are emitted under certain conditions after a pte entry + * was modified (see e.g. __set_pte_complete()). The dsb makes the store + * visible to the table walker. The isb ensures that any previous + * speculative "invalid translation" marker that is in the CPU's + * pipeline gets cleared, so that any access to that address after + * setting the pte to valid won't cause a spurious fault. If the thread + * gets preempted after storing to the pgtable but before emitting these + * barriers, __switch_to() emits a dsb which ensure the walker gets to + * see the store. There is no guarantee of an isb being issued though. + * This is safe because it will still get issued (albeit on a + * potentially different CPU) when the thread starts running again, + * before any access to the address. 
+ */ + dsb(ishst); + isb(); +} + +static inline void queue_pte_barriers(void) +{ + unsigned long flags; + + if (in_interrupt()) { + emit_pte_barriers(); + return; + } + + flags = read_thread_flags(); + + if (flags & BIT(TIF_LAZY_MMU)) { + /* Avoid the atomic op if already set. */ + if (!(flags & BIT(TIF_LAZY_MMU_PENDING))) + set_thread_flag(TIF_LAZY_MMU_PENDING); + } else { + emit_pte_barriers(); + } +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE +static inline void arch_enter_lazy_mmu_mode(void) +{ + /* + * lazy_mmu_mode is not supposed to permit nesting. But in practice this + * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation + * inside a lazy_mmu_mode section (such as zap_pte_range()) will change + * permissions on the linear map with apply_to_page_range(), which + * re-enters lazy_mmu_mode. So we tolerate nesting in our + * implementation. The first call to arch_leave_lazy_mmu_mode() will + * flush and clear the flag such that the remainder of the work in the + * outer nest behaves as if outside of lazy mmu mode. This is safe and + * keeps tracking simple. + */ + + if (in_interrupt()) + return; + + set_thread_flag(TIF_LAZY_MMU); +} + +static inline void arch_flush_lazy_mmu_mode(void) +{ + if (in_interrupt()) + return; + + if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING)) + emit_pte_barriers(); +} + +static inline void arch_leave_lazy_mmu_mode(void) +{ + if (in_interrupt()) + return; + + arch_flush_lazy_mmu_mode(); + clear_thread_flag(TIF_LAZY_MMU); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE @@ -320,18 +399,20 @@ static inline void __set_pte_nosync(pte_t *ptep, pte_t pte) WRITE_ONCE(*ptep, pte); } -static inline void __set_pte(pte_t *ptep, pte_t pte) +static inline void __set_pte_complete(pte_t pte) { - __set_pte_nosync(ptep, pte); - /* * Only if the new pte is valid and kernel, otherwise TLB maintenance - * or update_mmu_cache() have the necessary barriers. + * has the necessary barriers. */ - if (pte_valid_not_user(pte)) { - dsb(ishst); - isb(); - } + if (pte_valid_not_user(pte)) + queue_pte_barriers(); +} + +static inline void __set_pte(pte_t *ptep, pte_t pte) +{ + __set_pte_nosync(ptep, pte); + __set_pte_complete(pte); } static inline pte_t __ptep_get(pte_t *ptep) @@ -423,23 +504,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); } -static inline void __set_ptes(struct mm_struct *mm, - unsigned long __always_unused addr, - pte_t *ptep, pte_t pte, unsigned int nr) -{ - page_table_check_ptes_set(mm, ptep, pte, nr); - __sync_cache_and_tags(pte, nr); - - for (;;) { - __check_safe_pte_update(mm, ptep, pte); - __set_pte(ptep, pte); - if (--nr == 0) - break; - ptep++; - pte = pte_advance_pfn(pte, 1); - } -} - /* * Hugetlb definitions. 
*/ @@ -609,7 +673,6 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd) #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) -#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) #define pud_young(pud) pte_young(pud_pte(pud)) #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) @@ -649,30 +712,64 @@ static inline pgprot_t pud_pgprot(pud_t pud) return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud)); } -static inline void __set_pte_at(struct mm_struct *mm, - unsigned long __always_unused addr, - pte_t *ptep, pte_t pte, unsigned int nr) +static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep, + pte_t pte, unsigned int nr, + unsigned long pgsize) +{ + unsigned long stride = pgsize >> PAGE_SHIFT; + + switch (pgsize) { + case PAGE_SIZE: + page_table_check_ptes_set(mm, ptep, pte, nr); + break; + case PMD_SIZE: + page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr); + break; +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr); + break; +#endif + default: + VM_WARN_ON(1); + } + + __sync_cache_and_tags(pte, nr * stride); + + for (;;) { + __check_safe_pte_update(mm, ptep, pte); + __set_pte_nosync(ptep, pte); + if (--nr == 0) + break; + ptep++; + pte = pte_advance_pfn(pte, stride); + } + + __set_pte_complete(pte); +} + +static inline void __set_ptes(struct mm_struct *mm, + unsigned long __always_unused addr, + pte_t *ptep, pte_t pte, unsigned int nr) { - __sync_cache_and_tags(pte, nr); - __check_safe_pte_update(mm, ptep, pte); - __set_pte(ptep, pte); + __set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE); } -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmd) +static inline void __set_pmds(struct mm_struct *mm, + unsigned long __always_unused addr, + pmd_t *pmdp, pmd_t pmd, unsigned int nr) { - page_table_check_pmd_set(mm, pmdp, pmd); - return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), - PMD_SIZE >> PAGE_SHIFT); + __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE); } +#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1) -static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pud) +static inline void __set_puds(struct mm_struct *mm, + unsigned long __always_unused addr, + pud_t *pudp, pud_t pud, unsigned int nr) { - page_table_check_pud_set(mm, pudp, pud); - return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud), - PUD_SIZE >> PAGE_SHIFT); + __set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE); } +#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1) #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys) @@ -739,8 +836,7 @@ static inline int pmd_trans_huge(pmd_t pmd) * If pmd is present-invalid, pmd_table() won't detect it * as a table, so force the valid bit for the comparison. 
*/ - return pmd_val(pmd) && pmd_present(pmd) && - !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID)); + return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID)); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -754,8 +850,6 @@ static inline bool pud_table(pud_t pud) { return true; } PUD_TYPE_TABLE) #endif -extern pgd_t init_pg_dir[]; -extern pgd_t init_pg_end[]; extern pgd_t swapper_pg_dir[]; extern pgd_t idmap_pg_dir[]; extern pgd_t tramp_pg_dir[]; @@ -780,10 +874,8 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) WRITE_ONCE(*pmdp, pmd); - if (pmd_valid(pmd)) { - dsb(ishst); - isb(); - } + if (pmd_valid(pmd)) + queue_pte_barriers(); } static inline void pmd_clear(pmd_t *pmdp) @@ -813,12 +905,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) /* use ONLY for statically allocated translation tables */ #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr)))) -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) - #if CONFIG_PGTABLE_LEVELS > 2 #define pmd_ERROR(e) \ @@ -848,10 +934,8 @@ static inline void set_pud(pud_t *pudp, pud_t pud) WRITE_ONCE(*pudp, pud); - if (pud_valid(pud)) { - dsb(ishst); - isb(); - } + if (pud_valid(pud)) + queue_pte_barriers(); } static inline void pud_clear(pud_t *pudp) @@ -930,8 +1014,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) } WRITE_ONCE(*p4dp, p4d); - dsb(ishst); - isb(); + queue_pte_barriers(); } static inline void p4d_clear(p4d_t *p4dp) @@ -1059,8 +1142,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) } WRITE_ONCE(*pgdp, pgd); - dsb(ishst); - isb(); + queue_pte_barriers(); } static inline void pgd_clear(pgd_t *pgdp) @@ -1301,16 +1383,37 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */ -static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, - unsigned long address, pte_t *ptep) +static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm, + pte_t *ptep, + unsigned long pgsize) { pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); - page_table_check_pte_clear(mm, pte); + switch (pgsize) { + case PAGE_SIZE: + page_table_check_pte_clear(mm, pte); + break; + case PMD_SIZE: + page_table_check_pmd_clear(mm, pte_pmd(pte)); + break; +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + page_table_check_pud_clear(mm, pte_pud(pte)); + break; +#endif + default: + VM_WARN_ON(1); + } return pte; } +static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE); +} + static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr, int full) { @@ -1347,11 +1450,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0)); - - page_table_check_pmd_clear(mm, pmd); - - return pmd; + return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE)); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index b2931d1ae0fb..fded5358641f 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -24,8 +24,8 @@ struct ptdump_info { }; 
struct ptdump_prot_bits { - u64 mask; - u64 val; + ptdesc_t mask; + ptdesc_t val; const char *set; const char *clear; }; @@ -34,7 +34,7 @@ struct ptdump_pg_level { const struct ptdump_prot_bits *bits; char name[4]; int num; - u64 mask; + ptdesc_t mask; }; /* @@ -51,7 +51,7 @@ struct ptdump_pg_state { const struct mm_struct *mm; unsigned long start_address; int level; - u64 current_prot; + ptdesc_t current_prot; bool check_wx; unsigned long wx_pages; unsigned long uxn_pages; @@ -59,7 +59,13 @@ struct ptdump_pg_state { void ptdump_walk(struct seq_file *s, struct ptdump_info *info); void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, - u64 val); + pteval_t val); +void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte); +void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd); +void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud); +void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d); +void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd); +void note_page_flush(struct ptdump_state *st); #ifdef CONFIG_PTDUMP_DEBUGFS #define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64 void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name); @@ -69,7 +75,13 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info, #endif /* CONFIG_PTDUMP_DEBUGFS */ #else static inline void note_page(struct ptdump_state *pt_st, unsigned long addr, - int level, u64 val) { } + int level, pteval_t val) { } +static inline void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte) { } +static inline void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd) { } +static inline void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud) { } +static inline void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { } +static inline void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd) { } +static inline void note_page_flush(struct ptdump_state *st) { } #endif /* CONFIG_PTDUMP */ #endif /* __ASM_PTDUMP_H */ diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h index e6a211001bd3..2c8763876dfb 100644 --- a/arch/arm64/include/asm/rsi_cmds.h +++ b/arch/arm64/include/asm/rsi_cmds.h @@ -7,6 +7,8 @@ #define __ASM_RSI_CMDS_H #include <linux/arm-smccc.h> +#include <linux/string.h> +#include <asm/memory.h> #include <asm/rsi_smc.h> diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h index 56f7b1d4d54b..97d9256d33c9 100644 --- a/arch/arm64/include/asm/rwonce.h +++ b/arch/arm64/include/asm/rwonce.h @@ -12,16 +12,12 @@ #ifndef BUILD_VDSO -#ifdef CONFIG_AS_HAS_LDAPR #define __LOAD_RCPC(sfx, regs...) \ ALTERNATIVE( \ "ldar" #sfx "\t" #regs, \ ".arch_extension rcpc\n" \ "ldapr" #sfx "\t" #regs, \ ARM64_HAS_LDAPR) -#else -#define __LOAD_RCPC(sfx, regs...) 
"ldar" #sfx "\t" #regs -#endif /* CONFIG_AS_HAS_LDAPR */ /* * When building with LTO, there is an increased risk of the compiler diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 40971ac1303f..51b0d594239e 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -11,6 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; +extern char __hyp_data_start[], __hyp_data_end[]; extern char __hyp_rodata_start[], __hyp_rodata_end[]; extern char __hyp_reloc_begin[], __hyp_reloc_end[]; extern char __hyp_bss_start[], __hyp_bss_end[]; diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ab8e14b96f68..712daa90e643 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -61,6 +61,22 @@ static inline void syscall_set_return_value(struct task_struct *task, regs->regs[0] = val; } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + regs->syscallno = nr; + if (nr == -1) { + /* + * When the syscall number is set to -1, the syscall will be + * skipped. In this case the syscall return value has to be + * set explicitly, otherwise the first syscall argument is + * returned as the syscall return value. + */ + syscall_set_return_value(task, regs, -ENOSYS, 0); + } +} + #define SYSCALL_MAX_ARGS 6 static inline void syscall_get_arguments(struct task_struct *task, @@ -73,6 +89,19 @@ static inline void syscall_get_arguments(struct task_struct *task, memcpy(args, ®s->regs[1], 5 * sizeof(args[0])); } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + memcpy(®s->regs[0], args, 6 * sizeof(args[0])); + /* + * Also copy the first argument into orig_x0 + * so that syscall_get_arguments() would return it + * instead of the previous value. + */ + regs->orig_x0 = regs->regs[0]; +} + /* * We don't care about endianness (__AUDIT_ARCH_LE bit) here because * AArch64 has the same system calls both on little- and big- endian. 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 2639d3633073..cd853801a8f7 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -117,6 +117,7 @@ #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) +/* Data cache zero operations */ #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) #define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) #define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) @@ -153,11 +154,13 @@ #define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3) #define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5) -/* Data cache zero operations */ #define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1) #define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3) #define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4) +#define SYS_DC_CIVAPS sys_insn(1, 0, 7, 15, 1) +#define SYS_DC_CIGDVAPS sys_insn(1, 0, 7, 15, 5) + /* * Automatically generated definitions for system registers, the * manual encodings below are in the process of being converted to @@ -497,12 +500,22 @@ #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) +#define SYS_PMEVCNTSVRn_EL1(n) sys_reg(2, 0, 14, __CNTR_CRm(n), __PMEV_op2(n)) #define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n)) #define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3)) #define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n)) #define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) +#define SYS_SPMCGCRn_EL1(n) sys_reg(2, 0, 9, 13, ((n) & 1)) + +#define __SPMEV_op2(n) ((n) & 0x7) +#define __SPMEV_crm(p, n) ((((p) & 7) << 1) | (((n) >> 3) & 1)) +#define SYS_SPMEVCNTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b000, n), __SPMEV_op2(n)) +#define SYS_SPMEVFILT2Rn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b011, n), __SPMEV_op2(n)) +#define SYS_SPMEVFILTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b010, n), __SPMEV_op2(n)) +#define SYS_SPMEVTYPERn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b001, n), __SPMEV_op2(n)) + #define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0) #define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5) @@ -521,7 +534,6 @@ #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) -#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) @@ -608,28 +620,18 @@ /* VHE encodings for architectural EL0/1 system registers */ #define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0) -#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) -#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) -#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3) -#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) -#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1) -#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) -#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) -#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3) #define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) #define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) -#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) #define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) -#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) #define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define 
SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) @@ -1091,6 +1093,15 @@ __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt)) .endm + .macro msr_hcr_el2, reg +#if IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) + dsb nsh + msr hcr_el2, \reg + isb +#else + msr hcr_el2, \reg +#endif + .endm #else #include <linux/bitfield.h> @@ -1178,6 +1189,13 @@ write_sysreg(__scs_new, sysreg); \ } while (0) +#define sysreg_clear_set_hcr(clear, set) do { \ + u64 __scs_val = read_sysreg(hcr_el2); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg_hcr(__scs_new); \ +} while (0) + #define sysreg_clear_set_s(sysreg, clear, set) do { \ u64 __scs_val = read_sysreg_s(sysreg); \ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ @@ -1185,6 +1203,17 @@ write_sysreg_s(__scs_new, sysreg); \ } while (0) +#define write_sysreg_hcr(__val) do { \ + if (IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) && \ + (!system_capabilities_finalized() || \ + alternative_has_cap_unlikely(ARM64_WORKAROUND_AMPERE_AC04_CPU_23))) \ + asm volatile("dsb nsh; msr hcr_el2, %x0; isb" \ + : : "rZ" (__val)); \ + else \ + asm volatile("msr hcr_el2, %x0" \ + : : "rZ" (__val)); \ +} while (0) + #define read_sysreg_par() ({ \ u64 par; \ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 1114c1c3300a..1269c2487574 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -59,11 +59,12 @@ void arch_setup_new_exec(void); #define TIF_SIGPENDING 0 /* signal pending */ #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ -#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ -#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ -#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ -#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ -#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ +#define TIF_NEED_RESCHED_LAZY 2 /* Lazy rescheduling needed */ +#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */ +#define TIF_FOREIGN_FPSTATE 4 /* CPU's FP state is not current's */ +#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */ +#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -82,9 +83,12 @@ void arch_setup_new_exec(void); #define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ #define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */ #define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */ +#define TIF_LAZY_MMU 31 /* Task in lazy mmu mode */ +#define TIF_LAZY_MMU_PENDING 32 /* Ops pending for lazy mmu mode exit */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) @@ -100,10 +104,10 @@ void arch_setup_new_exec(void); #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_TSC_SIGSEGV (1 << TIF_TSC_SIGSEGV) -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | 
_TIF_NEED_RESCHED_LAZY | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index 3322c7047d84..da1ab8759592 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -8,6 +8,7 @@ #ifndef __ASSEMBLY__ #include <asm/alternative.h> +#include <asm/arch_timer.h> #include <asm/barrier.h> #include <asm/unistd.h> #include <asm/sysreg.h> @@ -69,8 +70,6 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd) { - u64 res; - /* * Core checks for mode already, so this raced against a concurrent * update. Return something. Core will do another round and then @@ -79,24 +78,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, if (clock_mode == VDSO_CLOCKMODE_NONE) return 0; - /* - * If FEAT_ECV is available, use the self-synchronizing counter. - * Otherwise the isb is required to prevent that the counter value - * is speculated. - */ - asm volatile( - ALTERNATIVE("isb\n" - "mrs %0, cntvct_el0", - "nop\n" - __mrs_s("%0", SYS_CNTVCTSS_EL0), - ARM64_HAS_ECV) - : "=r" (res) - : - : "memory"); - - arch_counter_enforce_ordering(res); - - return res; + return __arch_counter_get_cntvct(); } #if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index ebf4a9f943ed..aa280f356b96 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -67,7 +67,8 @@ * __boot_cpu_mode records what mode CPUs were booted in. * A correctly-implemented bootloader must start all CPUs in the same mode: * In this case, both 32bit halves of __boot_cpu_mode will contain the - * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2). + * same value (either BOOT_CPU_MODE_EL1 if booted in EL1, BOOT_CPU_MODE_EL2 if + * booted in EL2). * * Should the bootloader fail to do this, the two values will be different. * This allows the kernel to flag an error when the secondaries have come up. diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index 38fafffe699f..12f534e8f3ed 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -23,6 +23,51 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } +#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, + unsigned long end, u64 pfn, + unsigned int max_page_shift) +{ + /* + * If the block is at least CONT_PTE_SIZE in size, and is naturally + * aligned in both virtual and physical space, then we can pte-map the + * block using the PTE_CONT bit for more efficient use of the TLB. 
+ */ + if (max_page_shift < CONT_PTE_SHIFT) + return PAGE_SIZE; + + if (end - addr < CONT_PTE_SIZE) + return PAGE_SIZE; + + if (!IS_ALIGNED(addr, CONT_PTE_SIZE)) + return PAGE_SIZE; + + if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE)) + return PAGE_SIZE; + + return CONT_PTE_SIZE; +} + +#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size +static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr, + pte_t *ptep) +{ + /* + * The caller handles alignment so it's sufficient just to check + * PTE_CONT. + */ + return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE; +} + +#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift +static inline int arch_vmap_pte_supported_shift(unsigned long size) +{ + if (size >= CONT_PTE_SIZE) + return CONT_PTE_SHIFT; + + return PAGE_SHIFT; +} + #endif #define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h index 4f9bbd4d6c26..6f556e993644 100644 --- a/arch/arm64/include/asm/vncr_mapping.h +++ b/arch/arm64/include/asm/vncr_mapping.h @@ -35,6 +35,8 @@ #define VNCR_CNTP_CTL_EL0 0x180 #define VNCR_SCXTNUM_EL1 0x188 #define VNCR_TFSR_EL1 0x190 +#define VNCR_HDFGRTR2_EL2 0x1A0 +#define VNCR_HDFGWTR2_EL2 0x1B0 #define VNCR_HFGRTR_EL2 0x1B8 #define VNCR_HFGWTR_EL2 0x1C0 #define VNCR_HFGITR_EL2 0x1C8 @@ -52,6 +54,9 @@ #define VNCR_PIRE0_EL1 0x290 #define VNCR_PIR_EL1 0x2A0 #define VNCR_POR_EL1 0x2A8 +#define VNCR_HFGRTR2_EL2 0x2C0 +#define VNCR_HFGWTR2_EL2 0x2C8 +#define VNCR_HFGITR2_EL2 0x310 #define VNCR_ICH_LR0_EL2 0x400 #define VNCR_ICH_LR1_EL2 0x408 #define VNCR_ICH_LR2_EL2 0x410 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index af9d9acaf997..ed5f3892674c 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -431,10 +431,11 @@ enum { /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_PMU_V3_FILTER 2 -#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 +#define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 +#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 |
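For reference, below is a minimal, self-contained userspace model of the range-TLBI operand decoding that the asm/kvm_nested.h hunk above adds as decode_range_tlbi(). The FIELD() helper, the sample operand value and main() are illustrative additions, not kernel code; the field layout (TG[47:46], SCALE[45:44], NUM[43:39], BaseADDR[36:0], ASID[63:48]) and the page-count formula (num + 1) << (5 * scale + 1) follow the hunk and the kernel's __TLBI_RANGE_PAGES() definition.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [hi:lo] of a 64-bit value (stand-in for FIELD_GET()). */
#define FIELD(hi, lo, val) \
        (((val) >> (lo)) & ((UINT64_C(1) << ((hi) - (lo) + 1)) - 1))

/* Model of decode_range_tlbi(): returns the base VA, fills range/asid. */
static uint64_t model_decode_range_tlbi(uint64_t val, uint64_t *range,
                                        uint16_t *asid)
{
        uint64_t scale = FIELD(45, 44, val);
        uint64_t num = FIELD(43, 39, val);
        int shift;

        switch (FIELD(47, 46, val)) {   /* TG: translation granule */
        case 1:
                shift = 12;             /* 4kB granule */
                break;
        case 2:
                shift = 14;             /* 16kB granule */
                break;
        case 3:
        default:                        /* TG == 0 treated as 64kB */
                shift = 16;
                break;
        }

        if (asid)
                *asid = (uint16_t)FIELD(63, 48, val);

        /* __TLBI_RANGE_PAGES(num, scale) == (num + 1) << (5 * scale + 1) */
        *range = ((num + 1) << (5 * scale + 1)) << shift;

        /* BaseADDR is expressed in units of the granule size */
        return FIELD(36, 0, val) << shift;
}

int main(void)
{
        /* Made-up operand: ASID 1, 4kB granule, SCALE 0, NUM 0, base page 0x12345 */
        uint64_t val = 0x0001400000012345ULL;
        uint64_t range;
        uint16_t asid;
        uint64_t base = model_decode_range_tlbi(val, &range, &asid);

        printf("base=0x%llx range=0x%llx bytes asid=%u\n",
               (unsigned long long)base, (unsigned long long)range,
               (unsigned)asid);
        return 0;
}

For the sample value this prints base=0x12345000 range=0x2000 bytes asid=1, i.e. an invalidation covering two 4kB pages starting at 0x12345000.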