Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--   arch/riscv/mm/cacheflush.c | 31
-rw-r--r--   arch/riscv/mm/pgtable.c    | 10
-rw-r--r--   arch/riscv/mm/tlbflush.c   | 38
3 files changed, 74 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index b81672729887..4ca5aafce22e 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -24,7 +24,20 @@ void flush_icache_all(void)
 
 	if (num_online_cpus() < 2)
 		return;
-	else if (riscv_use_sbi_for_rfence())
+
+	/*
+	 * Make sure all previous writes to the D$ are ordered before making
+	 * the IPI. The RISC-V spec states that a hart must execute a data fence
+	 * before triggering a remote fence.i in order to make the modification
+	 * visible for remote harts.
+	 *
+	 * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
+	 * S-IMSIC, so the fence ensures previous data writes "happen before"
+	 * the MMIO.
+	 */
+	RISCV_FENCE(w, o);
+
+	if (riscv_use_sbi_for_rfence())
 		sbi_remote_fence_i(NULL);
 	else
 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
@@ -101,6 +114,9 @@ EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
 unsigned int riscv_cboz_block_size;
 EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
 
+unsigned int riscv_cbop_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbop_block_size);
+
 static void __init cbo_get_block_size(struct device_node *node,
 				      const char *name, u32 *block_size,
 				      unsigned long *first_hartid)
@@ -125,8 +141,8 @@ static void __init cbo_get_block_size(struct device_node *node,
 
 void __init riscv_init_cbo_blocksizes(void)
 {
-	unsigned long cbom_hartid, cboz_hartid;
-	u32 cbom_block_size = 0, cboz_block_size = 0;
+	unsigned long cbom_hartid, cboz_hartid, cbop_hartid;
+	u32 cbom_block_size = 0, cboz_block_size = 0, cbop_block_size = 0;
 	struct device_node *node;
 	struct acpi_table_header *rhct;
 	acpi_status status;
@@ -138,13 +154,15 @@ void __init riscv_init_cbo_blocksizes(void)
 					   &cbom_block_size, &cbom_hartid);
 			cbo_get_block_size(node, "riscv,cboz-block-size",
 					   &cboz_block_size, &cboz_hartid);
+			cbo_get_block_size(node, "riscv,cbop-block-size",
+					   &cbop_block_size, &cbop_hartid);
 		}
 	} else {
 		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
 		if (ACPI_FAILURE(status))
 			return;
 
-		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
+		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, &cbop_block_size);
 		acpi_put_table((struct acpi_table_header *)rhct);
 	}
 
@@ -153,6 +171,9 @@ void __init riscv_init_cbo_blocksizes(void)
 
 	if (cboz_block_size)
 		riscv_cboz_block_size = cboz_block_size;
+
+	if (cbop_block_size)
+		riscv_cbop_block_size = cbop_block_size;
 }
 
 #ifdef CONFIG_SMP
@@ -172,7 +193,7 @@ static void set_icache_stale_mask(void)
 	stale_cpu = cpumask_test_cpu(cpu, mask);
 
 	cpumask_setall(mask);
-	cpumask_assign_cpu(cpu, mask, stale_cpu);
+	__assign_cpu(cpu, mask, stale_cpu);
 	put_cpu();
 }
 #endif
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 4ae67324f992..8b6c0a112a8d 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -154,4 +154,14 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 	flush_tlb_mm(vma->vm_mm);
 	return pmd;
 }
+
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+		      pud_t *pudp)
+{
+	VM_WARN_ON_ONCE(!pud_present(*pudp));
+	pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));
+
+	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+	return old;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index f9e27ba1df99..e737ba7949b1 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -7,6 +7,27 @@
 #include <linux/mmu_notifier.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
+#include <asm/cpufeature.h>
+
+#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
+
+static inline void local_sfence_inval_ir(void)
+{
+	asm volatile(SFENCE_INVAL_IR() ::: "memory");
+}
+
+static inline void local_sfence_w_inval(void)
+{
+	asm volatile(SFENCE_W_INVAL() ::: "memory");
+}
+
+static inline void local_sinval_vma(unsigned long vma, unsigned long asid)
+{
+	if (asid != FLUSH_TLB_NO_ASID)
+		asm volatile(SINVAL_VMA(%0, %1) : : "r" (vma), "r" (asid) : "memory");
+	else
+		asm volatile(SINVAL_VMA(%0, zero) : : "r" (vma) : "memory");
+}
 
 /*
  * Flush entire TLB if number of entries to be flushed is greater
@@ -27,6 +48,16 @@ static void local_flush_tlb_range_threshold_asid(unsigned long start,
 		return;
 	}
 
+	if (has_svinval()) {
+		local_sfence_w_inval();
+		for (i = 0; i < nr_ptes_in_range; ++i) {
+			local_sinval_vma(start, asid);
+			start += stride;
+		}
+		local_sfence_inval_ir();
+		return;
+	}
+
 	for (i = 0; i < nr_ptes_in_range; ++i) {
 		local_flush_tlb_page_asid(start, asid);
 		start += stride;
@@ -182,6 +213,13 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
 			  start, end - start, PMD_SIZE);
 }
+
+void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end)
+{
+	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
+			  start, end - start, PUD_SIZE);
+}
 #endif
 
 bool arch_tlbbatch_should_defer(struct mm_struct *mm)
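Note on the tlbflush.c hunks: Svinval splits a fully ordered sfence.vma loop into a batched sequence — one sfence.w.inval to order prior page-table writes, a run of sinval.vma invalidations over the range, and a closing sfence.inval.ir to make those invalidations visible to later implicit accesses (page-table walks). The sketch below is a minimal, hypothetical illustration of that pattern outside the kernel's helpers: svinval_flush_range is a made-up name, the raw mnemonics assume an assembler built with the Svinval extension enabled, and the FLUSH_TLB_NO_ASID special case handled by local_sinval_vma() above is omitted.

/*
 * Illustrative only; not kernel code. Shows the Svinval batching pattern
 * used in local_flush_tlb_range_threshold_asid() above.
 */
static inline void svinval_flush_range(unsigned long start,
				       unsigned long nr_pages,
				       unsigned long stride,
				       unsigned long asid)
{
	unsigned long addr = start;
	unsigned long i;

	/* Order earlier page-table writes before the invalidations below. */
	asm volatile("sfence.w.inval" ::: "memory");

	/* Queue one address/ASID-specific invalidation per page in the range. */
	for (i = 0; i < nr_pages; i++, addr += stride)
		asm volatile("sinval.vma %0, %1"
			     : : "r" (addr), "r" (asid) : "memory");

	/* Make the queued invalidations visible to subsequent implicit
	 * references (page-table walks) on this hart. */
	asm volatile("sfence.inval.ir" ::: "memory");
}

The bracketing fences are the point of the exercise: the individual sinval.vma operations between them may be batched and reordered by the hart, which is typically cheaper than issuing one fully ordered sfence.vma per page.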