Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
 arch/arm64/include/asm/tlbflush.h | 85 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 18a5dc0c9a54..a2d65d7d6aae 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -8,7 +8,7 @@
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitfield.h>
#include <linux/mm_types.h>
@@ -249,6 +249,19 @@ static inline unsigned long get_trans_granule(void)
* cannot be easily determined, the value TLBI_TTL_UNKNOWN will
* perform a non-hinted invalidation.
*
+ *	local_flush_tlb_page(vma, addr)
+ *		Local variant of flush_tlb_page(). Stale TLB entries may
+ *		remain on remote CPUs.
+ *
+ *	local_flush_tlb_page_nonotify(vma, addr)
+ *		Same as local_flush_tlb_page(), except that the MMU
+ *		notifier is not called.
+ *
+ *	local_flush_tlb_contpte(vma, addr)
+ *		Invalidate the virtual-address range
+ *		'[addr, addr+CONT_PTE_SIZE)' mapped with contpte on the
+ *		local CPU, for the user address space corresponding to
+ *		'vma->vm_mm'. Stale TLB entries may remain on remote CPUs.
*
* Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
* on top of these routines, since that is our interface to the mmu_gather
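
The practical difference between the new local_* helpers and their broadcast counterparts is the shareability domain: the local variants use non-shareable TLBI and DSB, so only the executing CPU's TLB is guaranteed clean afterwards. A minimal caller sketch, assuming a hypothetical mm_is_local_only() predicate (not part of this patch) that reports whether the mm can only be live on the current CPU:

/* Hypothetical policy: use the cheaper non-broadcast invalidation
 * when no other CPU can hold a stale entry for this mm. */
static void flush_one_user_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (mm_is_local_only(vma->vm_mm))	/* assumed helper */
		local_flush_tlb_page(vma, uaddr);	/* TLBI VALE1, DSB NSH */
	else
		flush_tlb_page(vma, uaddr);		/* broadcast TLBI VALE1IS */
}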
@@ -282,6 +295,33 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
+static inline void __local_flush_tlb_page_nonotify_nosync(struct mm_struct *mm,
+							   unsigned long uaddr)
+{
+	unsigned long addr;
+
+	dsb(nshst);
+	addr = __TLBI_VADDR(uaddr, ASID(mm));
+	__tlbi(vale1, addr);
+	__tlbi_user(vale1, addr);
+}
+
+static inline void local_flush_tlb_page_nonotify(struct vm_area_struct *vma,
+						 unsigned long uaddr)
+{
+	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
+	dsb(nsh);
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long uaddr)
+{
+	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
+						    (uaddr & PAGE_MASK) + PAGE_SIZE);
+	dsb(nsh);
+}
+
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					    unsigned long uaddr)
{
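
The ordering in these helpers follows the usual arm64 TLBI recipe, restricted to the non-shareable domain: dsb(nshst) makes the preceding page-table store visible before the invalidation, and dsb(nsh) waits for the local TLBI to complete. The _nonotify variant exists so a caller can batch the secondary-TLB (e.g. IOMMU) notification rather than issuing it per page; a sketch of that pattern, under my assumption about the intended use (the helper below is illustrative, not from the patch):

/* Flush each page locally, then notify secondary TLBs once for the
 * whole span (illustrative batching caller). */
static void local_flush_user_span(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		local_flush_tlb_page_nonotify(vma, addr);

	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}

Note that each dsb(nsh) only waits for the local invalidation; nothing here orders the flush against other CPUs, which is exactly the contract documented in the comment block above.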
@@ -472,6 +512,22 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
	dsb(ish);
}
+static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
+					   unsigned long addr)
+{
+	unsigned long asid;
+
+	addr = round_down(addr, CONT_PTE_SIZE);
+
+	dsb(nshst);
+	asid = ASID(vma->vm_mm);
+	__flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
+			     3, true, lpa2_is_enabled());
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
+						    addr + CONT_PTE_SIZE);
+	dsb(nsh);
+}
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
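
With 4K pages, CONT_PTES is 16 and CONT_PTE_SIZE is 64KiB, so the call above covers sixteen last-level entries (TTL hint 3) starting at the 64KiB-aligned base. On CPUs without range-TLBI support, __flush_tlb_range_op() falls back to per-page invalidations; a simplified sketch of that fallback (illustrative only; it ignores the TTL hint, the LPA2 encoding, and the MMU-notifier call the real function makes):

/* Roughly what the range op does without FEAT_TLBIRANGE (sketch). */
static void local_flush_contpte_by_loop(struct vm_area_struct *vma,
					unsigned long addr)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long va;
	int i;

	addr = round_down(addr, CONT_PTE_SIZE);
	dsb(nshst);
	for (i = 0; i < CONT_PTES; i++) {
		va = __TLBI_VADDR(addr + i * PAGE_SIZE, asid);
		__tlbi(vale1, va);
		__tlbi_user(vale1, va);
	}
	dsb(nsh);
}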
@@ -524,6 +580,33 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
{
	__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
}
+
+static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
+{
+	ptdesc_t diff = oldval ^ newval;
+
+	/* A transition from an invalid PTE requires no flush */
+	if (!(oldval & PTE_VALID))
+		return false;
+
+	/* A transition in the SW bits requires no flush */
+	diff &= ~PTE_SWBITS_MASK;
+
+	return diff;
+}
+
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
+}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
+
#endif
#endif
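
The net effect of the needs-flush predicates: any change confined to software bits of a valid entry, or any change where the old entry was invalid (and so could never have been cached in a TLB), reports that no flush is needed. A few illustrative checks, assuming pte is a valid PTE and assuming PTE_SPECIAL lies inside PTE_SWBITS_MASK while the hardware access flag PTE_AF does not (my reading, not stated by the patch):

/* Illustrative assertions, not part of the patch. */
static void example_pte_needs_flush(pte_t pte)	/* pte assumed valid */
{
	/* Toggling a software-only bit: no flush needed. */
	WARN_ON(pte_needs_flush(pte, __pte(pte_val(pte) | PTE_SPECIAL)));

	/* Clearing the hardware access flag: flush needed. */
	WARN_ON(!pte_needs_flush(pte, __pte(pte_val(pte) & ~PTE_AF)));

	/* Old entry invalid: nothing could be cached, so no flush. */
	WARN_ON(pte_needs_flush(__pte(0), pte));
}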