Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild                |  1
-rw-r--r--  include/asm-generic/bitops/atomic.h       |  6
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h  |  5
-rw-r--r--  include/asm-generic/getorder.h            |  2
-rw-r--r--  include/asm-generic/io.h                  |  4
-rw-r--r--  include/asm-generic/mmu_context.h         | 58
-rw-r--r--  include/asm-generic/nommu_context.h       | 19
-rw-r--r--  include/asm-generic/qrwlock.h             |  8
-rw-r--r--  include/asm-generic/qspinlock.h           |  4
9 files changed, 79 insertions, 28 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 4365b9aa3e3f..267f6dfb8960 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -34,6 +34,7 @@ mandatory-y += kmap_size.h
 mandatory-y += kprobes.h
 mandatory-y += linkage.h
 mandatory-y += local.h
+mandatory-y += local64.h
 mandatory-y += mm-arch-hooks.h
 mandatory-y += mmiowb.h
 mandatory-y += mmu.h
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dd90c9792909..0e7316a86240 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,19 +11,19 @@
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
         atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
         atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
         atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
index 458c85ebcd15..1dacfdb4247e 100644
--- a/include/asm-generic/bitops/builtin-ffs.h
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -10,9 +10,6 @@
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static __always_inline int ffs(int x)
-{
-        return __builtin_ffs(x);
-}
+#define ffs(x) __builtin_ffs(x)
 
 #endif
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index e9f20b813a69..f2979e3a96b6 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -26,7 +26,7 @@
  *
  * The result is undefined if the size is 0.
  */
-static inline __attribute_const__ int get_order(unsigned long size)
+static __always_inline __attribute_const__ int get_order(unsigned long size)
 {
         if (__builtin_constant_p(size)) {
                 if (!size)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 9ea83d80eb6f..c6af40ce03be 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1137,6 +1137,10 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
 }
 #endif
 
+#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
+extern int devmem_is_allowed(unsigned long pfn);
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASM_GENERIC_IO_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 6be9106fb6fb..91727065bacb 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -3,44 +3,74 @@
 #define __ASM_GENERIC_MMU_CONTEXT_H
 
 /*
- * Generic hooks for NOMMU architectures, which do not need to do
- * anything special here.
+ * Generic hooks to implement no-op functionality.
  */
 
-#include <asm-generic/mm_hooks.h>
-
 struct task_struct;
 struct mm_struct;
 
+/*
+ * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
+ *
+ * @mm: the currently active mm context which is becoming lazy
+ * @tsk: task which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+#ifndef enter_lazy_tlb
 static inline void enter_lazy_tlb(struct mm_struct *mm,
                         struct task_struct *tsk)
 {
 }
+#endif
 
+/**
+ * init_new_context - Initialize context of a new mm_struct.
+ * @tsk: task struct for the mm
+ * @mm: the new mm struct
+ * @return: 0 on success, -errno on failure
+ */
+#ifndef init_new_context
 static inline int init_new_context(struct task_struct *tsk,
                         struct mm_struct *mm)
 {
         return 0;
 }
+#endif
 
+/**
+ * destroy_context - Undo init_new_context when the mm is going away
+ * @mm: old mm struct
+ */
+#ifndef destroy_context
 static inline void destroy_context(struct mm_struct *mm)
 {
 }
+#endif
 
-static inline void deactivate_mm(struct task_struct *task,
-                        struct mm_struct *mm)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-                        struct mm_struct *next,
-                        struct task_struct *tsk)
+/**
+ * activate_mm - called after exec switches the current task to a new mm, to switch to it
+ * @prev_mm: previous mm of this task
+ * @next_mm: new mm
+ */
+#ifndef activate_mm
+static inline void activate_mm(struct mm_struct *prev_mm,
+                               struct mm_struct *next_mm)
 {
+        switch_mm(prev_mm, next_mm, current);
 }
+#endif
 
-static inline void activate_mm(struct mm_struct *prev_mm,
-                               struct mm_struct *next_mm)
+/**
+ * dectivate_mm - called when an mm is released after exit or exec switches away from it
+ * @tsk: the task
+ * @mm: the old mm
+ */
+#ifndef deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+                        struct mm_struct *mm)
 {
 }
+#endif
 
 #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
diff --git a/include/asm-generic/nommu_context.h b/include/asm-generic/nommu_context.h
new file mode 100644
index 000000000000..4f916f9e16cd
--- /dev/null
+++ b/include/asm-generic/nommu_context.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NOMMU_H
+#define __ASM_GENERIC_NOMMU_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+#include <asm-generic/mm_hooks.h>
+
+static inline void switch_mm(struct mm_struct *prev,
+                        struct mm_struct *next,
+                        struct task_struct *tsk)
+{
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_GENERIC_NOMMU_H */
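With the #ifndef guards added to mmu_context.h, an architecture's own asm/mmu_context.h can define just the hooks it needs and pick up the generic no-ops for everything else, the same way the new nommu_context.h provides a stub switch_mm() and then includes the generic header. A rough sketch of that override pattern follows; it is not part of this commit, and the header-guard name and the mm->context.id field are made up for illustration:

/* Hypothetical arch/<arch>/include/asm/mmu_context.h */
#ifndef _ASM_EXAMPLE_MMU_CONTEXT_H
#define _ASM_EXAMPLE_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

/* switch_mm() has no generic fallback, so an MMU architecture still supplies it. */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk);

/* Override one hook; the #define makes the generic #ifndef skip its no-op copy. */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context.id = 0;     /* hypothetical per-arch context/ASID field */
        return 0;
}

/* enter_lazy_tlb(), destroy_context(), activate_mm() and deactivate_mm()
 * all fall back to the generic no-op versions. */
#include <asm-generic/mmu_context.h>

#endif /* _ASM_EXAMPLE_MMU_CONTEXT_H */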
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 3aefde23dcea..84ce841ce735 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -37,7 +37,7 @@ extern void queued_write_lock_slowpath(struct qrwlock *lock);
  */
 static inline int queued_read_trylock(struct qrwlock *lock)
 {
-        u32 cnts;
+        int cnts;
 
         cnts = atomic_read(&lock->cnts);
         if (likely(!(cnts & _QW_WMASK))) {
@@ -56,7 +56,7 @@ static inline int queued_read_trylock(struct qrwlock *lock)
  */
 static inline int queued_write_trylock(struct qrwlock *lock)
 {
-        u32 cnts;
+        int cnts;
 
         cnts = atomic_read(&lock->cnts);
         if (unlikely(cnts))
@@ -71,7 +71,7 @@ static inline int queued_write_trylock(struct qrwlock *lock)
  */
 static inline void queued_read_lock(struct qrwlock *lock)
 {
-        u32 cnts;
+        int cnts;
 
         cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
         if (likely(!(cnts & _QW_WMASK)))
@@ -87,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
-        u32 cnts = 0;
+        int cnts = 0;
         /* Optimize for the unfair lock case where the fair flag is 0. */
         if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
                 return;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 4fe7fd0fe834..d74b13825501 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -60,7 +60,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-        u32 val = atomic_read(&lock->val);
+        int val = atomic_read(&lock->val);
 
         if (unlikely(val))
                 return 0;
@@ -77,7 +77,7 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-        u32 val = 0;
+        int val = 0;
 
         if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                 return;
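The u32 to int conversions in qrwlock.h and qspinlock.h make the locals match the atomic API: atomic_t wraps a plain int, and the try_cmpxchg() helpers take the expected old value through an int pointer, so passing &val from a u32 local mixes pointer types. A self-contained userspace sketch of the same shape (an illustration only, using the GCC/Clang __atomic builtins rather than the kernel's atomic_try_cmpxchg_acquire()):

#include <stdbool.h>
#include <stdio.h>

struct example_lock { int val; };       /* like atomic_t, which wraps a plain int */

static bool example_trylock(struct example_lock *lock)
{
        int expected = 0;       /* an unsigned local would not match the int * parameter */

        /* acquire-ordered compare-and-swap, analogous to the trylock fast path */
        return __atomic_compare_exchange_n(&lock->val, &expected, 1, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
        struct example_lock lock = { 0 };

        printf("first trylock:  %d\n", example_trylock(&lock));        /* prints 1 */
        printf("second trylock: %d\n", example_trylock(&lock));        /* prints 0 */
        return 0;
}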