Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 2
-rw-r--r--  lib/Kconfig | 56
-rw-r--r--  lib/Kconfig.debug | 341
-rw-r--r--  lib/Kconfig.kasan | 3
-rw-r--r--  lib/Kconfig.ubsan | 14
-rw-r--r--  lib/Makefile | 50
-rw-r--r--  lib/argv_split.c | 2
-rw-r--r--  lib/atomic64.c | 14
-rw-r--r--  lib/bch.c | 23
-rw-r--r--  lib/bitmap.c | 36
-rw-r--r--  lib/bucket_locks.c | 16
-rw-r--r--  lib/crc32.c | 11
-rw-r--r--  lib/crc32defs.h | 14
-rw-r--r--  lib/crc64.c | 56
-rw-r--r--  lib/debugobjects.c | 151
-rw-r--r--  lib/dec_and_lock.c | 16
-rw-r--r--  lib/decompress_bunzip2.c | 3
-rw-r--r--  lib/devres.c | 114
-rw-r--r--  lib/dma-debug.c | 1752
-rw-r--r--  lib/dma-direct.c | 161
-rw-r--r--  lib/dma-virt.c | 61
-rw-r--r--  lib/dump_stack.c | 60
-rw-r--r--  lib/errseq.c | 23
-rw-r--r--  lib/find_bit_benchmark.c | 7
-rw-r--r--  lib/fonts/font_7x14.c | 256
-rw-r--r--  lib/fonts/font_8x16.c | 256
-rw-r--r--  lib/fonts/font_8x8.c | 256
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 256
-rw-r--r--  lib/gen_crc32table.c | 5
-rw-r--r--  lib/gen_crc64table.c | 68
-rw-r--r--  lib/idr.c | 165
-rw-r--r--  lib/int_sqrt.c | 30
-rw-r--r--  lib/interval_tree_test.c | 5
-rw-r--r--  lib/iommu-common.c | 267
-rw-r--r--  lib/iommu-helper.c | 14
-rw-r--r--  lib/ioremap.c | 4
-rw-r--r--  lib/iov_iter.c | 134
-rw-r--r--  lib/kfifo.c | 4
-rw-r--r--  lib/klist.c | 10
-rw-r--r--  lib/kobject.c | 76
-rw-r--r--  lib/kobject_uevent.c | 272
-rw-r--r--  lib/libcrc32c.c | 6
-rw-r--r--  lib/list_debug.c | 14
-rw-r--r--  lib/locking-selftest.c | 2
-rw-r--r--  lib/lockref.c | 28
-rw-r--r--  lib/logic_pio.c | 280
-rw-r--r--  lib/lru_cache.c | 2
-rw-r--r--  lib/mpi/mpi-internal.h | 75
-rw-r--r--  lib/mpi/mpi-pow.c | 3
-rw-r--r--  lib/mpi/mpiutil.c | 4
-rw-r--r--  lib/nlattr.c | 4
-rw-r--r--  lib/nmi_backtrace.c | 3
-rw-r--r--  lib/percpu_counter.c | 1
-rw-r--r--  lib/percpu_ida.c | 391
-rw-r--r--  lib/radix-tree.c | 24
-rw-r--r--  lib/raid6/.gitignore | 1
-rw-r--r--  lib/raid6/Makefile | 33
-rw-r--r--  lib/raid6/algos.c | 7
-rw-r--r--  lib/raid6/altivec.uc | 3
-rw-r--r--  lib/raid6/s390vx.uc | 34
-rw-r--r--  lib/raid6/sse2.c | 14
-rw-r--r--  lib/raid6/test/Makefile | 29
-rw-r--r--  lib/raid6/tilegx.uc | 87
-rw-r--r--  lib/raid6/vpermxor.uc | 105
-rw-r--r--  lib/rbtree_test.c | 2
-rw-r--r--  lib/reciprocal_div.c | 41
-rw-r--r--  lib/reed_solomon/decode_rs.c | 34
-rw-r--r--  lib/reed_solomon/encode_rs.c | 15
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 240
-rw-r--r--  lib/refcount.c | 83
-rw-r--r--  lib/rhashtable.c | 156
-rw-r--r--  lib/sbitmap.c | 119
-rw-r--r--  lib/scatterlist.c | 18
-rw-r--r--  lib/sha256.c | 283
-rw-r--r--  lib/swiotlb.c | 1135
-rw-r--r--  lib/test_bitfield.c | 168
-rw-r--r--  lib/test_bitmap.c | 35
-rw-r--r--  lib/test_bpf.c | 708
-rw-r--r--  lib/test_debug_virtual.c | 2
-rw-r--r--  lib/test_firmware.c | 11
-rw-r--r--  lib/test_hexdump.c | 28
-rw-r--r--  lib/test_ida.c | 177
-rw-r--r--  lib/test_kasan.c | 8
-rw-r--r--  lib/test_kmod.c | 5
-rw-r--r--  lib/test_overflow.c | 613
-rw-r--r--  lib/test_printf.c | 26
-rw-r--r--  lib/test_rhashtable.c | 21
-rw-r--r--  lib/test_ubsan.c | 144
-rw-r--r--  lib/test_user_copy.c | 3
-rw-r--r--  lib/textsearch.c | 40
-rw-r--r--  lib/ucmpdi2.c | 2
-rw-r--r--  lib/ucs2_string.c | 2
-rw-r--r--  lib/vsprintf.c | 203
-rw-r--r--  lib/xz/xz_crc32.c | 3
-rw-r--r--  lib/zstd/Makefile | 17
95 files changed, 4828 insertions, 5728 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index 09aae85418ab..f2a39c9e5485 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -2,5 +2,7 @@
# Generated files
#
gen_crc32table
+gen_crc64table
crc32table.h
+crc64table.h
oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index e96089499371..a3928d4438b5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -55,6 +55,22 @@ config ARCH_USE_CMPXCHG_LOCKREF
config ARCH_HAS_FAST_MULTIPLIER
bool
+config INDIRECT_PIO
+ bool "Access I/O in non-MMIO mode"
+ depends on ARM64
+ help
+ On some platforms where no separate I/O space exists, there are I/O
+ hosts which can not be accessed in MMIO mode. Using the logical PIO
+ mechanism, the host-local I/O resource can be mapped into system
+ logic PIO space shared with MMIO hosts, such as PCI/PCIe, then the
+ system can access the I/O devices with the mapped-logic PIO through
+ I/O accessors.
+
+ This way has relatively little I/O performance cost. Please make
+ sure your devices really need this configure item enabled.
+
+ When in doubt, say N.
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -154,6 +170,14 @@ config CRC32_BIT
endchoice
+config CRC64
+ tristate "CRC64 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC64 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC64
+ functions require M here.
+
config CRC4
tristate "CRC4 functions"
help
@@ -207,7 +231,6 @@ config AUDIT_COMPAT_GENERIC
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
- default n
help
This option enables the 32 bit PRNG library functions to perform a
self test on initialization.
@@ -389,7 +412,7 @@ config ASSOCIATIVE_ARRAY
See:
- Documentation/assoc_array.txt
+ Documentation/core-api/assoc_array.rst
for more information.
@@ -404,24 +427,14 @@ config HAS_IOPORT_MAP
depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
-config HAS_DMA
- bool
- depends on !NO_DMA
- default y
+source "kernel/dma/Kconfig"
config SGL_ALLOC
bool
default n
-config DMA_DIRECT_OPS
- bool
- depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
- default n
-
-config DMA_VIRT_OPS
+config IOMMU_HELPER
bool
- depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
- default n
config CHECK_SIGNATURE
bool
@@ -570,6 +583,9 @@ config ARCH_HAS_PMEM_API
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
+config ARCH_HAS_UACCESS_MCSAFE
+ bool
+
config STACKDEPOT
bool
select STACKTRACE
@@ -588,20 +604,20 @@ config STRING_SELFTEST
endmenu
-config GENERIC_ASHLDI3
+config GENERIC_LIB_ASHLDI3
bool
-config GENERIC_ASHRDI3
+config GENERIC_LIB_ASHRDI3
bool
-config GENERIC_LSHRDI3
+config GENERIC_LIB_LSHRDI3
bool
-config GENERIC_MULDI3
+config GENERIC_LIB_MULDI3
bool
-config GENERIC_CMPDI2
+config GENERIC_LIB_CMPDI2
bool
-config GENERIC_UCMPDI2
+config GENERIC_LIB_UCMPDI2
bool
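
For context, INDIRECT_PIO gates the new lib/logic_pio.c listed in the diffstat. A host controller whose I/O ports cannot be reached through MMIO registers a logical PIO range plus accessor callbacks, and the generic inb()/outb() accessors are then routed through those callbacks. The sketch below is only illustrative: the type and symbol names (struct logic_pio_hwaddr, struct logic_pio_host_ops, LOGIC_PIO_INDIRECT, logic_pio_register_range()) are assumed from the logic_pio code added by this series, while the my_lpc_* helpers and constants are hypothetical.

#include <linux/logic_pio.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static u32 my_lpc_in(void *hostdata, unsigned long addr, size_t dwidth)
{
	/* read dwidth bytes from the non-MMIO host at host-local 'addr' */
	return my_lpc_hw_read(hostdata, addr, dwidth);	/* hypothetical */
}

static void my_lpc_out(void *hostdata, unsigned long addr, u32 val,
		       size_t dwidth)
{
	my_lpc_hw_write(hostdata, addr, val, dwidth);	/* hypothetical */
}

static const struct logic_pio_host_ops my_lpc_ops = {
	.in	= my_lpc_in,
	.out	= my_lpc_out,
};

static int my_lpc_probe(struct platform_device *pdev)
{
	static struct logic_pio_hwaddr range = {
		.hw_start	= 0xe4,		/* hypothetical host-local base */
		.size		= 0x1000,
		.flags		= LOGIC_PIO_INDIRECT,
		.ops		= &my_lpc_ops,
	};

	range.fwnode	= dev_fwnode(&pdev->dev);
	range.hostdata	= pdev;

	/* after this, port I/O in the mapped logical range is dispatched to
	 * the callbacks above instead of MMIO */
	return logic_pio_register_range(&range);
}
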
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64155e310a9f..4966c4fbe7f7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,3 +1,5 @@
+menu "Kernel hacking"
+
menu "printk and dmesg options"
config PRINTK_TIME
@@ -30,6 +32,17 @@ config CONSOLE_LOGLEVEL_DEFAULT
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.
+config CONSOLE_LOGLEVEL_QUIET
+ int "quiet console loglevel (1-15)"
+ range 1 15
+ default "4"
+ help
+ loglevel to use when "quiet" is passed on the kernel commandline.
+
+ When "quiet" is passed on the kernel commandline this loglevel
+ will be used as the loglevel. IOW passing "quiet" will be the
+ equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
+
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
@@ -165,7 +178,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO && !FRV
+ depends on DEBUG_INFO
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -198,14 +211,6 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
-config ENABLE_WARN_DEPRECATED
- bool "Enable __deprecated logic"
- default y
- help
- Enable the __deprecated logic in the kernel build.
- Disable this to suppress the "warning: 'foo' is deprecated
- (declared at kernel/power/somefile.c:1234)" messages.
-
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
@@ -324,11 +329,11 @@ config DEBUG_SECTION_MISMATCH
the analysis would not catch the illegal reference.
This option tells gcc to inline less (but it does result in
a larger kernel).
- - Run the section mismatch analysis for each module/built-in.o file.
+ - Run the section mismatch analysis for each module/built-in.a file.
When we run the section mismatch analysis on vmlinux.o, we
lose valuable information about where the mismatch was
introduced.
- Running the analysis for each module/built-in.o file
+ Running the analysis for each module/built-in.a file
tells where the mismatch happens much closer to the
source. The drawback is that the same mismatch is
reported at least twice.
@@ -354,10 +359,7 @@ config ARCH_WANT_FRAME_POINTERS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && \
- (CRIS || M68K || FRV || UML || \
- SUPERH || BLACKFIN || MN10300 || METAG) || \
- ARCH_WANT_FRAME_POINTERS
+ depends on DEBUG_KERNEL && (M68K || UML || SUPERH) || ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
help
If you say Y here the resulting kernel image will be slightly
@@ -739,12 +741,15 @@ config ARCH_HAS_KCOV
only for x86_64. KCOV requires testing on other archs, and most likely
disabling of instrumentation for some early boot code.
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool $(cc-option,-fsanitize-coverage=trace-pc)
+
config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
select DEBUG_FS
- select GCC_PLUGINS if !COMPILE_TEST
- select GCC_PLUGIN_SANCOV if !COMPILE_TEST
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -758,7 +763,7 @@ config KCOV
config KCOV_ENABLE_COMPARISONS
bool "Enable comparison operands collection by KCOV"
depends on KCOV
- default n
+ depends on $(cc-option,-fsanitize-coverage=trace-cmp)
help
KCOV also exposes operands of every comparison in the instrumented
code along with operand sizes and PCs of the comparison instructions.
@@ -768,7 +773,7 @@ config KCOV_ENABLE_COMPARISONS
config KCOV_INSTRUMENT_ALL
bool "Instrument all code by default"
depends on KCOV
- default y if KCOV
+ default y
help
If you are doing generic system call fuzzing (like e.g. syzkaller),
then you will want to instrument the whole kernel and you should
@@ -803,6 +808,30 @@ config SOFTLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 20 seconds (configurable using the watchdog_thresh
+ sysctl), without giving other tasks a chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on SOFTLOCKUP_DETECTOR
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
config HARDLOCKUP_DETECTOR_PERF
bool
select SOFTLOCKUP_DETECTOR
@@ -852,30 +881,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
default 1 if BOOTPARAM_HARDLOCKUP_PANIC
-config BOOTPARAM_SOFTLOCKUP_PANIC
- bool "Panic (Reboot) On Soft Lockups"
- depends on SOFTLOCKUP_DETECTOR
- help
- Say Y here to enable the kernel to panic on "soft lockups",
- which are bugs that cause the kernel to loop in kernel
- mode for more than 20 seconds (configurable using the watchdog_thresh
- sysctl), without giving other tasks a chance to run.
-
- The panic can be used in combination with panic_timeout,
- to cause the system to reboot automatically after a
- lockup has been detected. This feature is useful for
- high-availability systems that have uptime guarantees and
- where a lockup must be resolved ASAP.
-
- Say N if unsure.
-
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
- int
- depends on SOFTLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
- default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
@@ -1034,69 +1039,20 @@ config DEBUG_PREEMPT
menu "Lock Debugging (spinlocks, mutexes, etc...)"
-config DEBUG_RT_MUTEXES
- bool "RT Mutex debugging, deadlock detection"
- depends on DEBUG_KERNEL && RT_MUTEXES
- help
- This allows rt mutex semantics violations and rt mutex related
- deadlocks (lockups) to be detected and reported automatically.
-
-config DEBUG_SPINLOCK
- bool "Spinlock and rw-lock debugging: basic checks"
- depends on DEBUG_KERNEL
- select UNINLINE_SPIN_UNLOCK
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_MUTEXES
- bool "Mutex debugging: basic checks"
- depends on DEBUG_KERNEL
- help
- This feature allows mutex semantics violations to be detected and
- reported.
-
-config DEBUG_WW_MUTEX_SLOWPATH
- bool "Wait/wound mutex debugging: Slowpath testing"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select DEBUG_LOCK_ALLOC
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
- help
- This feature enables slowpath testing for w/w mutex users by
- injecting additional -EDEADLK wound/backoff cases. Together with
- the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
- will test all possible w/w mutex interface abuse with the
- exception of simply not acquiring all the required locks.
- Note that this feature can introduce significant overhead, so
- it really should not be enabled in a production or distro kernel,
- even a debug kernel. If you are a driver writer, enable it. If
- you are a distro, do not.
-
-config DEBUG_LOCK_ALLOC
- bool "Lock debugging: detect incorrect freeing of live locks"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
- select DEBUG_RT_MUTEXES if RT_MUTEXES
- select LOCKDEP
- help
- This feature will check whether any held lock (spinlock, rwlock,
- mutex or rwsem) is incorrectly freed by the kernel, via any of the
- memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
- vfree(), etc.), whether a live lock is incorrectly reinitialized via
- spin_lock_init()/mutex_init()/etc., or whether there is any lock
- held during task exit.
+config LOCK_DEBUGGING_SUPPORT
+ bool
+ depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ default y
config PROVE_LOCKING
bool "Lock debugging: prove locking correctness"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+ select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
@@ -1134,20 +1090,9 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
-config LOCKDEP
- bool
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
- select KALLSYMS
- select KALLSYMS_ALL
-
-config LOCKDEP_SMALL
- bool
-
config LOCK_STAT
bool "Lock usage statistics"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
@@ -1167,6 +1112,80 @@ config LOCK_STAT
CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
(CONFIG_LOCKDEP defines "acquire" and "release" events.)
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_SPINLOCK
+ bool "Spinlock and rw-lock debugging: basic checks"
+ depends on DEBUG_KERNEL
+ select UNINLINE_SPIN_UNLOCK
+ help
+ Say Y here and build SMP to catch missing spinlock initialization
+ and certain other kinds of spinlock errors commonly made. This is
+ best used in conjunction with the NMI watchdog so that spinlock
+ deadlocks are also debuggable.
+
+config DEBUG_MUTEXES
+ bool "Mutex debugging: basic checks"
+ depends on DEBUG_KERNEL
+ help
+ This feature allows mutex semantics violations to be detected and
+ reported.
+
+config DEBUG_WW_MUTEX_SLOWPATH
+ bool "Wait/wound mutex debugging: Slowpath testing"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_LOCK_ALLOC
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ help
+ This feature enables slowpath testing for w/w mutex users by
+ injecting additional -EDEADLK wound/backoff cases. Together with
+ the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+ will test all possible w/w mutex interface abuse with the
+ exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
+
+config DEBUG_RWSEMS
+ bool "RW Semaphore debugging: basic checks"
+ depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+ help
+ This debugging feature allows mismatched rw semaphore locks and unlocks
+ to be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+ bool "Lock debugging: detect incorrect freeing of live locks"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select LOCKDEP
+ help
+ This feature will check whether any held lock (spinlock, rwlock,
+ mutex or rwsem) is incorrectly freed by the kernel, via any of the
+ memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+ vfree(), etc.), whether a live lock is incorrectly reinitialized via
+ spin_lock_init()/mutex_init()/etc., or whether there is any lock
+ held during task exit.
+
+config LOCKDEP
+ bool
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select STACKTRACE
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+
+config LOCKDEP_SMALL
+ bool
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -1179,6 +1198,7 @@ config DEBUG_ATOMIC_SLEEP
bool "Sleep inside atomic section checking"
select PREEMPT_COUNT
depends on DEBUG_KERNEL
+ depends on !ARCH_NO_PREEMPT
help
If you say Y here, various routines which may sleep will become very
noisy if they are called inside atomic sections: when a spinlock is
@@ -1200,7 +1220,6 @@ config LOCK_TORTURE_TEST
tristate "torture tests for locking"
depends on DEBUG_KERNEL
select TORTURE_TEST
- default n
help
This option provides a kernel module that runs torture tests
on kernel locking primitives. The kernel module may be built
@@ -1258,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
time. This is really bad from a security perspective, and
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
- However, since users can not do anything actionble to
+ However, since users cannot do anything actionable to
address this, by default the kernel will issue only a single
warning for the first use of unseeded randomness.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
- those developers interersted in improving the security of
+ those developers interested in improving the security of
Linux kernels running on their architecture (or
subarchitecture).
@@ -1492,6 +1511,10 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
+config FUNCTION_ERROR_INJECTION
+ def_bool y
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
@@ -1499,10 +1522,6 @@ config FAULT_INJECTION
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
-config FUNCTION_ERROR_INJECTION
- def_bool y
- depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
-
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
@@ -1533,16 +1552,6 @@ config FAIL_IO_TIMEOUT
Only works with drivers that use the generic timeout handling,
for others it wont do anything.
-config FAIL_MMC_REQUEST
- bool "Fault-injection capability for MMC IO"
- depends on FAULT_INJECTION_DEBUG_FS && MMC
- help
- Provide fault-injection capability for MMC IO.
- This will make the mmc core return data errors. This is
- useful to test the error handling in the mmc block device
- and to test how the mmc host driver handles retries from
- the block device.
-
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
select DEBUG_FS
@@ -1550,6 +1559,12 @@ config FAIL_FUTEX
help
Provide fault-injection capability for futexes.
+config FAULT_INJECTION_DEBUG_FS
+ bool "Debugfs entries for fault-injection capabilities"
+ depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+ help
+ Enable configuration of fault-injection capabilities via debugfs.
+
config FAIL_FUNCTION
bool "Fault-injection capability for functions"
depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
@@ -1560,18 +1575,22 @@ config FAIL_FUNCTION
an error value and have to handle it. This is useful to test the
error handling in various subsystems.
-config FAULT_INJECTION_DEBUG_FS
- bool "Debugfs entries for fault-injection capabilities"
- depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+config FAIL_MMC_REQUEST
+ bool "Fault-injection capability for MMC IO"
+ depends on FAULT_INJECTION_DEBUG_FS && MMC
help
- Enable configuration of fault-injection capabilities via debugfs.
+ Provide fault-injection capability for MMC IO.
+ This will make the mmc core return data errors. This is
+ useful to test the error handling in the mmc block device
+ and to test how the mmc host driver handles retries from
+ the block device.
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
help
Provide stacktrace filter for fault-injection capabilities
@@ -1623,7 +1642,7 @@ config PROVIDE_OHCI1394_DMA_INIT
config DMA_API_DEBUG
bool "Enable debugging of DMA-API usage"
- depends on HAVE_DMA_API_DEBUG
+ select NEED_DMA_MAP_STATE
help
Enable this option to debug the use of the DMA API by device drivers.
With this option you will be able to detect common bugs in device
@@ -1640,6 +1659,23 @@ config DMA_API_DEBUG
If unsure, say N.
+config DMA_API_DEBUG_SG
+ bool "Debug DMA scatter-gather usage"
+ default y
+ depends on DMA_API_DEBUG
+ help
+ Perform extra checking that callers of dma_map_sg() have respected the
+ appropriate segment length/boundary limits for the given device when
+ preparing DMA scatterlists.
+
+ This is particularly likely to have been overlooked in cases where the
+ dma_map_sg() API is used for general bulk mapping of pages rather than
+ preparing literal scatter-gather descriptors, where there is a risk of
+ unexpected behaviour from DMA API implementations if the scatterlist
+ is technically out-of-spec.
+
+ If unsure, say N.
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1650,7 +1686,6 @@ config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
depends on BLOCK
- default n
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
@@ -1684,10 +1719,9 @@ config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
- default n
help
This option provides for testing basic kprobes functionality on
- boot. A sample kprobe, jprobe and kretprobe are inserted and
+ boot. Samples of kprobe and kretprobe are inserted and
verified for functionality.
Say N if you are unsure.
@@ -1695,7 +1729,6 @@ config KPROBES_SANITY_TEST
config BACKTRACE_SELF_TEST
tristate "Self test for the backtrace code"
depends on DEBUG_KERNEL
- default n
help
This option provides a kernel module that can be used to test
the kernel stack backtrace code. This option is not useful
@@ -1765,18 +1798,26 @@ config TEST_PRINTF
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
- default n
help
Enable this option to test the bitmap functions at boot.
If unsure, say N.
+config TEST_BITFIELD
+ tristate "Test bitfield functions at runtime"
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ If unsure, say N.
+
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
+config TEST_OVERFLOW
+ tristate "Test check_*_overflow() functions at runtime"
+
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
- default n
help
Enable this option to test the rhashtable functions at boot.
@@ -1784,7 +1825,6 @@ config TEST_RHASHTABLE
config TEST_HASH
tristate "Perform selftest on hash functions"
- default n
help
Enable this option to test the kernel's integer (<linux/hash.h>),
string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
@@ -1793,9 +1833,11 @@ config TEST_HASH
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config TEST_IDA
+ tristate "Perform selftest on IDA functions"
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
- default n
depends on PARMAN
help
Enable this option to test priority array manager on boot
@@ -1805,7 +1847,6 @@ config TEST_PARMAN
config TEST_LKM
tristate "Test module loading with 'hello world' module"
- default n
depends on m
help
This builds the "test_module" module that emits "Hello, world"
@@ -1819,7 +1860,6 @@ config TEST_LKM
config TEST_USER_COPY
tristate "Test user/kernel boundary protections"
- default n
depends on m
help
This builds the "test_user_copy" module that runs sanity checks
@@ -1832,7 +1872,6 @@ config TEST_USER_COPY
config TEST_BPF
tristate "Test BPF filter functionality"
- default n
depends on m && NET
help
This builds the "test_bpf" module that runs various test vectors
@@ -1846,7 +1885,6 @@ config TEST_BPF
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
- default n
help
This builds the "test_find_bit" module that measure find_*_bit()
functions performance.
@@ -1855,7 +1893,6 @@ config FIND_BIT_BENCHMARK
config TEST_FIRMWARE
tristate "Test firmware loading via userspace interface"
- default n
depends on FW_LOADER
help
This builds the "test_firmware" module that creates a userspace
@@ -1868,7 +1905,6 @@ config TEST_FIRMWARE
config TEST_SYSCTL
tristate "sysctl test driver"
- default n
depends on PROC_SYSCTL
help
This builds the "test_sysctl" module. This driver enables to test the
@@ -1879,7 +1915,6 @@ config TEST_SYSCTL
config TEST_UDELAY
tristate "udelay test driver"
- default n
help
This builds the "udelay_test" module that helps to make sure
that udelay() is working properly.
@@ -1888,7 +1923,6 @@ config TEST_UDELAY
config TEST_STATIC_KEYS
tristate "Test static keys"
- default n
depends on m
help
Test the static key interfaces.
@@ -1897,7 +1931,6 @@ config TEST_STATIC_KEYS
config TEST_KMOD
tristate "kmod stress tester"
- default n
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
@@ -1969,7 +2002,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if TILE || PPC || X86 || ARM64
+ default y if PPC || X86 || ARM64
---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
@@ -2000,3 +2033,7 @@ config IO_STRICT_DEVMEM
if the driver using a given range cannot be disabled.
If in doubt, say Y.
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 3d35d062970d..befb127507c0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
config KASAN
bool "KASan: runtime memory debugger"
- depends on SLUB || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index a669c193b878..98fa559ebd80 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config ARCH_WANTS_UBSAN_NO_NULL
- def_bool n
-
config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -39,10 +36,9 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_NULL
- bool "Enable checking of null pointers"
- depends on UBSAN
- default y if !ARCH_WANTS_UBSAN_NO_NULL
+config TEST_UBSAN
+ tristate "Module for testing for undefined behavior detection"
+ depends on m && UBSAN
help
- This option enables detection of memory accesses via a
- null pointer.
+ This is a test module for UBSAN.
+ It triggers various undefined behavior, and detect it.
diff --git a/lib/Makefile b/lib/Makefile
index a90d4fcd748f..ca3f7ebb900d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,18 +18,17 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o dump_stack.o timerqueue.o\
+ rbtree.o radix-tree.o timerqueue.o\
idr.o int_sqrt.o extable.o \
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o siphash.o \
+ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o nodemask.o win_minmax.o
+lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
obj-y += lockref.o
@@ -38,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
@@ -51,10 +50,15 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+CFLAGS_test_kasan.o += -fno-builtin
+obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
+UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
@@ -62,6 +66,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
@@ -81,6 +86,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+obj-y += logic_pio.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
@@ -90,10 +97,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
- lib-y += dec_and_lock.o
-endif
-
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
@@ -101,6 +104,7 @@ obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC64) += crc64.o
obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
@@ -115,6 +119,7 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
+CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
@@ -140,8 +145,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
-obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
@@ -161,8 +165,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
obj-$(CONFIG_LRU_CACHE) += lru_cache.o
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
@@ -217,7 +219,9 @@ obj-$(CONFIG_FONT_SUPPORT) += fonts/
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
hostprogs-y := gen_crc32table
+hostprogs-y += gen_crc64table
clean-files := crc32table.h
+clean-files += crc64table.h
$(obj)/crc32.o: $(obj)/crc32table.h
@@ -227,6 +231,14 @@ quiet_cmd_crc32 = GEN $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)
+$(obj)/crc64.o: $(obj)/crc64table.h
+
+quiet_cmd_crc64 = GEN $@
+ cmd_crc64 = $< > $@
+
+$(obj)/crc64table.h: $(obj)/gen_crc64table
+ $(call cmd,crc64)
+
#
# Build a fast OID lookip registry from include/linux/oid_registry.h
#
@@ -253,9 +265,9 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
# GCC library routines
-obj-$(CONFIG_GENERIC_ASHLDI3) += ashldi3.o
-obj-$(CONFIG_GENERIC_ASHRDI3) += ashrdi3.o
-obj-$(CONFIG_GENERIC_LSHRDI3) += lshrdi3.o
-obj-$(CONFIG_GENERIC_MULDI3) += muldi3.o
-obj-$(CONFIG_GENERIC_CMPDI2) += cmpdi2.o
-obj-$(CONFIG_GENERIC_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
+obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
+obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
+obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
+obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
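
The Makefile now also builds a gen_crc64table host program and generates crc64table.h from it, mirroring the existing crc32 rule. lib/gen_crc64table.c itself is only in the diffstat, not in this diff; a minimal sketch of what such a generator does (MSB-first table for the ECMA-182 polynomial, matching how crc64_be() below indexes the table) would be:

#include <inttypes.h>
#include <stdio.h>

/* CRC-64/ECMA-182 generator polynomial, normal (MSB-first) form */
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL

int main(void)
{
	uint64_t crc, table[256];
	int i, j;

	for (i = 0; i < 256; i++) {
		crc = (uint64_t)i << 56;
		for (j = 0; j < 8; j++)
			crc = (crc & (1ULL << 63)) ?
				(crc << 1) ^ CRC64_ECMA182_POLY : crc << 1;
		table[i] = crc;
	}

	/* exact formatting of the emitted header is an assumption */
	printf("/* this file is generated - do not edit */\n\n");
	printf("static const u64 crc64table[256] = {\n");
	for (i = 0; i < 256; i++)
		printf("\t0x%016" PRIx64 "ULL,\n", table[i]);
	printf("};\n");
	return 0;
}
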
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5c35752a9414..1a19a0a93dc1 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -69,7 +69,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
return NULL;
argc = count_argc(argv_str);
- argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+ argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
if (!argv) {
kfree(argv_str);
return NULL;
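
The argv_split() change swaps the open-coded sizeof(*argv) * (argc + 2) for kmalloc_array(), whose point is that the element-count multiplication is overflow-checked before allocating. Roughly (a sketch, not the actual slab implementation):

#include <linux/overflow.h>
#include <linux/slab.h>

static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;		/* n * size would wrap around */
	return kmalloc(bytes, flags);
}
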
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ long long val;
raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
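
The generic atomic64_add_unless() is replaced by atomic64_fetch_add_unless(), which returns the counter's old value instead of a 0/1 result. The boolean form is then recovered by a wrapper outside lib/ (in include/linux/atomic.h); it amounts to:

/* true iff the add happened, i.e. the old value was not @u */
static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}
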
diff --git a/lib/bch.c b/lib/bch.c
index bc89dfe4d1b3..7b0f2006698b 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -78,15 +78,22 @@
#define GF_M(_p) (CONFIG_BCH_CONST_M)
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
+#define BCH_MAX_M (CONFIG_BCH_CONST_M)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
+#define BCH_MAX_M 15
#endif
+#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
+
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
+#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
+#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
+
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
#endif
@@ -187,7 +194,8 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const unsigned int l = BCH_ECC_WORDS(bch)-1;
unsigned int i, mlen;
unsigned long m;
- uint32_t w, r[l+1];
+ uint32_t w, r[BCH_ECC_MAX_WORDS];
+ const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
const uint32_t * const tab0 = bch->mod8_tab;
const uint32_t * const tab1 = tab0 + 256*(l+1);
const uint32_t * const tab2 = tab1 + 256*(l+1);
@@ -198,7 +206,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
} else {
- memset(bch->ecc_buf, 0, sizeof(r));
+ memset(bch->ecc_buf, 0, r_bytes);
}
/* process first unaligned data bytes */
@@ -215,7 +223,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
mlen = len/4;
data += 4*mlen;
len -= 4*mlen;
- memcpy(r, bch->ecc_buf, sizeof(r));
+ memcpy(r, bch->ecc_buf, r_bytes);
/*
* split each 32-bit word into 4 polynomials of weight 8 as follows:
@@ -241,7 +249,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
r[l] = p0[l]^p1[l]^p2[l]^p3[l];
}
- memcpy(bch->ecc_buf, r, sizeof(r));
+ memcpy(bch->ecc_buf, r, r_bytes);
/* process last unaligned bytes */
if (len)
@@ -434,7 +442,7 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
{
const int m = GF_M(bch);
unsigned int tmp, mask;
- int rem, c, r, p, k, param[m];
+ int rem, c, r, p, k, param[BCH_MAX_M];
k = 0;
mask = 1 << m;
@@ -1114,7 +1122,7 @@ static int build_deg2_base(struct bch_control *bch)
{
const int m = GF_M(bch);
int i, j, r;
- unsigned int sum, x, y, remaining, ak = 0, xi[m];
+ unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];
/* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
for (i = 0; i < m; i++) {
@@ -1254,7 +1262,6 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
struct bch_control *bch = NULL;
const int min_m = 5;
- const int max_m = 15;
/* default primitive polynomials */
static const unsigned int prim_poly_tab[] = {
@@ -1270,7 +1277,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
goto fail;
}
#endif
- if ((m < min_m) || (m > max_m))
+ if ((m < min_m) || (m > BCH_MAX_M))
/*
* values of m greater than 15 are not currently supported;
* supporting m > 15 would require changing table base type
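
For reference, the arithmetic behind the fixed-size bounds that replace the variable-length arrays (worked out here, not taken from the patch description):

/*
 * Worst case is m = BCH_MAX_M = 15:
 *   BCH_MAX_T         = ((1 << 15) - 1) / 15        = 32767 / 15       = 2184
 *   BCH_ECC_MAX_WORDS = DIV_ROUND_UP(15 * 2184, 32) = ceil(32760 / 32) = 1024
 *   BCH_ECC_MAX_BYTES = DIV_ROUND_UP(15 * 2184, 8)  = 32760 / 8        = 4095
 * so r[] in encode_bch() becomes a fixed 1024-word (4 KiB) stack buffer,
 * which is why the lib/Makefile hunk above raises -Wframe-larger-than to
 * 4500 for bch.o.
 */
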
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9e498c77ed0e..2fd07f6df0b8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -64,12 +65,9 @@ EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
- unsigned int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = BITS_TO_LONGS(bits);
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
-
- if (bits % BITS_PER_LONG)
- dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
@@ -607,7 +605,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
/* if no digit is after '-', it's wrong*/
if (at_start && in_range)
return -EINVAL;
- if (!(a <= b) || !(used_size <= group_size))
+ if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
@@ -1128,6 +1126,25 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
EXPORT_SYMBOL(bitmap_copy_le);
#endif
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+{
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
+}
+EXPORT_SYMBOL(bitmap_alloc);
+
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+{
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(bitmap_zalloc);
+
+void bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+EXPORT_SYMBOL(bitmap_free);
+
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
@@ -1135,14 +1152,10 @@ EXPORT_SYMBOL(bitmap_copy_le);
* @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
-void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
- unsigned int nbits)
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
bitmap[i/2] = (unsigned long) buf[i];
@@ -1166,9 +1179,6 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
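
bitmap_alloc()/bitmap_zalloc()/bitmap_free() give callers a bit-sized allocation API instead of open-coding BITS_TO_LONGS() at every site. A minimal usage sketch (the surrounding function and names are made up):

#include <linux/bitmap.h>
#include <linux/slab.h>

static int track_ids(unsigned int nr_ids)
{
	unsigned long *seen;

	seen = bitmap_zalloc(nr_ids, GFP_KERNEL);	/* all bits clear */
	if (!seen)
		return -ENOMEM;

	set_bit(0, seen);
	/* ... */

	bitmap_free(seen);
	return 0;
}
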
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 266a97c5708b..64b92e1dbace 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -11,8 +11,9 @@
* to a power of 2 to be suitable as a hash table.
*/
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
- size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+ const char *name, struct lock_class_key *key)
{
spinlock_t *tlocks = NULL;
unsigned int i, size;
@@ -30,14 +31,13 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
}
if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
spin_lock_init(&tlocks[i]);
+ lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+ }
}
*locks = tlocks;
@@ -45,7 +45,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
return 0;
}
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
void free_bucket_spinlocks(spinlock_t *locks)
{
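
Renaming the function to __alloc_bucket_spinlocks() and passing a name plus a lock_class_key suggests the caller-facing interface becomes a macro that supplies a static key per call site, so lockdep can tell different bucket-lock arrays apart. The header side is not in this lib/ diff; it presumably looks something like:

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})
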
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe84b69..a6c9afafc8c8 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -184,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -194,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -268,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
- return crc32_generic_shift(crc, len, CRCPOLY_LE);
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a28a750..0c8fb5923e7e 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
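
The polynomial constants removed from crc32defs.h move to a shared header, include/linux/crc32poly.h, which crc32.c above and decompress_bunzip2.c below now include. Based on the removed definitions and the new CRC32_POLY_* names, that header presumably reads (guard name assumed):

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H

/*
 * There are multiple 16-bit CRC polynomials in common use, but this is
 * *the* standard CRC-32 polynomial, first popularized by Ethernet.
 * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
 */
#define CRC32_POLY_LE 0xedb88320
#define CRC32_POLY_BE 0x04c11db7

/*
 * This is the CRC32c polynomial, as outlined by Castagnoli.
 * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
 * x^8+x^6+x^0
 */
#define CRC32C_POLY_LE 0x82F63B78

#endif /* _LINUX_CRC32_POLY_H */
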
diff --git a/lib/crc64.c b/lib/crc64.c
new file mode 100644
index 000000000000..0ef8ae6ac047
--- /dev/null
+++ b/lib/crc64.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Normal 64-bit CRC calculation.
+ *
+ * This is a basic crc64 implementation following ECMA-182 specification,
+ * which can be found from,
+ * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ *
+ * Dr. Ross N. Williams has a great document to introduce the idea of CRC
+ * algorithm, here the CRC64 code is also inspired by the table-driven
+ * algorithm and detail example from this paper. This paper can be found
+ * from,
+ * http://www.ross.net/crc/download/crc_v3.txt
+ *
+ * crc64table[256] is the lookup table of a table-driven 64-bit CRC
+ * calculation, which is generated by gen_crc64table.c in kernel build
+ * time. The polynomial of crc64 arithmetic is from ECMA-182 specification
+ * as well, which is defined as,
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "crc64table.h"
+
+MODULE_DESCRIPTION("CRC64 calculations");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+{
+ size_t i, t;
+
+ const unsigned char *_p = p;
+
+ for (i = 0; i < len; i++) {
+ t = ((crc >> 56) ^ (*_p++)) & 0xFF;
+ crc = crc64table[t] ^ (crc << 8);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL_GPL(crc64_be);
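
A caller uses crc64_be() either over a whole buffer or incrementally by feeding the previous result back in as the seed, e.g. (the helper name is made up; the header is the one added alongside this file):

#include <linux/crc64.h>

static u64 checksum_two_parts(const void *a, size_t alen,
			      const void *b, size_t blen)
{
	u64 crc = 0;			/* or ~0ULL, per the kerneldoc above */

	crc = crc64_be(crc, a, alen);
	crc = crc64_be(crc, b, blen);	/* incremental update */
	return crc;
}

Users also need CONFIG_CRC64 enabled (built-in or as a module), per the new Kconfig entry earlier in this diff.
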
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2f5349c6e81a..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,14 +42,18 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
+static int __maybe_unused debug_objects_maxchecked __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
@@ -96,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
static void fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
- struct debug_obj *new;
+ struct debug_obj *new, *obj;
unsigned long flags;
if (likely(obj_pool_free >= debug_objects_pool_min_level))
return;
+ /*
+ * Reuse objs from the global free list; they will be reinitialized
+ * when allocating.
+ */
+ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ /*
+ * Recheck with the lock held as the worker thread might have
+ * won the race and freed the global free list already.
+ */
+ if (obj_nr_tofree) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ obj_nr_tofree--;
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ }
+
if (unlikely(!obj_cache))
return;
@@ -177,62 +201,76 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
* workqueue function to free objects.
*
* To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
*/
-#define ODEBUG_FREE_BATCH 4
-
static void free_obj_work(struct work_struct *work)
{
- struct debug_obj *objs[ODEBUG_FREE_BATCH];
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
unsigned long flags;
- int i;
+ HLIST_HEAD(tofree);
if (!raw_spin_trylock_irqsave(&pool_lock, flags))
return;
- while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
- for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
- objs[i] = hlist_entry(obj_pool.first,
- typeof(*objs[0]), node);
- hlist_del(&objs[i]->node);
- }
- obj_pool_free -= ODEBUG_FREE_BATCH;
- debug_objects_freed += ODEBUG_FREE_BATCH;
- /*
- * We release pool_lock across kmem_cache_free() to
- * avoid contention on pool_lock.
- */
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- for (i = 0; i < ODEBUG_FREE_BATCH; i++)
- kmem_cache_free(obj_cache, objs[i]);
- if (!raw_spin_trylock_irqsave(&pool_lock, flags))
- return;
+ /*
+ * The objs on the pool list might be allocated before the work is
+ * run, so recheck if pool list it full or not, if not fill pool
+ * list from the global free list
+ */
+ while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ obj_nr_tofree--;
+ }
+
+ /*
+ * Pool list is already full and there are still objs on the free
+ * list. Move remaining free objs to a temporary list to free the
+ * memory outside the pool_lock held region.
+ */
+ if (obj_nr_tofree) {
+ hlist_move_list(&obj_to_free, &tofree);
+ debug_objects_freed += obj_nr_tofree;
+ obj_nr_tofree = 0;
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+ hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
}
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
{
unsigned long flags;
- int sched = 0;
+ bool work;
raw_spin_lock_irqsave(&pool_lock, flags);
- /*
- * schedule work when the pool is filled and the cache is
- * initialized:
- */
- if (obj_pool_free > debug_objects_pool_size && obj_cache)
- sched = 1;
- hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
+ work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
obj_pool_used--;
+
+ if (work) {
+ obj_nr_tofree++;
+ hlist_add_head(&obj->node, &obj_to_free);
+ } else {
+ obj_pool_free++;
+ hlist_add_head(&obj->node, &obj_pool);
+ }
raw_spin_unlock_irqrestore(&pool_lock, flags);
- if (sched)
+ return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+ if (__free_object(obj))
schedule_work(&debug_obj_work);
}
@@ -322,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -714,13 +755,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct hlist_node *tmp;
- HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
+ struct hlist_node *tmp;
struct debug_obj *obj;
- int cnt;
+ int cnt, objs_checked = 0;
+ bool work = false;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -751,21 +792,24 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- hlist_add_head(&obj->node, &freelist);
+ work |= __free_object(obj);
break;
}
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- /* Now free them */
- hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
- hlist_del(&obj->node);
- free_object(obj);
- }
-
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
+
+ objs_checked += cnt;
}
+
+ if (objs_checked > debug_objects_maxchecked)
+ debug_objects_maxchecked = objs_checked;
+
+ /* Schedule work to actually kmem_cache_free() objects */
+ if (work)
+ schedule_work(&debug_obj_work);
}
void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -780,12 +824,14 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
+ seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
seq_printf(m, "pool_free :%d\n", obj_pool_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
seq_printf(m, "pool_used :%d\n", obj_pool_used);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
@@ -1142,8 +1188,7 @@ void __init debug_objects_mem_init(void)
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- if (obj_cache)
- kmem_cache_destroy(obj_cache);
+ kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 347fa7ac2e8a..9555b68bb774 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
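
The new *_irqsave variant serves release paths that must take the protecting lock with interrupts disabled. The caller-facing wrapper (expected in include/linux/spinlock.h, not part of this diff) passes the flags variable by name, like spin_lock_irqsave(); a typical use, with a made-up struct foo and list lock:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(foo_list_lock);

static void put_foo(struct foo *f)
{
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&f->refcount, &foo_list_lock, flags))
		return;				/* not the last reference */

	/* refcount hit zero and foo_list_lock is now held, irqs disabled */
	list_del(&f->node);
	spin_unlock_irqrestore(&foo_list_lock, flags);
	kfree(f);
}
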
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361b24b8..7c4932eed748 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
- c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
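The loop above builds the conventional big-endian (MSB-first) CRC-32 table for the 0x04c11db7 polynomial, now spelled CRC32_POLY_BE. For context, here is a small sketch of how such a table is typically consumed when checksumming a buffer; this is illustrative and not the decompressor's actual call site:

static u32 crc32_be_update(u32 crc, const u8 *buf, size_t len,
                           const u32 table[256])
{
        while (len--) {
                /*
                 * MSB-first update: shift the CRC left by one byte and
                 * fold in the table entry selected by the byte shifted
                 * out, XORed with the next input byte.
                 */
                crc = (crc << 8) ^ table[(crc >> 24) ^ *buf++];
        }
        return crc;
}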
diff --git a/lib/devres.c b/lib/devres.c
index 5f2aedd58bc5..faccf1a037d0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -4,6 +4,13 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/of_address.h>
+
+enum devm_ioremap_type {
+ DEVM_IOREMAP = 0,
+ DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_WC,
+};
void devm_ioremap_release(struct device *dev, void *res)
{
@@ -15,24 +22,28 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
return *(void **)res == match_data;
}
-/**
- * devm_ioremap - Managed ioremap()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
- resource_size_t size)
+static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size,
+ enum devm_ioremap_type type)
{
- void __iomem **ptr, *addr;
+ void __iomem **ptr, *addr = NULL;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
- addr = ioremap(offset, size);
+ switch (type) {
+ case DEVM_IOREMAP:
+ addr = ioremap(offset, size);
+ break;
+ case DEVM_IOREMAP_NC:
+ addr = ioremap_nocache(offset, size);
+ break;
+ case DEVM_IOREMAP_WC:
+ addr = ioremap_wc(offset, size);
+ break;
+ }
+
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
@@ -41,6 +52,20 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
return addr;
}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
+}
EXPORT_SYMBOL(devm_ioremap);
/**
@@ -55,20 +80,7 @@ EXPORT_SYMBOL(devm_ioremap);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size)
{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = ioremap_nocache(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
}
EXPORT_SYMBOL(devm_ioremap_nocache);
@@ -83,20 +95,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size)
{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = ioremap_wc(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
@@ -164,6 +163,41 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
+/**
+ * devm_of_iomap - Requests a resource and maps the memory mapped IO
+ * for a given device_node managed by a given device
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach of the device.
+ *
+ * This is to be used when a device requests/maps resources described
+ * by other device tree nodes (children or otherwise).
+ *
+ * @dev: The device "managing" the resource
+ * @node: The device-tree node where the resource resides
+ * @index: index of the MMIO range in the "reg" property
+ * @size: Returns the size of the resource (pass NULL if not needed)
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ * base = devm_of_iomap(&pdev->dev, node, 0, NULL);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ struct resource res;
+
+ if (of_address_to_resource(node, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+ if (size)
+ *size = resource_size(&res);
+ return devm_ioremap_resource(dev, &res);
+}
+EXPORT_SYMBOL(devm_of_iomap);
+
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
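devm_of_iomap() above bundles of_address_to_resource() with devm_ioremap_resource(), which is handy when a driver needs to map a register window described by a device-tree node other than its own. A hedged probe-time sketch; the child-node name and function names are hypothetical:

static int foo_probe(struct platform_device *pdev)
{
        struct device_node *child;
        void __iomem *regs;
        resource_size_t sz;

        /* Hypothetical child node carrying the "reg" range we map. */
        child = of_get_child_by_name(pdev->dev.of_node, "syscfg");
        if (!child)
                return -ENODEV;

        regs = devm_of_iomap(&pdev->dev, child, 0, &sz);
        of_node_put(child);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        dev_info(&pdev->dev, "mapped %pa bytes\n", &sz);
        return 0;
}

The mapping is released automatically on driver detach, exactly like any other devres-managed resource.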
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
deleted file mode 100644
index 7f5cdc1e6b29..000000000000
--- a/lib/dma-debug.c
+++ /dev/null
@@ -1,1752 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/sched/task_stack.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched/task.h>
-#include <linux/stacktrace.h>
-#include <linux/dma-debug.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/sections.h>
-
-#define HASH_SIZE 1024ULL
-#define HASH_FN_SHIFT 13
-#define HASH_FN_MASK (HASH_SIZE - 1)
-
-enum {
- dma_debug_single,
- dma_debug_page,
- dma_debug_sg,
- dma_debug_coherent,
- dma_debug_resource,
-};
-
-enum map_err_types {
- MAP_ERR_CHECK_NOT_APPLICABLE,
- MAP_ERR_NOT_CHECKED,
- MAP_ERR_CHECKED,
-};
-
-#define DMA_DEBUG_STACKTRACE_ENTRIES 5
-
-/**
- * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
- * @list: node on pre-allocated free_entries list
- * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
- * @size: length of the mapping
- * @direction: enum dma_data_direction
- * @sg_call_ents: 'nents' from dma_map_sg
- * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
- */
-struct dma_debug_entry {
- struct list_head list;
- struct device *dev;
- int type;
- unsigned long pfn;
- size_t offset;
- u64 dev_addr;
- u64 size;
- int direction;
- int sg_call_ents;
- int sg_mapped_ents;
- enum map_err_types map_err_type;
-#ifdef CONFIG_STACKTRACE
- struct stack_trace stacktrace;
- unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
-#endif
-};
-
-typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
-
-struct hash_bucket {
- struct list_head list;
- spinlock_t lock;
-} ____cacheline_aligned_in_smp;
-
-/* Hash list to save the allocated dma addresses */
-static struct hash_bucket dma_entry_hash[HASH_SIZE];
-/* List of pre-allocated dma_debug_entry's */
-static LIST_HEAD(free_entries);
-/* Lock for the list above */
-static DEFINE_SPINLOCK(free_entries_lock);
-
-/* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
-
-/* Early initialization disable flag, set at the end of dma_debug_init */
-static bool dma_debug_initialized __read_mostly;
-
-static inline bool dma_debug_disabled(void)
-{
- return global_disable || !dma_debug_initialized;
-}
-
-/* Global error count */
-static u32 error_count;
-
-/* Global error show enable */
-static u32 show_all_errors __read_mostly;
-/* Number of errors to show */
-static u32 show_num_errors = 1;
-
-static u32 num_free_entries;
-static u32 min_free_entries;
-static u32 nr_total_entries;
-
-/* number of preallocated entries requested by kernel cmdline */
-static u32 req_entries;
-
-/* debugfs dentry's for the stuff above */
-static struct dentry *dma_debug_dent __read_mostly;
-static struct dentry *global_disable_dent __read_mostly;
-static struct dentry *error_count_dent __read_mostly;
-static struct dentry *show_all_errors_dent __read_mostly;
-static struct dentry *show_num_errors_dent __read_mostly;
-static struct dentry *num_free_entries_dent __read_mostly;
-static struct dentry *min_free_entries_dent __read_mostly;
-static struct dentry *filter_dent __read_mostly;
-
-/* per-driver filter related state */
-
-#define NAME_MAX_LEN 64
-
-static char current_driver_name[NAME_MAX_LEN] __read_mostly;
-static struct device_driver *current_driver __read_mostly;
-
-static DEFINE_RWLOCK(driver_name_lock);
-
-static const char *const maperr2str[] = {
- [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
- [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
- [MAP_ERR_CHECKED] = "dma map error checked",
-};
-
-static const char *type2name[5] = { "single", "page",
-				     "scatter-gather", "coherent",
- "resource" };
-
-static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
- "DMA_FROM_DEVICE", "DMA_NONE" };
-
-/*
- * The access to some variables in this macro is racy. We can't use atomic_t
- * here because all these variables are exported to debugfs. Some of them are
- * even writable. This is also the reason why a lock won't help much. But
- * anyway, the races are no big deal. Here is why:
- *
- * error_count: the addition is racy, but the worst thing that can happen is
- * that we don't count some errors
- * show_num_errors: the subtraction is racy. Also no big deal because in
- * worst case this will result in one warning more in the
- * system log than the user configured. This variable is
- * writeable via debugfs.
- */
-static inline void dump_entry_trace(struct dma_debug_entry *entry)
-{
-#ifdef CONFIG_STACKTRACE
- if (entry) {
- pr_warning("Mapped at:\n");
- print_stack_trace(&entry->stacktrace, 0);
- }
-#endif
-}
-
-static bool driver_filter(struct device *dev)
-{
- struct device_driver *drv;
- unsigned long flags;
- bool ret;
-
- /* driver filter off */
- if (likely(!current_driver_name[0]))
- return true;
-
- /* driver filter on and initialized */
- if (current_driver && dev && dev->driver == current_driver)
- return true;
-
- /* driver filter on, but we can't filter on a NULL device... */
- if (!dev)
- return false;
-
- if (current_driver || !current_driver_name[0])
- return false;
-
- /* driver filter on but not yet initialized */
- drv = dev->driver;
- if (!drv)
- return false;
-
- /* lock to protect against change of current_driver_name */
- read_lock_irqsave(&driver_name_lock, flags);
-
- ret = false;
- if (drv->name &&
- strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
- current_driver = drv;
- ret = true;
- }
-
- read_unlock_irqrestore(&driver_name_lock, flags);
-
- return ret;
-}
-
-#define err_printk(dev, entry, format, arg...) do { \
- error_count += 1; \
- if (driver_filter(dev) && \
- (show_all_errors || show_num_errors > 0)) { \
- WARN(1, "%s %s: " format, \
- dev ? dev_driver_string(dev) : "NULL", \
- dev ? dev_name(dev) : "NULL", ## arg); \
- dump_entry_trace(entry); \
- } \
- if (!show_all_errors && show_num_errors > 0) \
- show_num_errors -= 1; \
- } while (0);
-
-/*
- * Hash related functions
- *
- * Every DMA-API request is saved into a struct dma_debug_entry. To
- * have quick access to these structs they are stored into a hash.
- */
-static int hash_fn(struct dma_debug_entry *entry)
-{
- /*
- * Hash function is based on the dma address.
-	 * We use bits 13-22 here as the index into the hash
- */
- return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
-}
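With HASH_FN_SHIFT of 13 and a 1024-entry table, the bucket index comes from bits 13-22 of the DMA address. A quick worked example with an illustrative address:

/*
 * dev_addr = 0x12345678
 * idx      = (0x12345678 >> 13) & 0x3ff = 0x1a2 = 418
 * so the entry is tracked in dma_entry_hash[418].
 */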
-
-/*
- * Request exclusive access to a hash bucket for a given dma_debug_entry.
- */
-static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
- unsigned long *flags)
- __acquires(&dma_entry_hash[idx].lock)
-{
- int idx = hash_fn(entry);
- unsigned long __flags;
-
- spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
- *flags = __flags;
- return &dma_entry_hash[idx];
-}
-
-/*
- * Give up exclusive access to the hash bucket
- */
-static void put_hash_bucket(struct hash_bucket *bucket,
- unsigned long *flags)
- __releases(&bucket->lock)
-{
- unsigned long __flags = *flags;
-
- spin_unlock_irqrestore(&bucket->lock, __flags);
-}
-
-static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
-{
- return ((a->dev_addr == b->dev_addr) &&
- (a->dev == b->dev)) ? true : false;
-}
-
-static bool containing_match(struct dma_debug_entry *a,
- struct dma_debug_entry *b)
-{
- if (a->dev != b->dev)
- return false;
-
- if ((b->dev_addr <= a->dev_addr) &&
- ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
- return true;
-
- return false;
-}
-
-/*
- * Search a given entry in the hash bucket list
- */
-static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
- struct dma_debug_entry *ref,
- match_fn match)
-{
- struct dma_debug_entry *entry, *ret = NULL;
- int matches = 0, match_lvl, last_lvl = -1;
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!match(ref, entry))
- continue;
-
- /*
- * Some drivers map the same physical address multiple
- * times. Without a hardware IOMMU this results in the
- * same device addresses being put into the dma-debug
- * hash multiple times too. This can result in false
- * positives being reported. Therefore we implement a
- * best-fit algorithm here which returns the entry from
- * the hash which fits best to the reference value
- * instead of the first-fit.
- */
- matches += 1;
- match_lvl = 0;
- entry->size == ref->size ? ++match_lvl : 0;
- entry->type == ref->type ? ++match_lvl : 0;
- entry->direction == ref->direction ? ++match_lvl : 0;
- entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
-
- if (match_lvl == 4) {
- /* perfect-fit - return the result */
- return entry;
- } else if (match_lvl > last_lvl) {
- /*
-			 * We found an entry that fits better than the
- * previous one or it is the 1st match.
- */
- last_lvl = match_lvl;
- ret = entry;
- }
- }
-
- /*
- * If we have multiple matches but no perfect-fit, just return
- * NULL.
- */
- ret = (matches == 1) ? ret : NULL;
-
- return ret;
-}
-
-static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
- struct dma_debug_entry *ref)
-{
- return __hash_bucket_find(bucket, ref, exact_match);
-}
-
-static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
- struct dma_debug_entry *ref,
- unsigned long *flags)
-{
-
- unsigned int max_range = dma_get_max_seg_size(ref->dev);
- struct dma_debug_entry *entry, index = *ref;
- unsigned int range = 0;
-
- while (range <= max_range) {
- entry = __hash_bucket_find(*bucket, ref, containing_match);
-
- if (entry)
- return entry;
-
- /*
- * Nothing found, go back a hash bucket
- */
- put_hash_bucket(*bucket, flags);
- range += (1 << HASH_FN_SHIFT);
- index.dev_addr -= (1 << HASH_FN_SHIFT);
- *bucket = get_hash_bucket(&index, flags);
- }
-
- return NULL;
-}
-
-/*
- * Add an entry to a hash bucket
- */
-static void hash_bucket_add(struct hash_bucket *bucket,
- struct dma_debug_entry *entry)
-{
- list_add_tail(&entry->list, &bucket->list);
-}
-
-/*
- * Remove entry from a hash bucket list
- */
-static void hash_bucket_del(struct dma_debug_entry *entry)
-{
- list_del(&entry->list);
-}
-
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
- if (entry->type == dma_debug_resource)
- return __pfn_to_phys(entry->pfn) + entry->offset;
-
- return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
-/*
- * Dump mapping entries for debugging purposes
- */
-void debug_dma_dump_mappings(struct device *dev)
-{
- int idx;
-
- for (idx = 0; idx < HASH_SIZE; idx++) {
- struct hash_bucket *bucket = &dma_entry_hash[idx];
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bucket->lock, flags);
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!dev || dev == entry->dev) {
- dev_info(entry->dev,
- "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
- type2name[entry->type], idx,
- phys_addr(entry), entry->pfn,
- entry->dev_addr, entry->size,
- dir2name[entry->direction],
- maperr2str[entry->map_err_type]);
- }
- }
-
- spin_unlock_irqrestore(&bucket->lock, flags);
- }
-}
-EXPORT_SYMBOL(debug_dma_dump_mappings);
-
-/*
- * For each mapping (initial cacheline in the case of
- * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
- * scatterlist, or the cacheline specified in dma_map_single) insert
- * into this tree using the cacheline as the key. At
- * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the entry already exists at insertion time add a tag as a reference
- * count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the
- * cacheline idle, but we should also be flagging overlaps as an API
- * violation.
- *
- * Memory usage is mostly constrained by the maximum number of available
- * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_page and
- * dma_alloc_coherent there is only one dma_debug_entry and one
- * dma_active_cacheline entry to track per event. dma_map_sg(), on the
- * other hand, consumes a single dma_debug_entry, but inserts 'nents'
- * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
- */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
-static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
-#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
-#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-
-static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
-{
- return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
- (entry->offset >> L1_CACHE_SHIFT);
-}
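The key packs the page frame number together with the cacheline index inside the page. Assuming the common PAGE_SHIFT of 12 and L1_CACHE_SHIFT of 6 (both are architecture-dependent, so this is only an example), CACHELINE_PER_PAGE_SHIFT is 6 and each page holds 64 cachelines:

/*
 * entry->pfn    = 0x1234
 * entry->offset = 0x8c0          (0x8c0 >> 6 = 35, i.e. cacheline 35)
 * cln           = (0x1234 << 6) + 35 = 0x48d00 + 0x23 = 0x48d23
 */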
-
-static int active_cacheline_read_overlap(phys_addr_t cln)
-{
- int overlap = 0, i;
-
- for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
- overlap |= 1 << i;
- return overlap;
-}
-
-static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
-{
- int i;
-
- if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
- return overlap;
-
- for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (overlap & 1 << i)
- radix_tree_tag_set(&dma_active_cacheline, cln, i);
- else
- radix_tree_tag_clear(&dma_active_cacheline, cln, i);
-
- return overlap;
-}
-
-static void active_cacheline_inc_overlap(phys_addr_t cln)
-{
- int overlap = active_cacheline_read_overlap(cln);
-
- overlap = active_cacheline_set_overlap(cln, ++overlap);
-
- /* If we overflowed the overlap counter then we're potentially
- * leaking dma-mappings. Otherwise, if maps and unmaps are
- * balanced then this overflow may cause false negatives in
- * debug_dma_assert_idle() as the cacheline may be marked idle
- * prematurely.
- */
- WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
- "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
- ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
-}
-
-static int active_cacheline_dec_overlap(phys_addr_t cln)
-{
- int overlap = active_cacheline_read_overlap(cln);
-
- return active_cacheline_set_overlap(cln, --overlap);
-}
-
-static int active_cacheline_insert(struct dma_debug_entry *entry)
-{
- phys_addr_t cln = to_cacheline_number(entry);
- unsigned long flags;
- int rc;
-
- /* If the device is not writing memory then we don't have any
- * concerns about the cpu consuming stale data. This mitigates
- * legitimate usages of overlapping mappings.
- */
- if (entry->direction == DMA_TO_DEVICE)
- return 0;
-
- spin_lock_irqsave(&radix_lock, flags);
- rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
- if (rc == -EEXIST)
- active_cacheline_inc_overlap(cln);
- spin_unlock_irqrestore(&radix_lock, flags);
-
- return rc;
-}
-
-static void active_cacheline_remove(struct dma_debug_entry *entry)
-{
- phys_addr_t cln = to_cacheline_number(entry);
- unsigned long flags;
-
- /* ...mirror the insert case */
- if (entry->direction == DMA_TO_DEVICE)
- return;
-
- spin_lock_irqsave(&radix_lock, flags);
- /* since we are counting overlaps the final put of the
- * cacheline will occur when the overlap count is 0.
- * active_cacheline_dec_overlap() returns -1 in that case
- */
- if (active_cacheline_dec_overlap(cln) < 0)
- radix_tree_delete(&dma_active_cacheline, cln);
- spin_unlock_irqrestore(&radix_lock, flags);
-}
-
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
- static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
- struct dma_debug_entry *entry = NULL;
- void **results = (void **) &ents;
- unsigned int nents, i;
- unsigned long flags;
- phys_addr_t cln;
-
- if (dma_debug_disabled())
- return;
-
- if (!page)
- return;
-
- cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
- spin_lock_irqsave(&radix_lock, flags);
- nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
- CACHELINES_PER_PAGE);
- for (i = 0; i < nents; i++) {
- phys_addr_t ent_cln = to_cacheline_number(ents[i]);
-
- if (ent_cln == cln) {
- entry = ents[i];
- break;
- } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
- break;
- }
- spin_unlock_irqrestore(&radix_lock, flags);
-
- if (!entry)
- return;
-
- cln = to_cacheline_number(entry);
- err_printk(entry->dev, entry,
- "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
- &cln);
-}
-
-/*
- * Wrapper function for adding an entry to the hash.
- * This function takes care of locking itself.
- */
-static void add_dma_entry(struct dma_debug_entry *entry)
-{
- struct hash_bucket *bucket;
- unsigned long flags;
- int rc;
-
- bucket = get_hash_bucket(entry, &flags);
- hash_bucket_add(bucket, entry);
- put_hash_bucket(bucket, &flags);
-
- rc = active_cacheline_insert(entry);
- if (rc == -ENOMEM) {
- pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
- global_disable = true;
- }
-
- /* TODO: report -EEXIST errors here as overlapping mappings are
- * not supported by the DMA API
- */
-}
-
-static struct dma_debug_entry *__dma_entry_alloc(void)
-{
- struct dma_debug_entry *entry;
-
- entry = list_entry(free_entries.next, struct dma_debug_entry, list);
- list_del(&entry->list);
- memset(entry, 0, sizeof(*entry));
-
- num_free_entries -= 1;
- if (num_free_entries < min_free_entries)
- min_free_entries = num_free_entries;
-
- return entry;
-}
-
-/* struct dma_entry allocator
- *
- * The next two functions implement the allocator for
- * struct dma_debug_entries.
- */
-static struct dma_debug_entry *dma_entry_alloc(void)
-{
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- if (list_empty(&free_entries)) {
- global_disable = true;
- spin_unlock_irqrestore(&free_entries_lock, flags);
- pr_err("DMA-API: debugging out of memory - disabling\n");
- return NULL;
- }
-
- entry = __dma_entry_alloc();
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
-#ifdef CONFIG_STACKTRACE
- entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
- entry->stacktrace.entries = entry->st_entries;
- entry->stacktrace.skip = 2;
- save_stack_trace(&entry->stacktrace);
-#endif
-
- return entry;
-}
-
-static void dma_entry_free(struct dma_debug_entry *entry)
-{
- unsigned long flags;
-
- active_cacheline_remove(entry);
-
- /*
- * add to beginning of the list - this way the entries are
- * more likely cache hot when they are reallocated.
- */
- spin_lock_irqsave(&free_entries_lock, flags);
- list_add(&entry->list, &free_entries);
- num_free_entries += 1;
- spin_unlock_irqrestore(&free_entries_lock, flags);
-}
-
-int dma_debug_resize_entries(u32 num_entries)
-{
- int i, delta, ret = 0;
- unsigned long flags;
- struct dma_debug_entry *entry;
- LIST_HEAD(tmp);
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- if (nr_total_entries < num_entries) {
- delta = num_entries - nr_total_entries;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- for (i = 0; i < delta; i++) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
-
- list_add_tail(&entry->list, &tmp);
- }
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- list_splice(&tmp, &free_entries);
- nr_total_entries += i;
- num_free_entries += i;
- } else {
- delta = nr_total_entries - num_entries;
-
- for (i = 0; i < delta && !list_empty(&free_entries); i++) {
- entry = __dma_entry_alloc();
- kfree(entry);
- }
-
- nr_total_entries -= i;
- }
-
- if (nr_total_entries != num_entries)
- ret = 1;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(dma_debug_resize_entries);
-
-/*
- * DMA-API debugging init code
- *
- * The init code does two things:
- * 1. Initialize core data structures
- * 2. Preallocate a given number of dma_debug_entry structs
- */
-
-static int prealloc_memory(u32 num_entries)
-{
- struct dma_debug_entry *entry, *next_entry;
- int i;
-
- for (i = 0; i < num_entries; ++i) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto out_err;
-
- list_add_tail(&entry->list, &free_entries);
- }
-
- num_free_entries = num_entries;
- min_free_entries = num_entries;
-
- pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
-
- return 0;
-
-out_err:
-
- list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- return -ENOMEM;
-}
-
-static ssize_t filter_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char buf[NAME_MAX_LEN + 1];
- unsigned long flags;
- int len;
-
- if (!current_driver_name[0])
- return 0;
-
- /*
- * We can't copy to userspace directly because current_driver_name can
- * only be read under the driver_name_lock with irqs disabled. So
- * create a temporary copy first.
- */
- read_lock_irqsave(&driver_name_lock, flags);
- len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
- read_unlock_irqrestore(&driver_name_lock, flags);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t filter_write(struct file *file, const char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char buf[NAME_MAX_LEN];
- unsigned long flags;
- size_t len;
- int i;
-
- /*
- * We can't copy from userspace directly. Access to
- * current_driver_name is protected with a write_lock with irqs
- * disabled. Since copy_from_user can fault and may sleep we
- * need to copy to temporary buffer first
- */
- len = min(count, (size_t)(NAME_MAX_LEN - 1));
- if (copy_from_user(buf, userbuf, len))
- return -EFAULT;
-
- buf[len] = 0;
-
- write_lock_irqsave(&driver_name_lock, flags);
-
- /*
- * Now handle the string we got from userspace very carefully.
- * The rules are:
- * - only use the first token we got
- * - token delimiter is everything looking like a space
- * character (' ', '\n', '\t' ...)
- *
- */
- if (!isalnum(buf[0])) {
- /*
- * If the first character userspace gave us is not
- * alphanumerical then assume the filter should be
- * switched off.
- */
- if (current_driver_name[0])
- pr_info("DMA-API: switching off dma-debug driver filter\n");
- current_driver_name[0] = 0;
- current_driver = NULL;
- goto out_unlock;
- }
-
- /*
- * Now parse out the first token and use it as the name for the
- * driver to filter for.
- */
- for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
- current_driver_name[i] = buf[i];
- if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
- break;
- }
- current_driver_name[i] = 0;
- current_driver = NULL;
-
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
- current_driver_name);
-
-out_unlock:
- write_unlock_irqrestore(&driver_name_lock, flags);
-
- return count;
-}
-
-static const struct file_operations filter_fops = {
- .read = filter_read,
- .write = filter_write,
- .llseek = default_llseek,
-};
-
-static int dma_debug_fs_init(void)
-{
- dma_debug_dent = debugfs_create_dir("dma-api", NULL);
- if (!dma_debug_dent) {
- pr_err("DMA-API: can not create debugfs directory\n");
- return -ENOMEM;
- }
-
- global_disable_dent = debugfs_create_bool("disabled", 0444,
- dma_debug_dent,
- &global_disable);
- if (!global_disable_dent)
- goto out_err;
-
- error_count_dent = debugfs_create_u32("error_count", 0444,
- dma_debug_dent, &error_count);
- if (!error_count_dent)
- goto out_err;
-
- show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
- dma_debug_dent,
- &show_all_errors);
- if (!show_all_errors_dent)
- goto out_err;
-
- show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
- dma_debug_dent,
- &show_num_errors);
- if (!show_num_errors_dent)
- goto out_err;
-
- num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
- dma_debug_dent,
- &num_free_entries);
- if (!num_free_entries_dent)
- goto out_err;
-
- min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
- dma_debug_dent,
- &min_free_entries);
- if (!min_free_entries_dent)
- goto out_err;
-
- filter_dent = debugfs_create_file("driver_filter", 0644,
- dma_debug_dent, NULL, &filter_fops);
- if (!filter_dent)
- goto out_err;
-
- return 0;
-
-out_err:
- debugfs_remove_recursive(dma_debug_dent);
-
- return -ENOMEM;
-}
-
-static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
-{
- struct dma_debug_entry *entry;
- unsigned long flags;
- int count = 0, i;
-
- for (i = 0; i < HASH_SIZE; ++i) {
- spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
- list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
- if (entry->dev == dev) {
- count += 1;
- *out_entry = entry;
- }
- }
- spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
- }
-
- return count;
-}
-
-static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct device *dev = data;
- struct dma_debug_entry *uninitialized_var(entry);
- int count;
-
- if (dma_debug_disabled())
- return 0;
-
- switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
- count = device_dma_allocations(dev, &entry);
- if (count == 0)
- break;
- err_printk(dev, entry, "DMA-API: device driver has pending "
- "DMA allocations while released from device "
- "[count=%d]\n"
- "One of leaked entries details: "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [mapped as %s]\n",
- count, entry->dev_addr, entry->size,
- dir2name[entry->direction], type2name[entry->type]);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-void dma_debug_add_bus(struct bus_type *bus)
-{
- struct notifier_block *nb;
-
- if (dma_debug_disabled())
- return;
-
- nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- if (nb == NULL) {
- pr_err("dma_debug_add_bus: out of memory\n");
- return;
- }
-
- nb->notifier_call = dma_debug_device_change;
-
- bus_register_notifier(bus, nb);
-}
-
-/*
- * Let the architectures decide how many entries should be preallocated.
- */
-void dma_debug_init(u32 num_entries)
-{
- int i;
-
- /* Do not use dma_debug_initialized here, since we really want to be
- * called to set dma_debug_initialized
- */
- if (global_disable)
- return;
-
- for (i = 0; i < HASH_SIZE; ++i) {
- INIT_LIST_HEAD(&dma_entry_hash[i].list);
- spin_lock_init(&dma_entry_hash[i].lock);
- }
-
- if (dma_debug_fs_init() != 0) {
- pr_err("DMA-API: error creating debugfs entries - disabling\n");
- global_disable = true;
-
- return;
- }
-
- if (req_entries)
- num_entries = req_entries;
-
- if (prealloc_memory(num_entries) != 0) {
- pr_err("DMA-API: debugging out of memory error - disabled\n");
- global_disable = true;
-
- return;
- }
-
- nr_total_entries = num_free_entries;
-
- dma_debug_initialized = true;
-
- pr_info("DMA-API: debugging enabled by kernel config\n");
-}
-
-static __init int dma_debug_cmdline(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (strncmp(str, "off", 3) == 0) {
- pr_info("DMA-API: debugging disabled on kernel command line\n");
- global_disable = true;
- }
-
- return 0;
-}
-
-static __init int dma_debug_entries_cmdline(char *str)
-{
- int res;
-
- if (!str)
- return -EINVAL;
-
- res = get_option(&str, &req_entries);
-
- if (!res)
- req_entries = 0;
-
- return 0;
-}
-
-__setup("dma_debug=", dma_debug_cmdline);
-__setup("dma_debug_entries=", dma_debug_entries_cmdline);
-
-static void check_unmap(struct dma_debug_entry *ref)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- bucket = get_hash_bucket(ref, &flags);
- entry = bucket_find_exact(bucket, ref);
-
- if (!entry) {
- /* must drop lock before calling dma_mapping_error */
- put_hash_bucket(bucket, &flags);
-
- if (dma_mapping_error(ref->dev, ref->dev_addr)) {
- err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free an "
- "invalid DMA memory address\n");
- } else {
- err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free DMA "
- "memory it has not allocated [device "
- "address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- }
- return;
- }
-
- if (ref->size != entry->size) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different size "
- "[device address=0x%016llx] [map size=%llu bytes] "
- "[unmap size=%llu bytes]\n",
- ref->dev_addr, entry->size, ref->size);
- }
-
- if (ref->type != entry->type) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with wrong function "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped as %s] [unmapped as %s]\n",
- ref->dev_addr, ref->size,
- type2name[entry->type], type2name[ref->type]);
- } else if ((entry->type == dma_debug_coherent) &&
- (phys_addr(ref) != phys_addr(entry))) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different CPU address "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[cpu alloc address=0x%016llx] "
- "[cpu free address=0x%016llx]",
- ref->dev_addr, ref->size,
- phys_addr(entry),
- phys_addr(ref));
- }
-
- if (ref->sg_call_ents && ref->type == dma_debug_sg &&
- ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA sg list with different entry count "
- "[map count=%d] [unmap count=%d]\n",
- entry->sg_call_ents, ref->sg_call_ents);
- }
-
- /*
- * This may be no bug in reality - but most implementations of the
- * DMA API don't handle this properly, so check for it here
- */
- if (ref->direction != entry->direction) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different direction "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [unmapped with %s]\n",
- ref->dev_addr, ref->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
- }
-
- /*
- * Drivers should use dma_mapping_error() to check the returned
- * addresses of dma_map_single() and dma_map_page().
- * If not, print this warning message. See Documentation/DMA-API.txt.
- */
- if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
- err_printk(ref->dev, entry,
-			   "DMA-API: device driver failed to check map error "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped as %s]",
- ref->dev_addr, ref->size,
- type2name[entry->type]);
- }
-
- hash_bucket_del(entry);
- dma_entry_free(entry);
-
- put_hash_bucket(bucket, &flags);
-}
-
-static void check_for_stack(struct device *dev,
- struct page *page, size_t offset)
-{
- void *addr;
- struct vm_struct *stack_vm_area = task_stack_vm_area(current);
-
- if (!stack_vm_area) {
- /* Stack is direct-mapped. */
- if (PageHighMem(page))
- return;
- addr = page_address(page) + offset;
- if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
- } else {
- /* Stack is vmalloced. */
- int i;
-
- for (i = 0; i < stack_vm_area->nr_pages; i++) {
- if (page != stack_vm_area->pages[i])
- continue;
-
- addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
- break;
- }
- }
-}
-
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
- unsigned long a1 = (unsigned long)addr;
- unsigned long b1 = a1 + len;
- unsigned long a2 = (unsigned long)start;
- unsigned long b2 = (unsigned long)end;
-
- return !(b1 <= a2 || a1 >= b2);
-}
-
-static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
-{
- if (overlap(addr, len, _stext, _etext) ||
- overlap(addr, len, __start_rodata, __end_rodata))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
-}
-
-static void check_sync(struct device *dev,
- struct dma_debug_entry *ref,
- bool to_cpu)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- bucket = get_hash_bucket(ref, &flags);
-
- entry = bucket_find_contain(&bucket, ref, &flags);
-
- if (!entry) {
- err_printk(dev, NULL, "DMA-API: device driver tries "
- "to sync DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- (unsigned long long)ref->dev_addr, ref->size);
- goto out;
- }
-
- if (ref->size > entry->size) {
- err_printk(dev, entry, "DMA-API: device driver syncs"
- " DMA memory outside allocated range "
- "[device address=0x%016llx] "
- "[allocation size=%llu bytes] "
- "[sync offset+size=%llu]\n",
- entry->dev_addr, entry->size,
- ref->size);
- }
-
- if (entry->direction == DMA_BIDIRECTIONAL)
- goto out;
-
- if (ref->direction != entry->direction) {
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "DMA memory with different direction "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
- }
-
- if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
- !(ref->direction == DMA_TO_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "device read-only DMA memory for cpu "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
-
- if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
- !(ref->direction == DMA_FROM_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "device write-only DMA memory to device "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
-
- if (ref->sg_call_ents && ref->type == dma_debug_sg &&
- ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver syncs "
- "DMA sg list with different entry count "
- "[map count=%d] [sync count=%d]\n",
- entry->sg_call_ents, ref->sg_call_ents);
- }
-
-out:
- put_hash_bucket(bucket, &flags);
-}
-
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
- size_t size, int direction, dma_addr_t dma_addr,
- bool map_single)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (dma_mapping_error(dev, dma_addr))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->dev = dev;
- entry->type = dma_debug_page;
- entry->pfn = page_to_pfn(page);
- entry->offset = offset,
- entry->dev_addr = dma_addr;
- entry->size = size;
- entry->direction = direction;
- entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
- if (map_single)
- entry->type = dma_debug_single;
-
- check_for_stack(dev, page, offset);
-
- if (!PageHighMem(page)) {
- void *addr = page_address(page) + offset;
-
- check_for_illegal_area(dev, addr, size);
- }
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_page);
-
-void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- struct dma_debug_entry ref;
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.dev = dev;
- ref.dev_addr = dma_addr;
- bucket = get_hash_bucket(&ref, &flags);
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!exact_match(&ref, entry))
- continue;
-
- /*
- * The same physical address can be mapped multiple
- * times. Without a hardware IOMMU this results in the
- * same device addresses being put into the dma-debug
- * hash multiple times too. This can result in false
- * positives being reported. Therefore we implement a
- * best-fit algorithm here which updates the first entry
- * from the hash which fits the reference value and is
- * not currently listed as being checked.
- */
- if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
- entry->map_err_type = MAP_ERR_CHECKED;
- break;
- }
- }
-
- put_hash_bucket(bucket, &flags);
-}
-EXPORT_SYMBOL(debug_dma_mapping_error);
-
-void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_page,
- .dev = dev,
- .dev_addr = addr,
- .size = size,
- .direction = direction,
- };
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (map_single)
- ref.type = dma_debug_single;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_page);
-
-void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
- struct dma_debug_entry *entry;
- struct scatterlist *s;
- int i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, mapped_ents, i) {
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_sg;
- entry->dev = dev;
- entry->pfn = page_to_pfn(sg_page(s));
- entry->offset = s->offset,
- entry->size = sg_dma_len(s);
- entry->dev_addr = sg_dma_address(s);
- entry->direction = direction;
- entry->sg_call_ents = nents;
- entry->sg_mapped_ents = mapped_ents;
-
- check_for_stack(dev, sg_page(s), s->offset);
-
- if (!PageHighMem(sg_page(s))) {
- check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
- }
-
- add_dma_entry(entry);
- }
-}
-EXPORT_SYMBOL(debug_dma_map_sg);
-
-static int get_nr_mapped_entries(struct device *dev,
- struct dma_debug_entry *ref)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
- int mapped_ents;
-
- bucket = get_hash_bucket(ref, &flags);
- entry = bucket_find_exact(bucket, ref);
- mapped_ents = 0;
-
- if (entry)
- mapped_ents = entry->sg_mapped_ents;
- put_hash_bucket(bucket, &flags);
-
- return mapped_ents;
-}
-
-void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sglist, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = dir,
- .sg_call_ents = nelems,
- };
-
- if (mapped_ents && i >= mapped_ents)
- break;
-
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- check_unmap(&ref);
- }
-}
-EXPORT_SYMBOL(debug_dma_unmap_sg);
-
-void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (unlikely(virt == NULL))
- return;
-
- /* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_coherent;
- entry->dev = dev;
- entry->offset = offset_in_page(virt);
- entry->size = size;
- entry->dev_addr = dma_addr;
- entry->direction = DMA_BIDIRECTIONAL;
-
- if (is_vmalloc_addr(virt))
- entry->pfn = vmalloc_to_pfn(virt);
- else
- entry->pfn = page_to_pfn(virt_to_page(virt));
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
-
-void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_coherent,
- .dev = dev,
- .offset = offset_in_page(virt),
- .dev_addr = addr,
- .size = size,
- .direction = DMA_BIDIRECTIONAL,
- };
-
- /* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
- return;
-
- if (is_vmalloc_addr(virt))
- ref.pfn = vmalloc_to_pfn(virt);
- else
- ref.pfn = page_to_pfn(virt_to_page(virt));
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_free_coherent);
-
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
- int direction, dma_addr_t dma_addr)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_resource;
- entry->dev = dev;
- entry->pfn = PHYS_PFN(addr);
- entry->offset = offset_in_page(addr);
- entry->size = size;
- entry->dev_addr = dma_addr;
- entry->direction = direction;
- entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_resource);
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_resource,
- .dev = dev,
- .dev_addr = dma_addr,
- .size = size,
- .direction = direction,
- };
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_resource);
-
-void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
-
-void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_device);
-
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
-
-void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = direction,
- .sg_call_ents = nelems,
- };
-
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- if (i >= mapped_ents)
- break;
-
- check_sync(dev, &ref, true);
- }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
-
-void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = direction,
- .sg_call_ents = nelems,
- };
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- if (i >= mapped_ents)
- break;
-
- check_sync(dev, &ref, false);
- }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
-
-static int __init dma_debug_driver_setup(char *str)
-{
- int i;
-
- for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
- current_driver_name[i] = *str;
- if (*str == 0)
- break;
- }
-
- if (current_driver_name[0])
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
- current_driver_name);
-
-
- return 1;
-}
-__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
deleted file mode 100644
index c9e8e21cb334..000000000000
--- a/lib/dma-direct.c
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-
-#define DIRECT_MAPPING_ERROR 0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
- const char *caller)
-{
- if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
- if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
- dev_err(dev,
- "%s: overflow %pad+%zu of device mask %llx\n",
- caller, &dma_addr, size, *dev->dma_mask);
- }
- return false;
- }
- return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
- return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- int page_order = get_order(size);
- struct page *page = NULL;
-
- /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
- gfp |= GFP_DMA;
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
- gfp |= GFP_DMA32;
-
-again:
- /* CMA can be used only in the context which permits sleeping */
- if (gfpflags_allow_blocking(gfp)) {
- page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
- }
- if (!page)
- page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- __free_pages(page, page_order);
- page = NULL;
-
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
- !(gfp & GFP_DMA)) {
- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
- }
-
- if (!page)
- return NULL;
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- memset(page_address(page), 0, size);
- return page_address(page);
-}
-
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
- if (!check_addr(dev, dma_addr, size, __func__))
- return DIRECT_MAPPING_ERROR;
- return dma_addr;
-}
-
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
- if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
- return 0;
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
- if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
- return 0;
-#else
- /*
- * Because 32-bit DMA masks are so common we expect every architecture
- * to be able to satisfy them - either by not supporting more physical
- * memory, or by providing a ZONE_DMA32. If neither is the case, the
- * architecture needs to use an IOMMU instead of the direct mapping.
- */
- if (mask < DMA_BIT_MASK(32))
- return 0;
-#endif
- return 1;
-}
-
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc,
- .free = dma_direct_free,
- .map_page = dma_direct_map_page,
- .map_sg = dma_direct_map_sg,
- .dma_supported = dma_direct_supported,
- .mapping_error = dma_direct_mapping_error,
- .is_phys = 1,
-};
-EXPORT_SYMBOL(dma_direct_ops);
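Drivers never call dma_direct_alloc() directly; they go through the generic DMA API, which dispatches here when dma_direct_ops is the device's dma_map_ops. A hedged driver-side sketch of the path that ends up in the allocator above (error handling trimmed to the essentials, function name illustrative):

static int foo_setup_dma(struct device *dev)
{
        dma_addr_t dma_handle;
        void *cpu_addr;

        /* A 32-bit coherent mask is what exercises the GFP_DMA32 fallback above. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return -EIO;

        cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return -ENOMEM;

        /* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

        dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
        return 0;
}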
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
deleted file mode 100644
index 8e61a02ef9ca..000000000000
--- a/lib/dma-virt.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-virt.c
- *
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = (uintptr_t)ret;
- return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
- sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
- .alloc = dma_virt_alloc,
- .free = dma_virt_free,
- .map_page = dma_virt_map_page,
- .map_sg = dma_virt_map_sg,
-};
-EXPORT_SYMBOL(dma_virt_ops);
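dma_virt_ops hands back plain kernel virtual addresses as bus addresses, which is what purely software "devices" (for example the software RDMA providers) want. Wiring it up is a one-liner; set_dma_ops() is the generic helper from <linux/dma-mapping.h>, and the surrounding function is illustrative:

static void foo_use_virt_dma(struct device *dev)
{
        /* All DMA-API calls on this device now resolve to dma_virt_ops. */
        set_dma_ops(dev, &dma_virt_ops);
}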
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c5edbedd364d..5cff72f18c4a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -10,6 +10,66 @@
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/atomic.h>
+#include <linux/kexec.h>
+#include <linux/utsname.h>
+
+static char dump_stack_arch_desc_str[128];
+
+/**
+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * The configured string will be printed right after utsname during task
+ * dumps. Usually used to add arch-specific system identifiers. If an
+ * arch wants to make use of such an ID string, it should initialize this
+ * as soon as possible during boot.
+ */
+void __init dump_stack_set_arch_desc(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
+ fmt, args);
+ va_end(args);
+}
+
+/**
+ * dump_stack_print_info - print generic debug info for dump_stack()
+ * @log_lvl: log level
+ *
+ * Arch-specific dump_stack() implementations can use this function to
+ * print out the same debug information as the generic dump_stack().
+ */
+void dump_stack_print_info(const char *log_lvl)
+{
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+ kexec_crash_loaded() ? "Kdump: loaded " : "",
+ print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+ if (dump_stack_arch_desc_str[0] != '\0')
+ printk("%sHardware name: %s\n",
+ log_lvl, dump_stack_arch_desc_str);
+
+ print_worker_info(log_lvl, current);
+}
+
+/**
+ * show_regs_print_info - print generic debug info for show_regs()
+ * @log_lvl: log level
+ *
+ * show_regs() implementations can use this function to print out generic
+ * debug information.
+ */
+void show_regs_print_info(const char *log_lvl)
+{
+ dump_stack_print_info(log_lvl);
+}
static void __dump_stack(void)
{
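
The kernel-doc added above spells out the intended call sites: an architecture sets its hardware description string once during early boot, and its show_regs() leads with show_regs_print_info(). A hedged, kernel-context sketch (builds only inside a kernel tree; the board strings and example_board_init() name are placeholders, not from this diff):

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

static void __init example_board_init(void)
{
	/* Sketch: printed as "Hardware name: ..." by dump_stack_print_info(). */
	dump_stack_set_arch_desc("ExampleVendor ExampleBoard (rev %d)", 2);
}

void show_regs(struct pt_regs *regs)
{
	/* Emits the CPU/PID/comm/taint banner described above. */
	show_regs_print_info(KERN_DEFAULT);
	/* ... arch-specific register dump would follow ... */
}
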
diff --git a/lib/errseq.c b/lib/errseq.c
index df782418b333..81f9e33aa7e7 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
* errseq_sample() - Grab current errseq_t value.
* @eseq: Pointer to errseq_t to be sampled.
*
- * This function allows callers to sample an errseq_t value, marking it as
- * "seen" if required.
+ * This function allows callers to initialise their errseq_t variable.
+ * If the error has been "seen", new callers will not see an old error.
+ * If there is an unseen error in @eseq, the caller of this function will
+ * see it the next time it checks for an error.
*
+ * Context: Any context.
* Return: The current errseq value.
*/
errseq_t errseq_sample(errseq_t *eseq)
{
errseq_t old = READ_ONCE(*eseq);
- errseq_t new = old;
- /*
- * For the common case of no errors ever having been set, we can skip
- * marking the SEEN bit. Once an error has been set, the value will
- * never go back to zero.
- */
- if (old != 0) {
- new |= ERRSEQ_SEEN;
- if (old != new)
- cmpxchg(eseq, old, new);
- }
- return new;
+ /* If nobody has seen this error yet, then we can be the first. */
+ if (!(old & ERRSEQ_SEEN))
+ old = 0;
+ return old;
}
EXPORT_SYMBOL(errseq_sample);
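
The reworked kernel-doc above describes the sample-then-check protocol: a new consumer samples the cursor (getting 0 if an error is still unseen, so that error will be reported to it), then later compares against that sample. A hedged kernel-context sketch using the existing errseq_check() helper; wb_err and the example_*() names are purely illustrative, not from this diff:

#include <linux/errseq.h>

static errseq_t wb_err;		/* sketch: e.g. a mapping's writeback error cursor */

/* At "open" time: remember where this consumer starts watching. */
static errseq_t example_open(void)
{
	return errseq_sample(&wb_err);
}

/* At "fsync" time: non-zero iff an error was recorded after the sample
 * (or was still unseen when we sampled). */
static int example_fsync(errseq_t since)
{
	return errseq_check(&wb_err, since);
}
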
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5985a25e6cbc..5367ffa5c18f 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -132,7 +132,12 @@ static int __init find_bit_test(void)
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
- test_find_first_bit(bitmap, BITMAP_LEN);
+
+ /*
+ * test_find_first_bit() may take some time, so
+ * traverse only part of bitmap to avoid soft lockup.
+ */
+ test_find_first_bit(bitmap, BITMAP_LEN / 10);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 9ae5b62c8a0d..89752d0b23e8 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -2058,7 +2058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2074,7 +2074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x70, /* 0111000 */
0x00, /* 0000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2090,7 +2090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 0000110 */
0x18, /* 0001100 */
0x30, /* 0011000 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2122,7 +2122,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2138,7 +2138,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2154,7 +2154,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x38, /* 0011100 */
@@ -2170,7 +2170,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0xe0, /* 1110000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2202,7 +2202,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2218,7 +2218,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2234,7 +2234,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 0000000 */
0x6c, /* 0110110 */
0x00, /* 0000000 */
@@ -2250,7 +2250,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2282,7 +2282,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2298,7 +2298,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x30, /* 0011000 */
0x48, /* 0100100 */
0x48, /* 0100100 */
@@ -2314,7 +2314,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0xfc, /* 1111110 */
@@ -2330,7 +2330,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 0000000 */
0x3e, /* 0011111 */
0x6c, /* 0110110 */
@@ -2362,7 +2362,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2378,7 +2378,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2394,7 +2394,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2410,7 +2410,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2458,7 +2458,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x18, /* 0001100 */
0x70, /* 0111000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -2474,7 +2474,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0xcc, /* 1100110 */
@@ -2490,7 +2490,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x7c, /* 0111110 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x64, /* 0110010 */
@@ -2522,7 +2522,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2538,7 +2538,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 1111100 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2554,7 +2554,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x1c, /* 0001110 */
0x36, /* 0011011 */
0x30, /* 0011000 */
@@ -2570,7 +2570,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2602,7 +2602,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2618,7 +2618,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2634,7 +2634,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 0000000 */
0x76, /* 0111011 */
0xdc, /* 1101110 */
@@ -2650,7 +2650,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 0111011 */
0xdc, /* 1101110 */
0x00, /* 0000000 */
@@ -2666,7 +2666,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xd8, /* 1101100 */
@@ -2682,7 +2682,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 0000000 */
0x70, /* 0111000 */
0xd8, /* 1101100 */
@@ -2698,7 +2698,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2714,7 +2714,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2746,7 +2746,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2762,7 +2762,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x7c, /* 0111110 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2778,7 +2778,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x0c, /* 0000110 */
0x00, /* 0000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2794,7 +2794,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2810,7 +2810,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2826,7 +2826,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x88, /* 1000100 */
0x22, /* 0010001 */
0x88, /* 1000100 */
@@ -2842,7 +2842,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x88, /* 1000100 */
0x22, /* 0010001 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x54, /* 0101010 */
0xaa, /* 1010101 */
0x54, /* 0101010 */
@@ -2858,7 +2858,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x54, /* 0101010 */
0xaa, /* 1010101 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xee, /* 1110111 */
0xba, /* 1011101 */
0xee, /* 1110111 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xee, /* 1110111 */
0xba, /* 1011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2890,7 +2890,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2906,7 +2906,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2922,7 +2922,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2938,7 +2938,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2954,7 +2954,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2970,7 +2970,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2986,7 +2986,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3002,7 +3002,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3034,7 +3034,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3050,7 +3050,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3066,7 +3066,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3082,7 +3082,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3098,7 +3098,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3114,7 +3114,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3130,7 +3130,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3146,7 +3146,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3178,7 +3178,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3194,7 +3194,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3210,7 +3210,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3226,7 +3226,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3242,7 +3242,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3258,7 +3258,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3274,7 +3274,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3290,7 +3290,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3322,7 +3322,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3338,7 +3338,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3354,7 +3354,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3370,7 +3370,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3386,7 +3386,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3402,7 +3402,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3418,7 +3418,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3434,7 +3434,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3466,7 +3466,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3482,7 +3482,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3498,7 +3498,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3514,7 +3514,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3530,7 +3530,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3546,7 +3546,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
@@ -3562,7 +3562,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xe0, /* 1110000 */
0xe0, /* 1110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
@@ -3578,7 +3578,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x1e, /* 0001111 */
0x1e, /* 0001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3610,7 +3610,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -3626,7 +3626,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3642,7 +3642,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0xfe, /* 1111111 */
@@ -3658,7 +3658,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3674,7 +3674,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3690,7 +3690,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3706,7 +3706,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xc0, /* 1100000 */
0x80, /* 1000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3722,7 +3722,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0x30, /* 0011000 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3754,7 +3754,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3770,7 +3770,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 0000000 */
0x3c, /* 0011110 */
0x60, /* 0110000 */
@@ -3786,7 +3786,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3802,7 +3802,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x06, /* 0000011 */
@@ -3818,7 +3818,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x30, /* 0011000 */
@@ -3834,7 +3834,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -3850,7 +3850,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3866,7 +3866,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x60, /* 0110000 */
@@ -3898,7 +3898,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x18, /* 0001100 */
@@ -3914,7 +3914,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x36, /* 0011011 */
@@ -3930,7 +3930,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 0001100 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -3946,7 +3946,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3962,7 +3962,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3978,7 +3978,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3994,7 +3994,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4010,7 +4010,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x1e, /* 0001111 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -4042,7 +4042,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0xd8, /* 1101100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -4058,7 +4058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 0111100 */
0xcc, /* 1100110 */
0x18, /* 0001100 */
@@ -4074,7 +4074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4090,7 +4090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index 34292cdfaa23..b7ab1f5fbdb8 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2334,7 +2334,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2352,7 +2352,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2370,7 +2370,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2388,7 +2388,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2424,7 +2424,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2460,7 +2460,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2478,7 +2478,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2514,7 +2514,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2532,7 +2532,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2550,7 +2550,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2568,7 +2568,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x38, /* 00111000 */
@@ -2604,7 +2604,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2622,7 +2622,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2640,7 +2640,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3e, /* 00111110 */
@@ -2658,7 +2658,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2676,7 +2676,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2694,7 +2694,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2712,7 +2712,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x00, /* 00000000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2748,7 +2748,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2766,7 +2766,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x78, /* 01111000 */
0x00, /* 00000000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2784,7 +2784,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2802,7 +2802,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2820,7 +2820,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2838,7 +2838,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2856,7 +2856,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0x00, /* 00000000 */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x00, /* 00000000 */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
@@ -2892,7 +2892,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2910,7 +2910,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2928,7 +2928,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2946,7 +2946,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2964,7 +2964,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2982,7 +2982,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -3000,7 +3000,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -3036,7 +3036,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x30, /* 00110000 */
@@ -3054,7 +3054,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3072,7 +3072,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3090,7 +3090,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3108,7 +3108,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3126,7 +3126,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -3144,7 +3144,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3180,7 +3180,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x11, /* 00010001 */
0x44, /* 01000100 */
0x11, /* 00010001 */
@@ -3198,7 +3198,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x11, /* 00010001 */
0x44, /* 01000100 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -3216,7 +3216,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xdd, /* 11011101 */
0x77, /* 01110111 */
0xdd, /* 11011101 */
@@ -3234,7 +3234,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xdd, /* 11011101 */
0x77, /* 01110111 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3252,7 +3252,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3270,7 +3270,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3288,7 +3288,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3324,7 +3324,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3342,7 +3342,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3360,7 +3360,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3378,7 +3378,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3396,7 +3396,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3414,7 +3414,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3432,7 +3432,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3468,7 +3468,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3486,7 +3486,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3504,7 +3504,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3522,7 +3522,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3540,7 +3540,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3558,7 +3558,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3576,7 +3576,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3612,7 +3612,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3630,7 +3630,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3648,7 +3648,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3666,7 +3666,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3684,7 +3684,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3702,7 +3702,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3720,7 +3720,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3756,7 +3756,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3774,7 +3774,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3792,7 +3792,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3810,7 +3810,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3828,7 +3828,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3846,7 +3846,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3864,7 +3864,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3900,7 +3900,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3918,7 +3918,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3936,7 +3936,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3954,7 +3954,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -3972,7 +3972,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3990,7 +3990,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -4008,7 +4008,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -4044,7 +4044,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4062,7 +4062,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -4080,7 +4080,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4098,7 +4098,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4116,7 +4116,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4134,7 +4134,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4152,7 +4152,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4170,7 +4170,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4188,7 +4188,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -4206,7 +4206,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4224,7 +4224,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4242,7 +4242,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1e, /* 00011110 */
@@ -4260,7 +4260,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4278,7 +4278,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4296,7 +4296,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1c, /* 00011100 */
@@ -4314,7 +4314,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4332,7 +4332,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4350,7 +4350,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4368,7 +4368,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4386,7 +4386,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4404,7 +4404,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x0e, /* 00001110 */
@@ -4422,7 +4422,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -4440,7 +4440,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4458,7 +4458,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4476,7 +4476,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -4494,7 +4494,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4512,7 +4512,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4530,7 +4530,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x00, /* 00000000 */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
@@ -4548,7 +4548,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x00, /* 00000000 */
0x6c, /* 01101100 */
0x36, /* 00110110 */
@@ -4566,7 +4566,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x00, /* 00000000 */
0x3c, /* 00111100 */
0x66, /* 01100110 */
@@ -4584,7 +4584,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4602,7 +4602,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 751becf3c521..2328ebc8bab5 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -1291,7 +1291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1301,7 +1301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1311,7 +1311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1321,7 +1321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1331,7 +1331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1341,7 +1341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1351,7 +1351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1361,7 +1361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1371,7 +1371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1381,7 +1381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1391,7 +1391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1401,7 +1401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1411,7 +1411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1421,7 +1421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1431,7 +1431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1441,7 +1441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1451,7 +1451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1461,7 +1461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1471,7 +1471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1481,7 +1481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1491,7 +1491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1501,7 +1501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1511,7 +1511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1521,7 +1521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1531,7 +1531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1541,7 +1541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1551,7 +1551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1561,7 +1561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1571,7 +1571,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1581,7 +1581,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1591,7 +1591,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1601,7 +1601,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1611,7 +1611,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1621,7 +1621,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1631,7 +1631,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1641,7 +1641,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1651,7 +1651,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1661,7 +1661,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1671,7 +1671,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1681,7 +1681,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1691,7 +1691,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1701,7 +1701,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1711,7 +1711,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1721,7 +1721,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1731,7 +1731,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1741,7 +1741,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1751,7 +1751,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1761,7 +1761,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1771,7 +1771,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1781,7 +1781,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1791,7 +1791,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1801,7 +1801,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1811,7 +1811,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1821,7 +1821,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1831,7 +1831,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1841,7 +1841,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1851,7 +1851,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1861,7 +1861,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1871,7 +1871,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1881,7 +1881,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1891,7 +1891,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1901,7 +1901,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1911,7 +1911,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1921,7 +1921,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1931,7 +1931,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1941,7 +1941,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1951,7 +1951,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1961,7 +1961,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1971,7 +1971,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1981,7 +1981,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1991,7 +1991,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2001,7 +2001,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2011,7 +2011,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2021,7 +2021,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2031,7 +2031,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2041,7 +2041,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2051,7 +2051,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2061,7 +2061,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2071,7 +2071,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2081,7 +2081,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2091,7 +2091,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2101,7 +2101,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2111,7 +2111,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2121,7 +2121,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2131,7 +2131,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2141,7 +2141,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2151,7 +2151,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2161,7 +2161,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2171,7 +2171,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2181,7 +2181,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2191,7 +2191,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2201,7 +2201,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2211,7 +2211,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2221,7 +2221,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2231,7 +2231,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2241,7 +2241,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2251,7 +2251,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2261,7 +2261,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2271,7 +2271,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2281,7 +2281,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2291,7 +2291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2301,7 +2301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2311,7 +2311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2321,7 +2321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2331,7 +2331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2341,7 +2341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2351,7 +2351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2361,7 +2361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2371,7 +2371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2381,7 +2381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2391,7 +2391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2401,7 +2401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2411,7 +2411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2421,7 +2421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2431,7 +2431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2441,7 +2441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2451,7 +2451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2461,7 +2461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2471,7 +2471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2481,7 +2481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2491,7 +2491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2501,7 +2501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2511,7 +2511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2521,7 +2521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2531,7 +2531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2541,7 +2541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2551,7 +2551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2561,7 +2561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b0514c0a7445..b15d3c342c5b 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -1296,7 +1296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1306,7 +1306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1316,7 +1316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1326,7 +1326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1336,7 +1336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1346,7 +1346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1356,7 +1356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1366,7 +1366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1376,7 +1376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1386,7 +1386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1396,7 +1396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1406,7 +1406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1416,7 +1416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1426,7 +1426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1436,7 +1436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1446,7 +1446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1456,7 +1456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1466,7 +1466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1476,7 +1476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1486,7 +1486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1496,7 +1496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1506,7 +1506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1516,7 +1516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1526,7 +1526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1536,7 +1536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1546,7 +1546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1556,7 +1556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1566,7 +1566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1576,7 +1576,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1586,7 +1586,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1596,7 +1596,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1606,7 +1606,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1616,7 +1616,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1626,7 +1626,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1636,7 +1636,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1646,7 +1646,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1656,7 +1656,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1666,7 +1666,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1676,7 +1676,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1686,7 +1686,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1696,7 +1696,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1706,7 +1706,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1716,7 +1716,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1726,7 +1726,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1736,7 +1736,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1746,7 +1746,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1756,7 +1756,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1766,7 +1766,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1776,7 +1776,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1786,7 +1786,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1796,7 +1796,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1806,7 +1806,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1816,7 +1816,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1826,7 +1826,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1836,7 +1836,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1846,7 +1846,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1856,7 +1856,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1866,7 +1866,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1876,7 +1876,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1886,7 +1886,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1896,7 +1896,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1906,7 +1906,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1916,7 +1916,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1926,7 +1926,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1936,7 +1936,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1946,7 +1946,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1956,7 +1956,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1966,7 +1966,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1976,7 +1976,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1986,7 +1986,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1996,7 +1996,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2006,7 +2006,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2016,7 +2016,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2026,7 +2026,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2036,7 +2036,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2046,7 +2046,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2056,7 +2056,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2066,7 +2066,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2076,7 +2076,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2086,7 +2086,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2096,7 +2096,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2116,7 +2116,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2126,7 +2126,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2136,7 +2136,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2146,7 +2146,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2156,7 +2156,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2166,7 +2166,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2176,7 +2176,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2196,7 +2196,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2206,7 +2206,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2216,7 +2216,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2226,7 +2226,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2236,7 +2236,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2246,7 +2246,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2256,7 +2256,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2276,7 +2276,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2286,7 +2286,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2296,7 +2296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2306,7 +2306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2326,7 +2326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2336,7 +2336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2356,7 +2356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2366,7 +2366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2376,7 +2376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2386,7 +2386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2396,7 +2396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2416,7 +2416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2436,7 +2436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2446,7 +2446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2456,7 +2456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2466,7 +2466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2476,7 +2476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2486,7 +2486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2516,7 +2516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2526,7 +2526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2536,7 +2536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2546,7 +2546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2556,7 +2556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2566,7 +2566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660ea10a..f755b997b967 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
+#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
static void crc32init_le(void)
{
- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
new file mode 100644
index 000000000000..9011926e4162
--- /dev/null
+++ b/lib/gen_crc64table.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate lookup table for the table-driven CRC64 calculation.
+ *
+ * gen_crc64table is executed in kernel build time and generates
+ * lib/crc64table.h. This header is included by lib/crc64.c for
+ * the table-driven CRC64 calculation.
+ *
+ * See lib/crc64.c for more information about which specification
+ * and polynomial arithmetic that gen_crc64table.c follows to
+ * generate the lookup table.
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <linux/swab.h>
+
+#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+
+static uint64_t crc64_table[256] = {0};
+
+static void generate_crc64_table(void)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0;
+ c = i << 56;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ c) & 0x8000000000000000ULL)
+ crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+ else
+ crc <<= 1;
+ c <<= 1;
+ }
+
+ crc64_table[i] = crc;
+ }
+}
+
+static void print_crc64_table(void)
+{
+ int i;
+
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+ if (i & 0x1)
+ printf(",\n");
+ else
+ printf(", ");
+ }
+ printf("};\n");
+}
+
+int main(int argc, char *argv[])
+{
+ generate_crc64_table();
+ print_crc64_table();
+ return 0;
+}
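For context, the crc64table[] generated above is meant to be consumed by a byte-at-a-time, table-driven loop. A minimal sketch of such a consumer, assuming it is built against the generated lib/crc64table.h (an illustration, not the lib/crc64.c implementation itself):

static u64 crc64_be_sketch(u64 crc, const void *p, size_t len)
{
	const unsigned char *data = p;
	size_t i;

	/* fold in one input byte per step, indexing the 256-entry table */
	for (i = 0; i < len; i++)
		crc = (crc << 8) ^ crc64table[(crc >> 56) ^ data[i]];

	return crc;
}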
diff --git a/lib/idr.c b/lib/idr.c
index 823b813f08f8..fab2fd5bc326 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -4,9 +4,9 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/xarray.h>
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-static DEFINE_SPINLOCK(simple_ida_lock);
/**
* idr_alloc_u32() - Allocate an ID.
@@ -317,18 +317,12 @@ EXPORT_SYMBOL(idr_replace);
* bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
- * ida_simple_get(). To free an ID, call ida_simple_remove().
+ * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
+ * To free an ID, call ida_free().
*
- * If you have more complex locking requirements, use a loop around
- * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
- * ida_remove() to free an ID. You must make sure that ida_get_new() and
- * ida_remove() cannot be called at the same time as each other for the
- * same IDA.
- *
- * You can also use ida_get_new_above() if you need an ID to be allocated
- * above a particular number. ida_destroy() can be used to dispose of an
- * IDA without needing to free the individual IDs in it. You can use
- * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ * ida_destroy() can be used to dispose of an IDA without needing to
+ * free the individual IDs in it. You can use ida_is_empty() to find
+ * out whether the IDA has any IDs currently allocated.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
@@ -369,25 +363,7 @@ EXPORT_SYMBOL(idr_replace);
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
-/**
- * ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @start: id to start search at
- * @id: pointer to the allocated handle
- *
- * Allocate new ID above or equal to @start. It should be called
- * with any required locks to ensure that concurrent calls to
- * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
- * Consider using ida_simple_get() if you do not have complex locking
- * requirements.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the ida_pre_get() call. If the ida is full, it will
- * return %-ENOSPC. On success, it will return 0.
- *
- * @id returns a value in the range @start ... %0x7fffffff.
- */
-int ida_get_new_above(struct ida *ida, int start, int *id)
+static int ida_get_new_above(struct ida *ida, int start)
{
struct radix_tree_root *root = &ida->ida_rt;
void __rcu **slot;
@@ -426,8 +402,8 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
if (ebit < BITS_PER_LONG) {
tmp |= 1UL << ebit;
rcu_assign_pointer(*slot, (void *)tmp);
- *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
- return 0;
+ return new + ebit -
+ RADIX_TREE_EXCEPTIONAL_SHIFT;
}
bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap)
@@ -458,8 +434,7 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
RADIX_TREE_EXCEPTIONAL_ENTRY);
radix_tree_iter_replace(root, &iter, slot,
bitmap);
- *id = new;
- return 0;
+ return new;
}
bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap)
@@ -468,20 +443,11 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
radix_tree_iter_replace(root, &iter, slot, bitmap);
}
- *id = new;
- return 0;
+ return new;
}
}
-EXPORT_SYMBOL(ida_get_new_above);
-/**
- * ida_remove - Free the given ID
- * @ida: ida handle
- * @id: ID to free
- *
- * This function should not be called at the same time as ida_get_new_above().
- */
-void ida_remove(struct ida *ida, int id)
+static void ida_remove(struct ida *ida, int id)
{
unsigned long index = id / IDA_BITMAP_BITS;
unsigned offset = id % IDA_BITMAP_BITS;
@@ -518,105 +484,96 @@ void ida_remove(struct ida *ida, int id)
}
return;
err:
- WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
+ WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
-EXPORT_SYMBOL(ida_remove);
/**
- * ida_destroy - Free the contents of an ida
- * @ida: ida handle
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
*
- * Calling this function releases all resources associated with an IDA. When
- * this call returns, the IDA is empty and can be reused or freed. The caller
- * should not allow ida_remove() or ida_get_new_above() to be called at the
- * same time.
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA. When this call returns, the IDA is empty and can be reused
+ * or freed. If the IDA is already empty, there is no need to call this
+ * function.
+ *
+ * Context: Any context.
*/
void ida_destroy(struct ida *ida)
{
+ unsigned long flags;
struct radix_tree_iter iter;
void __rcu **slot;
+ xa_lock_irqsave(&ida->ida_rt, flags);
radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
if (!radix_tree_exception(bitmap))
kfree(bitmap);
radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
}
+ xa_unlock_irqrestore(&ida->ida_rt, flags);
}
EXPORT_SYMBOL(ida_destroy);
/**
- * ida_simple_get - get a new id.
- * @ida: the (initialized) ida.
- * @start: the minimum id (inclusive, < 0x8000000)
- * @end: the maximum id (exclusive, < 0x8000000 or 0)
- * @gfp_mask: memory allocation flags
- *
- * Allocates an id in the range start <= id < end, or returns -ENOSPC.
- * On memory allocation failure, returns -ENOMEM.
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
*
- * Compared to ida_get_new_above() this function does its own locking, and
- * should be used unless there are special requirements.
+ * Allocate an ID between @min and @max, inclusive. The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
*
- * Use ida_simple_remove() to get rid of an id.
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask)
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+ gfp_t gfp)
{
- int ret, id;
- unsigned int max;
+ int id = 0;
unsigned long flags;
- BUG_ON((int)start < 0);
- BUG_ON((int)end < 0);
+ if ((int)min < 0)
+ return -ENOSPC;
- if (end == 0)
- max = 0x80000000;
- else {
- BUG_ON(end < start);
- max = end - 1;
- }
+ if ((int)max < 0)
+ max = INT_MAX;
again:
- if (!ida_pre_get(ida, gfp_mask))
- return -ENOMEM;
-
- spin_lock_irqsave(&simple_ida_lock, flags);
- ret = ida_get_new_above(ida, start, &id);
- if (!ret) {
- if (id > max) {
- ida_remove(ida, id);
- ret = -ENOSPC;
- } else {
- ret = id;
- }
+ xa_lock_irqsave(&ida->ida_rt, flags);
+ id = ida_get_new_above(ida, min);
+ if (id > (int)max) {
+ ida_remove(ida, id);
+ id = -ENOSPC;
}
- spin_unlock_irqrestore(&simple_ida_lock, flags);
+ xa_unlock_irqrestore(&ida->ida_rt, flags);
- if (unlikely(ret == -EAGAIN))
+ if (unlikely(id == -EAGAIN)) {
+ if (!ida_pre_get(ida, gfp))
+ return -ENOMEM;
goto again;
+ }
- return ret;
+ return id;
}
-EXPORT_SYMBOL(ida_simple_get);
+EXPORT_SYMBOL(ida_alloc_range);
/**
- * ida_simple_remove - remove an allocated id.
- * @ida: the (initialized) ida.
- * @id: the id returned by ida_simple_get.
- *
- * Use to release an id allocated with ida_simple_get().
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
*
- * Compared to ida_remove() this function does its own locking, and should be
- * used unless there are special requirements.
+ * Context: Any context.
*/
-void ida_simple_remove(struct ida *ida, unsigned int id)
+void ida_free(struct ida *ida, unsigned int id)
{
unsigned long flags;
BUG_ON((int)id < 0);
- spin_lock_irqsave(&simple_ida_lock, flags);
+ xa_lock_irqsave(&ida->ida_rt, flags);
ida_remove(ida, id);
- spin_unlock_irqrestore(&simple_ida_lock, flags);
+ xa_unlock_irqrestore(&ida->ida_rt, flags);
}
-EXPORT_SYMBOL(ida_simple_remove);
+EXPORT_SYMBOL(ida_free);
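A short usage sketch of the ida_alloc_range()/ida_free() pair documented above; the IDA instance and the [0, 15] range are purely illustrative:

static DEFINE_IDA(example_ida);		/* hypothetical IDA */

static int example_get_id(void)
{
	/* lowest free ID in [0, 15] inclusive; may sleep to allocate a bitmap */
	return ida_alloc_range(&example_ida, 0, 15, GFP_KERNEL);
}

static void example_put_id(int id)
{
	if (id >= 0)
		ida_free(&example_ida, id);
}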
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index e2d329099bf7..14436f4ca6bd 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -38,3 +38,33 @@ unsigned long int_sqrt(unsigned long x)
return y;
}
EXPORT_SYMBOL(int_sqrt);
+
+#if BITS_PER_LONG < 64
+/**
+ * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
+ * is expected.
+ * @x: 64bit integer of which to calculate the sqrt
+ */
+u32 int_sqrt64(u64 x)
+{
+ u64 b, m, y = 0;
+
+ if (x <= ULONG_MAX)
+ return int_sqrt((unsigned long) x);
+
+ m = 1ULL << (fls64(x) & ~1ULL);
+ while (m != 0) {
+ b = y + m;
+ y >>= 1;
+
+ if (x >= b) {
+ x -= b;
+ y += m;
+ }
+ m >>= 2;
+ }
+
+ return y;
+}
+EXPORT_SYMBOL(int_sqrt64);
+#endif
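The loop above is the usual digit-by-digit binary square root, so the result y is floor(sqrt(x)): y*y <= x and (y+1)*(y+1) > x. A hedged self-check a caller could apply (illustrative only):

static bool int_sqrt64_ok(u64 x)
{
	u64 y = int_sqrt64(x);

	if (y * y > x)
		return false;
	/* (y + 1)^2 only overflows 64 bits when y == U32_MAX */
	return y == U32_MAX || (y + 1) * (y + 1) > x;
}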
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 835242e74aaa..75509a1511a3 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -64,11 +64,12 @@ static int interval_tree_test_init(void)
unsigned long results;
cycles_t time1, time2, time;
- nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
if (!nodes)
return -ENOMEM;
- queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
if (!queries) {
kfree(nodes);
return -ENOMEM;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
deleted file mode 100644
index 55b00de106b5..000000000000
--- a/lib/iommu-common.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IOMMU mmap management and range allocation functions.
- * Based almost entirely upon the powerpc iommu allocator.
- */
-
-#include <linux/export.h>
-#include <linux/bitmap.h>
-#include <linux/bug.h>
-#include <linux/iommu-helper.h>
-#include <linux/iommu-common.h>
-#include <linux/dma-mapping.h>
-#include <linux/hash.h>
-
-static unsigned long iommu_large_alloc = 15;
-
-static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
-
-static inline bool need_flush(struct iommu_map_table *iommu)
-{
- return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
-}
-
-static inline void set_flush(struct iommu_map_table *iommu)
-{
- iommu->flags |= IOMMU_NEED_FLUSH;
-}
-
-static inline void clear_flush(struct iommu_map_table *iommu)
-{
- iommu->flags &= ~IOMMU_NEED_FLUSH;
-}
-
-static void setup_iommu_pool_hash(void)
-{
- unsigned int i;
- static bool do_once;
-
- if (do_once)
- return;
- do_once = true;
- for_each_possible_cpu(i)
- per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-/*
- * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
- * is the number of table entries. If `large_pool' is set to true,
- * the top 1/4 of the table will be set aside for pool allocations
- * of more than iommu_large_alloc pages.
- */
-void iommu_tbl_pool_init(struct iommu_map_table *iommu,
- unsigned long num_entries,
- u32 table_shift,
- void (*lazy_flush)(struct iommu_map_table *),
- bool large_pool, u32 npools,
- bool skip_span_boundary_check)
-{
- unsigned int start, i;
- struct iommu_pool *p = &(iommu->large_pool);
-
- setup_iommu_pool_hash();
- if (npools == 0)
- iommu->nr_pools = IOMMU_NR_POOLS;
- else
- iommu->nr_pools = npools;
- BUG_ON(npools > IOMMU_NR_POOLS);
-
- iommu->table_shift = table_shift;
- iommu->lazy_flush = lazy_flush;
- start = 0;
- if (skip_span_boundary_check)
- iommu->flags |= IOMMU_NO_SPAN_BOUND;
- if (large_pool)
- iommu->flags |= IOMMU_HAS_LARGE_POOL;
-
- if (!large_pool)
- iommu->poolsize = num_entries/iommu->nr_pools;
- else
- iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
- for (i = 0; i < iommu->nr_pools; i++) {
- spin_lock_init(&(iommu->pools[i].lock));
- iommu->pools[i].start = start;
- iommu->pools[i].hint = start;
- start += iommu->poolsize; /* start for next pool */
- iommu->pools[i].end = start - 1;
- }
- if (!large_pool)
- return;
- /* initialize large_pool */
- spin_lock_init(&(p->lock));
- p->start = start;
- p->hint = p->start;
- p->end = num_entries;
-}
-EXPORT_SYMBOL(iommu_tbl_pool_init);
-
-unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_map_table *iommu,
- unsigned long npages,
- unsigned long *handle,
- unsigned long mask,
- unsigned int align_order)
-{
- unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
- unsigned long n, end, start, limit, boundary_size;
- struct iommu_pool *pool;
- int pass = 0;
- unsigned int pool_nr;
- unsigned int npools = iommu->nr_pools;
- unsigned long flags;
- bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
- bool largealloc = (large_pool && npages > iommu_large_alloc);
- unsigned long shift;
- unsigned long align_mask = 0;
-
- if (align_order > 0)
- align_mask = ~0ul >> (BITS_PER_LONG - align_order);
-
- /* Sanity check */
- if (unlikely(npages == 0)) {
- WARN_ON_ONCE(1);
- return IOMMU_ERROR_CODE;
- }
-
- if (largealloc) {
- pool = &(iommu->large_pool);
- pool_nr = 0; /* to keep compiler happy */
- } else {
- /* pick out pool_nr */
- pool_nr = pool_hash & (npools - 1);
- pool = &(iommu->pools[pool_nr]);
- }
- spin_lock_irqsave(&pool->lock, flags);
-
- again:
- if (pass == 0 && handle && *handle &&
- (*handle >= pool->start) && (*handle < pool->end))
- start = *handle;
- else
- start = pool->hint;
-
- limit = pool->end;
-
- /* The case below can happen if we have a small segment appended
- * to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning. If a
- * flush is needed, it will get done based on the return value
- * from iommu_area_alloc() below.
- */
- if (start >= limit)
- start = pool->start;
- shift = iommu->table_map_base >> iommu->table_shift;
- if (limit + shift > mask) {
- limit = mask - shift + 1;
- /* If we're constrained on address range, first try
- * at the masked hint to avoid O(n) search complexity,
- * but on second pass, start at 0 in pool 0.
- */
- if ((start & mask) >= limit || pass > 0) {
- spin_unlock(&(pool->lock));
- pool = &(iommu->pools[0]);
- spin_lock(&(pool->lock));
- start = pool->start;
- } else {
- start &= mask;
- }
- }
-
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << iommu->table_shift);
- else
- boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
-
- boundary_size = boundary_size >> iommu->table_shift;
- /*
- * if the skip_span_boundary_check had been set during init, we set
- * things up so that iommu_is_span_boundary() merely checks if the
- * (index + npages) < num_tsb_entries
- */
- if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
- shift = 0;
- boundary_size = iommu->poolsize * iommu->nr_pools;
- }
- n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
- boundary_size, align_mask);
- if (n == -1) {
- if (likely(pass == 0)) {
- /* First failure, rescan from the beginning. */
- pool->hint = pool->start;
- set_flush(iommu);
- pass++;
- goto again;
- } else if (!largealloc && pass <= iommu->nr_pools) {
- spin_unlock(&(pool->lock));
- pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
- pool = &(iommu->pools[pool_nr]);
- spin_lock(&(pool->lock));
- pool->hint = pool->start;
- set_flush(iommu);
- pass++;
- goto again;
- } else {
- /* give up */
- n = IOMMU_ERROR_CODE;
- goto bail;
- }
- }
- if (iommu->lazy_flush &&
- (n < pool->hint || need_flush(iommu))) {
- clear_flush(iommu);
- iommu->lazy_flush(iommu);
- }
-
- end = n + npages;
- pool->hint = end;
-
- /* Update handle for SG allocations */
- if (handle)
- *handle = end;
-bail:
- spin_unlock_irqrestore(&(pool->lock), flags);
-
- return n;
-}
-EXPORT_SYMBOL(iommu_tbl_range_alloc);
-
-static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
- unsigned long entry)
-{
- struct iommu_pool *p;
- unsigned long largepool_start = tbl->large_pool.start;
- bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
-
- /* The large pool is the last pool at the top of the table */
- if (large_pool && entry >= largepool_start) {
- p = &tbl->large_pool;
- } else {
- unsigned int pool_nr = entry / tbl->poolsize;
-
- BUG_ON(pool_nr >= tbl->nr_pools);
- p = &tbl->pools[pool_nr];
- }
- return p;
-}
-
-/* Caller supplies the index of the entry into the iommu map table
- * itself when the mapping from dma_addr to the entry is not the
- * default addr->entry mapping below.
- */
-void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
- unsigned long npages, unsigned long entry)
-{
- struct iommu_pool *pool;
- unsigned long flags;
- unsigned long shift = iommu->table_shift;
-
- if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
- entry = (dma_addr - iommu->table_map_base) >> shift;
- pool = get_pool(iommu, entry);
-
- spin_lock_irqsave(&(pool->lock), flags);
- bitmap_clear(iommu->map, entry, npages);
- spin_unlock_irqrestore(&(pool->lock), flags);
-}
-EXPORT_SYMBOL(iommu_tbl_range_free);
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 23633c0fda4a..92a9f243c0e2 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,19 +3,8 @@
* IOMMU helper functions for the free area management
*/
-#include <linux/export.h>
#include <linux/bitmap.h>
-#include <linux/bug.h>
-
-int iommu_is_span_boundary(unsigned int index, unsigned int nr,
- unsigned long shift,
- unsigned long boundary_size)
-{
- BUG_ON(!is_power_of_2(boundary_size));
-
- shift = (shift + index) & (boundary_size - 1);
- return shift + nr > boundary_size;
-}
+#include <linux/iommu-helper.h>
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
@@ -38,4 +27,3 @@ again:
}
return -1;
}
-EXPORT_SYMBOL(iommu_area_alloc);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 970212670b6a..8be175df3075 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -573,6 +573,122 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(_copy_to_iter);
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = copy_to_user_mcsafe((__force void *) to, from, n);
+ }
+ return n;
+}
+
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ unsigned long ret;
+ char *to;
+
+ to = kmap_atomic(page);
+ ret = memcpy_mcsafe(to + offset, from, len);
+ kunmap_atomic(to);
+
+ return ret;
+}
+
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off, xfer = 0;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ unsigned long rem;
+
+ rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+ chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk - rem;
+ xfer += chunk - rem;
+ if (rem)
+ break;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= xfer;
+ return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and typical _copy_to_iter().
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ * byte-by-byte until the fault happens again. Re-triggering machine
+ * checks is potentially fatal so the implementation uses source
+ * alignment and poison alignment assumptions to avoid re-triggering
+ * hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ * a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ const char *from = addr;
+ unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+ if (unlikely(i->type & ITER_PIPE))
+ return copy_pipe_to_iter_mcsafe(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
+ iterate_and_advance(i, bytes, v,
+ copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+ ({
+ rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ return bytes;
+ }
+ }),
+ ({
+ rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+ v.iov_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ return bytes;
+ }
+ })
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
@@ -640,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that guarantees all data is flushed for
+ * all iterator types. The _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
@@ -1012,7 +1142,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
-static inline size_t __pipe_get_pages(struct iov_iter *i,
+static inline ssize_t __pipe_get_pages(struct iov_iter *i,
size_t maxsize,
struct page **pages,
int idx,
@@ -1102,7 +1232,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
- size_t n;
+ ssize_t n;
int idx;
int npages;
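Because _copy_to_iter_mcsafe() may legitimately return a short count for ITER_KVEC, ITER_BVEC and ITER_PIPE, callers have to treat anything less than the requested length as a partial transfer rather than a programming error. A hedged sketch of that caller pattern (the function name and error policy are hypothetical):

static ssize_t example_dax_read(void *kaddr, size_t len, struct iov_iter *i)
{
	size_t copied;

	/* may stop early if reading the source hits a machine check */
	copied = _copy_to_iter_mcsafe(kaddr, len, i);
	if (!copied && len)
		return -EIO;	/* nothing was transferred at all */

	return copied;	/* possibly short; the caller retries or truncates */
}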
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 90ba1eb1df06..015656aa8182 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -39,7 +39,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
size_t esize, gfp_t gfp_mask)
{
/*
- * round down to the next power of 2, since our 'let the indices
+ * round up to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
size = roundup_pow_of_two(size);
@@ -54,7 +54,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
return -EINVAL;
}
- fifo->data = kmalloc(size * esize, gfp_mask);
+ fifo->data = kmalloc_array(esize, size, gfp_mask);
if (!fifo->data) {
fifo->mask = 0;
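The kmalloc_array() conversion is not just cosmetic: unlike the open-coded kmalloc(size * esize, ...), it rejects element-count/element-size pairs whose product would overflow and silently under-allocate. Conceptually (an illustration, not the slab implementation):

static void *example_alloc_array(size_t n, size_t size, gfp_t gfp)
{
	/* refuse a multiplication that would wrap around */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kmalloc(n * size, gfp);
}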
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/kobject.c b/lib/kobject.c
index afd5a3fc6123..97d86dc17c42 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj)
return kobj->ktype->namespace(kobj);
}
+/**
+ * kobject_get_ownership - get sysfs ownership data for @kobj
+ * @kobj: kobject in question
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns initial uid/gid pair that should be used when creating sysfs
+ * representation of given kobject. Normally used to adjust ownership of
+ * objects in a container.
+ */
+void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+
+ if (kobj->ktype->get_ownership)
+ kobj->ktype->get_ownership(kobj, uid, gid);
+}
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -125,7 +144,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
- strncpy(path + length, kobject_name(parent), cur);
+ memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
@@ -204,8 +223,9 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- WARN(1, "kobject: (%p): attempted to be registered with empty "
- "name!\n", kobj);
+ WARN(1,
+ "kobject: (%p): attempted to be registered with empty name!\n",
+ kobj);
return -EINVAL;
}
@@ -232,14 +252,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
@@ -334,8 +352,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
}
if (kobj->state_initialized) {
/* do not error out as sometimes we can recover */
- printk(KERN_ERR "kobject (%p): tried to init an initialized "
- "object, something is seriously wrong.\n", kobj);
+ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
+ kobj);
dump_stack();
}
@@ -344,7 +362,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
return;
error:
- printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+ pr_err("kobject (%p): %s\n", kobj, err_str);
dump_stack();
}
EXPORT_SYMBOL(kobject_init);
@@ -357,7 +375,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
- printk(KERN_ERR "kobject: can not set name properly!\n");
+ pr_err("kobject: can not set name properly!\n");
return retval;
}
kobj->parent = parent;
@@ -399,8 +417,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
return -EINVAL;
if (!kobj->state_initialized) {
- printk(KERN_ERR "kobject '%s' (%p): tried to add an "
- "uninitialized object, something is seriously wrong.\n",
+ pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
dump_stack();
return -EINVAL;
@@ -590,9 +607,9 @@ struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_get() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
+ kobject_name(kobj), kobj);
kref_get(&kobj->kref);
}
return kobj;
@@ -622,8 +639,7 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() "
- "function, it is broken and must be fixed.\n",
+ pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
@@ -686,9 +702,9 @@ void kobject_put(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_put() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
+ kobject_name(kobj), kobj);
kref_put(&kobj->kref, kobject_release);
}
}
@@ -752,8 +768,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
- printk(KERN_WARNING "%s: kobject_add error: %d\n",
- __func__, retval);
+ pr_warn("%s: kobject_add error: %d\n", __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
@@ -872,9 +887,16 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
+void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ if (kobj->parent)
+ kobject_get_ownership(kobj->parent, uid, gid);
+}
+
static struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = kset_release,
+ .release = kset_release,
+ .get_ownership = kset_get_ownership,
};
/**
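The new get_ownership() hook gives a ktype a chance to report a non-root uid/gid for its sysfs representation; kset_get_ownership() above simply inherits whatever the parent reports. A hedged sketch of a ktype that forwards ownership stored in its containing object (all names here are hypothetical):

struct example_obj {
	struct kobject kobj;
	kuid_t uid;			/* owner recorded elsewhere */
	kgid_t gid;
};

static void example_get_ownership(struct kobject *kobj,
				  kuid_t *uid, kgid_t *gid)
{
	struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

	*uid = obj->uid;
	*gid = obj->gid;
}

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.get_ownership	= example_get_ownership,
};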
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fe6ec8fda28..63d0816ab23b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -22,9 +22,11 @@
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
+#include <linux/uidgid.h>
#include <linux/uuid.h>
#include <linux/ctype.h>
#include <net/sock.h>
+#include <net/netlink.h>
#include <net/net_namespace.h>
@@ -32,11 +34,13 @@ u64 uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
-#ifdef CONFIG_NET
+
struct uevent_sock {
struct list_head list;
struct sock *sk;
};
+
+#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
#endif
@@ -228,30 +232,6 @@ out:
return r;
}
-#ifdef CONFIG_NET
-static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
-{
- struct kobject *kobj = data, *ksobj;
- const struct kobj_ns_type_operations *ops;
-
- ops = kobj_ns_ops(kobj);
- if (!ops && kobj->kset) {
- ksobj = &kobj->kset->kobj;
- if (ksobj->parent != NULL)
- ops = kobj_ns_ops(ksobj->parent);
- }
-
- if (ops && ops->netlink_ns && kobj->ktype->namespace) {
- const void *sock_ns, *ns;
- ns = kobj->ktype->namespace(kobj);
- sock_ns = ops->netlink_ns(dsk);
- return sock_ns != ns;
- }
-
- return 0;
-}
-#endif
-
#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
@@ -293,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info)
}
#endif
-static int kobject_uevent_net_broadcast(struct kobject *kobj,
- struct kobj_uevent_env *env,
+#ifdef CONFIG_NET
+static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
const char *action_string,
const char *devpath)
{
- int retval = 0;
-#if defined(CONFIG_NET)
+ struct netlink_skb_parms *parms;
+ struct sk_buff *skb = NULL;
+ char *scratch;
+ size_t len;
+
+ /* allocate message with maximum possible size */
+ len = strlen(action_string) + strlen(devpath) + 2;
+ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ /* add header */
+ scratch = skb_put(skb, len);
+ sprintf(scratch, "%s@%s", action_string, devpath);
+
+ skb_put_data(skb, env->buf, env->buflen);
+
+ parms = &NETLINK_CB(skb);
+ parms->creds.uid = GLOBAL_ROOT_UID;
+ parms->creds.gid = GLOBAL_ROOT_GID;
+ parms->dst_group = 1;
+ parms->portid = 0;
+
+ return skb;
+}
+
+static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
struct sk_buff *skb = NULL;
struct uevent_sock *ue_sk;
+ int retval = 0;
/* send netlink message */
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
@@ -311,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
continue;
if (!skb) {
- /* allocate message with the maximum possible size */
- size_t len = strlen(action_string) + strlen(devpath) + 2;
- char *scratch;
-
retval = -ENOMEM;
- skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+ skb = alloc_uevent_skb(env, action_string, devpath);
if (!skb)
continue;
-
- /* add header */
- scratch = skb_put(skb, len);
- sprintf(scratch, "%s@%s", action_string, devpath);
-
- skb_put_data(skb, env->buf, env->buflen);
-
- NETLINK_CB(skb).dst_group = 1;
}
- retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
- 0, 1, GFP_KERNEL,
- kobj_bcast_filter,
- kobj);
+ retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
+ GFP_KERNEL);
/* ENOBUFS should be handled in userspace */
if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
}
consume_skb(skb);
-#endif
+
return retval;
}
+static int uevent_net_broadcast_tagged(struct sock *usk,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ skb = alloc_uevent_skb(env, action_string, devpath);
+ if (!skb)
+ return -ENOMEM;
+
+ /* fix credentials */
+ if (owning_user_ns != &init_user_ns) {
+ struct netlink_skb_parms *parms = &NETLINK_CB(skb);
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+ /* fix uid */
+ root_uid = make_kuid(owning_user_ns, 0);
+ if (uid_valid(root_uid))
+ parms->creds.uid = root_uid;
+
+ /* fix gid */
+ root_gid = make_kgid(owning_user_ns, 0);
+ if (gid_valid(root_gid))
+ parms->creds.gid = root_gid;
+ }
+
+ ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+#endif
+
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ int ret = 0;
+
+#ifdef CONFIG_NET
+ const struct kobj_ns_type_operations *ops;
+ const struct net *net = NULL;
+
+ ops = kobj_ns_ops(kobj);
+ if (!ops && kobj->kset) {
+ struct kobject *ksobj = &kobj->kset->kobj;
+ if (ksobj->parent != NULL)
+ ops = kobj_ns_ops(ksobj->parent);
+ }
+
+ /* kobjects currently only carry network namespace tags and they
+ * are the only tag relevant here since we want to decide which
+ * network namespaces to broadcast the uevent into.
+ */
+ if (ops && ops->netlink_ns && kobj->ktype->namespace)
+ if (ops->type == KOBJ_NS_TYPE_NET)
+ net = kobj->ktype->namespace(kobj);
+
+ if (!net)
+ ret = uevent_net_broadcast_untagged(env, action_string,
+ devpath);
+ else
+ ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
+ action_string, devpath);
+#endif
+
+ return ret;
+}
+
static void zap_modalias_env(struct kobj_uevent_env *env)
{
static const char modalias_prefix[] = "MODALIAS=";
@@ -602,12 +673,88 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
EXPORT_SYMBOL_GPL(add_uevent_var);
#if defined(CONFIG_NET)
+static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
+ struct netlink_ext_ack *extack)
+{
+ /* u64 to chars: 2^64 - 1 = 21 chars */
+ char buf[sizeof("SEQNUM=") + 21];
+ struct sk_buff *skbc;
+ int ret;
+
+ /* bump and prepare sequence number */
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ if (ret < 0 || (size_t)ret >= sizeof(buf))
+ return -ENOMEM;
+ ret++;
+
+ /* verify message does not overflow */
+ if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
+ NL_SET_ERR_MSG(extack, "uevent message too big");
+ return -EINVAL;
+ }
+
+ /* copy skb and extend to accommodate sequence number */
+ skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
+ if (!skbc)
+ return -ENOMEM;
+
+ /* append sequence number */
+ skb_put_data(skbc, buf, ret);
+
+ /* remove msg header */
+ skb_pull(skbc, NLMSG_HDRLEN);
+
+ /* set portid 0 to inform userspace message comes from kernel */
+ NETLINK_CB(skbc).portid = 0;
+ NETLINK_CB(skbc).dst_group = 1;
+
+ ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+
+static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net;
+ int ret;
+
+ if (!nlmsg_data(nlh))
+ return -EINVAL;
+
+ /*
+ * Verify that we are allowed to send messages to the target
+ * network namespace. The caller must have CAP_SYS_ADMIN in the
+ * owning user namespace of the target network namespace.
+ */
+ net = sock_net(NETLINK_CB(skb).sk);
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
+ NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
+ return -EPERM;
+ }
+
+ mutex_lock(&uevent_sock_mutex);
+ ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
+ mutex_unlock(&uevent_sock_mutex);
+
+ return ret;
+}
+
+static void uevent_net_rcv(struct sk_buff *skb)
+{
+ netlink_rcv_skb(skb, &uevent_net_rcv_skb);
+}
+
static int uevent_net_init(struct net *net)
{
struct uevent_sock *ue_sk;
struct netlink_kernel_cfg cfg = {
.groups = 1,
- .flags = NL_CFG_F_NONROOT_RECV,
+ .input = uevent_net_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV
};
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
@@ -621,27 +768,28 @@ static int uevent_net_init(struct net *net)
kfree(ue_sk);
return -ENODEV;
}
- mutex_lock(&uevent_sock_mutex);
- list_add_tail(&ue_sk->list, &uevent_sock_list);
- mutex_unlock(&uevent_sock_mutex);
+
+ net->uevent_sock = ue_sk;
+
+ /* Restrict uevents to initial user namespace. */
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_add_tail(&ue_sk->list, &uevent_sock_list);
+ mutex_unlock(&uevent_sock_mutex);
+ }
+
return 0;
}
static void uevent_net_exit(struct net *net)
{
- struct uevent_sock *ue_sk;
+ struct uevent_sock *ue_sk = net->uevent_sock;
- mutex_lock(&uevent_sock_mutex);
- list_for_each_entry(ue_sk, &uevent_sock_list, list) {
- if (sock_net(ue_sk->sk) == net)
- goto found;
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_del(&ue_sk->list);
+ mutex_unlock(&uevent_sock_mutex);
}
- mutex_unlock(&uevent_sock_mutex);
- return;
-
-found:
- list_del(&ue_sk->list);
- mutex_unlock(&uevent_sock_mutex);
netlink_kernel_release(ue_sk->sk);
kfree(ue_sk);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 9f79547d1b97..f0a2934605bf 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -71,6 +71,12 @@ static void __exit libcrc32c_mod_fini(void)
crypto_free_shash(tfm);
}
+const char *crc32c_impl(void)
+{
+ return crypto_shash_driver_name(tfm);
+}
+EXPORT_SYMBOL(crc32c_impl);
+
module_init(libcrc32c_mod_init);
module_exit(libcrc32c_mod_fini);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index a34db8d27667..5d5424b51b74 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -21,13 +21,13 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
if (CHECK_DATA_CORRUPTION(next->prev != prev,
- "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+ "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
- "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+ "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
CHECK_DATA_CORRUPTION(new == prev || new == next,
- "list_add double add: new=%p, prev=%p, next=%p.\n",
+ "list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
@@ -43,16 +43,16 @@ bool __list_del_entry_valid(struct list_head *entry)
next = entry->next;
if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+ "list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+ "list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
CHECK_DATA_CORRUPTION(prev->next != entry,
- "list_del corruption. prev->next should be %p, but was %p\n",
+ "list_del corruption. prev->next should be %px, but was %px\n",
entry, prev->next) ||
CHECK_DATA_CORRUPTION(next->prev != entry,
- "list_del corruption. next->prev should be %p, but was %p\n",
+ "list_del corruption. next->prev should be %px, but was %px\n",
entry, next->prev))
return false;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index b5c1293ce147..1e1bbf171eca 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -29,7 +29,7 @@
*/
static unsigned int debug_locks_verbose;
-static DEFINE_WW_CLASS(ww_lockdep);
+static DEFINE_WD_CLASS(ww_lockdep);
static int __init setup_debug_locks_verbose(char *str)
{
diff --git a/lib/lockref.c b/lib/lockref.c
index 47169ed7e964..3d468b53d4c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -81,6 +81,34 @@ int lockref_get_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_get_not_zero);
/**
+ * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count > 1) {
+ lockref->count--;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
+
+/**
* lockref_get_or_lock - Increments count unless the count is 0 or dead
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count was zero
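lockref_put_not_zero() is the put-side counterpart of lockref_get_not_zero(): it drops the count locklessly as long as it can prove the result stays above zero, and reports failure so the caller can take the lock for the final reference. A hedged usage sketch (the object layout and teardown are hypothetical):

static void example_put(struct lockref *ref)
{
	/* fast path: not the last reference, no lock needed */
	if (lockref_put_not_zero(ref))
		return;

	/* slow path: possibly the last reference */
	spin_lock(&ref->lock);
	ref->count--;
	if (ref->count <= 0) {
		spin_unlock(&ref->lock);
		/* last reference gone: free the object embedding @ref here */
		return;
	}
	spin_unlock(&ref->lock);
}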
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
new file mode 100644
index 000000000000..feea48fd1a0d
--- /dev/null
+++ b/lib/logic_pio.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#define pr_fmt(fmt) "LOGIC PIO: " fmt
+
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+/* The unique hardware address list */
+static LIST_HEAD(io_range_list);
+static DEFINE_MUTEX(io_range_mutex);
+
+/* Consider a kernel general helper for this */
+#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
+
+/**
+ * logic_pio_register_range - register logical PIO range for a host
+ * @new_range: pointer to the IO range to be registered.
+ *
+ * Returns 0 on success, the error code in case of failure.
+ *
+ * Register a new IO range node in the IO range list.
+ */
+int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+{
+ struct logic_pio_hwaddr *range;
+ resource_size_t start;
+ resource_size_t end;
+ resource_size_t mmio_sz = 0;
+ resource_size_t iio_sz = MMIO_UPPER_LIMIT;
+ int ret = 0;
+
+ if (!new_range || !new_range->fwnode || !new_range->size)
+ return -EINVAL;
+
+ start = new_range->hw_start;
+ end = new_range->hw_start + new_range->size;
+
+ mutex_lock(&io_range_mutex);
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->fwnode == new_range->fwnode) {
+ /* range already there */
+ goto end_register;
+ }
+ if (range->flags == LOGIC_PIO_CPU_MMIO &&
+ new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ /* for MMIO ranges we need to check for overlap */
+ if (start >= range->hw_start + range->size ||
+ end < range->hw_start) {
+ mmio_sz += range->size;
+ } else {
+ ret = -EFAULT;
+ goto end_register;
+ }
+ } else if (range->flags == LOGIC_PIO_INDIRECT &&
+ new_range->flags == LOGIC_PIO_INDIRECT) {
+ iio_sz += range->size;
+ }
+ }
+
+ /* range not registered yet, check for available space */
+ if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+ if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+ new_range->io_start = mmio_sz;
+ } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
+ if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->io_start = iio_sz;
+ } else {
+ /* invalid flag */
+ ret = -EINVAL;
+ goto end_register;
+ }
+
+ list_add_tail_rcu(&new_range->list, &io_range_list);
+
+end_register:
+ mutex_unlock(&io_range_mutex);
+ return ret;
+}
+
+/**
+ * find_io_range_by_fwnode - find logical PIO range for given FW node
+ * @fwnode: FW node handle associated with logical PIO range
+ *
+ * Returns pointer to node on success, NULL otherwise.
+ *
+ * Traverse the io_range_list to find the registered node for @fwnode.
+ */
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->fwnode == fwnode)
+ return range;
+ }
+ return NULL;
+}
+
+/* Return a registered range given an input PIO token */
+static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (in_range(pio, range->io_start, range->size))
+ return range;
+ }
+ pr_err("PIO entry token %lx invalid\n", pio);
+ return NULL;
+}
+
+/**
+ * logic_pio_to_hwaddr - translate logical PIO to HW address
+ * @pio: logical PIO value
+ *
+ * Returns HW address if valid, ~0 otherwise.
+ *
+ * Translate the input logical PIO to the corresponding hardware address.
+ * The input PIO should be unique in the whole logical PIO space.
+ */
+resource_size_t logic_pio_to_hwaddr(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range(pio);
+ if (range)
+ return range->hw_start + pio - range->io_start;
+
+ return (resource_size_t)~0;
+}
+
+/**
+ * logic_pio_trans_hwaddr - translate HW address to logical PIO
+ * @fwnode: FW node reference for the host
+ * @addr: Host-relative HW address
+ * @size: size to translate
+ *
+ * Returns Logical PIO value if successful, ~0UL otherwise
+ */
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t addr, resource_size_t size)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range_by_fwnode(fwnode);
+ if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
+ pr_err("IO range not found or invalid\n");
+ return ~0UL;
+ }
+ if (range->size < size) {
+ pr_err("resource size %pa cannot fit in IO range size %pa\n",
+ &size, &range->size);
+ return ~0UL;
+ }
+ return addr - range->hw_start + range->io_start;
+}
+
+unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->flags != LOGIC_PIO_CPU_MMIO)
+ continue;
+ if (in_range(addr, range->hw_start, range->size))
+ return addr - range->hw_start + range->io_start;
+ }
+ pr_err("addr %llx not registered in io_range_list\n",
+ (unsigned long long) addr);
+ return ~0UL;
+}
+
+#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
+#define BUILD_LOGIC_IO(bw, type) \
+type logic_in##bw(unsigned long addr) \
+{ \
+ type ret = (type)~0; \
+ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ ret = read##bw(PCI_IOBASE + addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ ret = entry->ops->in(entry->hostdata, \
+ addr, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ return ret; \
+} \
+ \
+void logic_out##bw(type value, unsigned long addr) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ write##bw(value, PCI_IOBASE + addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->out(entry->hostdata, \
+ addr, value, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+} \
+ \
+void logic_ins##bw(unsigned long addr, void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ reads##bw(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->ins(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ \
+} \
+ \
+void logic_outs##bw(unsigned long addr, const void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ writes##bw(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->outs(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+}
+
+BUILD_LOGIC_IO(b, u8)
+EXPORT_SYMBOL(logic_inb);
+EXPORT_SYMBOL(logic_insb);
+EXPORT_SYMBOL(logic_outb);
+EXPORT_SYMBOL(logic_outsb);
+
+BUILD_LOGIC_IO(w, u16)
+EXPORT_SYMBOL(logic_inw);
+EXPORT_SYMBOL(logic_insw);
+EXPORT_SYMBOL(logic_outw);
+EXPORT_SYMBOL(logic_outsw);
+
+BUILD_LOGIC_IO(l, u32)
+EXPORT_SYMBOL(logic_inl);
+EXPORT_SYMBOL(logic_insl);
+EXPORT_SYMBOL(logic_outl);
+EXPORT_SYMBOL(logic_outsl);
+
+#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
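For illustration, a minimal sketch of how a host controller driver might register an indirect range with the API above; the ops table my_host_ops, the hostdata pointer and the address/size values are placeholders, and the declarations are assumed to live in include/linux/logic_pio.h:

#include <linux/slab.h>
#include <linux/logic_pio.h>	/* assumed location of the declarations above */

static int my_host_register_pio(struct fwnode_handle *fwnode, void *host)
{
	struct logic_pio_hwaddr *range;
	int ret;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->fwnode   = fwnode;		/* identifies this host */
	range->hw_start = 0xe4;			/* host-relative bus address (example value) */
	range->size     = 0x400;
	range->flags    = LOGIC_PIO_INDIRECT;	/* accessed through ->ops, not MMIO */
	range->ops      = &my_host_ops;		/* placeholder: the host's in/out/ins/outs table */
	range->hostdata = host;

	ret = logic_pio_register_range(range);
	if (ret) {
		kfree(range);
		return ret;
	}

	/* range->io_start now holds the logical PIO base of this window */
	return 0;
}

After registration, logic_pio_trans_hwaddr() converts a host-relative hardware address inside that window into the logical token that logic_inb()/logic_outb() expect.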
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 28ba40b99337..2b10a4024c35 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -119,7 +119,7 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
if (!slot)
goto out_fail;
- element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
+ element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
if (!element)
goto out_fail;
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 7eceeddb3fb8..c2d6f4efcfbc 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,13 +65,6 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
-static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
-{
- if (a->alloced < b)
- return mpi_resize(a, b);
- return 0;
-}
-
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
do { \
@@ -80,13 +73,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
(d)[_i] = (s)[_i]; \
} while (0)
-#define MPN_COPY_INCR(d, s, n) \
- do { \
- mpi_size_t _i; \
- for (_i = 0; _i < (n); _i++) \
- (d)[_i] = (s)[_i]; \
- } while (0)
-
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -111,15 +97,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
} \
} while (0)
-#define MPN_NORMALIZE_NOT_ZERO(d, n) \
- do { \
- for (;;) { \
- if ((d)[(n)-1]) \
- break; \
- (n)--; \
- } \
- } while (0)
-
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
do { \
if ((size) < KARATSUBA_THRESHOLD) \
@@ -128,46 +105,11 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
mul_n(prodp, up, vp, size, tspace); \
} while (0);
-/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
- * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
- * If this would yield overflow, DI should be the largest possible number
- * (i.e., only ones). For correct operation, the most significant bit of D
- * has to be set. Put the quotient in Q and the remainder in R.
- */
-#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
- do { \
- mpi_limb_t _q, _ql, _r; \
- mpi_limb_t _xh, _xl; \
- umul_ppmm(_q, _ql, (nh), (di)); \
- _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
- umul_ppmm(_xh, _xl, _q, (d)); \
- sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- } \
- } \
- if (_r >= (d)) { \
- _r -= (d); \
- _q++; \
- } \
- (r) = _r; \
- (q) = _q; \
- } while (0)
-
/*-- mpiutil.c --*/
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
void mpi_free_limb_space(mpi_ptr_t a);
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
-/*-- mpi-bit.c --*/
-void mpi_rshift_limbs(MPI a, unsigned int count);
-int mpi_lshift_limbs(MPI a, unsigned int count);
-
-/*-- mpihelp-add.c --*/
static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -175,7 +117,6 @@ mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-sub.c --*/
static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -183,10 +124,10 @@ mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-cmp.c --*/
+/*-- mpih-cmp.c --*/
int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
-/*-- mpihelp-mul.c --*/
+/*-- mpih-mul.c --*/
struct karatsuba_ctx {
struct karatsuba_ctx *next;
@@ -202,7 +143,6 @@ mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
@@ -214,21 +154,16 @@ int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t vp, mpi_size_t vsize,
struct karatsuba_ctx *ctx);
-/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
+/*-- generic_mpih-mul1.c --*/
mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-/*-- mpihelp-div.c --*/
-mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
+/*-- mpih-div.c --*/
mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize,
mpi_ptr_t dp, mpi_size_t dsize);
-mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
- mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
-/*-- mpihelp-shift.c --*/
+/*-- generic_mpih-[lr]shift.c --*/
mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
unsigned cnt);
mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..a5c921e6d667 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
- int esign, msign, bsign, rsign;
+ int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
- esign = exp->sign;
msign = mod->sign;
rp = res->d;
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 314f4dfa603e..20ed0f766787 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -91,14 +91,14 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */
if (a->d) {
- p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+ p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
kzfree(a->d);
a->d = p;
} else {
- a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+ a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!a->d)
return -ENOMEM;
}
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dfa55c873c13..e335bcafa9e4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Attribute failed policy validation");
goto errout;
}
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 61a6b5aab07e..15ca78e1c7d4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- arch_spin_lock(&lock);
if (regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
*/
#include <linux/percpu_counter.h>
-#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index 6016f1deb1f5..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
- int tag = -ENOSPC;
-
- spin_lock(&tags->lock);
- if (tags->nr_free)
- tag = tags->freelist[--tags->nr_free];
- spin_unlock(&tags->lock);
-
- return tag;
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag;
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
- local_irq_restore(flags);
- return tag;
- }
-
- while (1) {
- spin_lock(&pool->lock);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock(&pool->lock);
- local_irq_restore(flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- spin_lock(&tags->lock);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
- spin_unlock(&tags->lock);
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock(&pool->lock);
-
- /*
- * Global lock held and irqs disabled, don't need percpu
- * lock
- */
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&pool->lock);
- }
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- local_irq_save(flags);
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock(&remote->lock);
- if (err)
- goto out;
- }
-
- spin_lock(&pool->lock);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock(&pool->lock);
-out:
- local_irq_restore(flags);
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e00138d593f..bc03ecc4dfd2 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -120,7 +120,7 @@ bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
- return slot - parent->slots;
+ return parent ? slot - parent->slots : 0;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
@@ -146,7 +146,7 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
- return root->gfp_mask & __GFP_BITS_MASK;
+ return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
void __rcu **slot, struct radix_tree_iter *iter)
{
- void *sib = node_to_entry(slot - 1);
-
while (iter->index < iter->next_index) {
*nodep = rcu_dereference_raw(*slot);
- if (*nodep && *nodep != sib)
+ if (*nodep && !is_sibling_entry(iter->node, *nodep))
return slot;
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags)
{
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
- struct radix_tree_node *node = rcu_dereference_raw(*slot);
+ struct radix_tree_node *node;
slot = skip_siblings(&node, slot, iter);
@@ -2036,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item)
{
struct radix_tree_node *node = NULL;
- void __rcu **slot;
+ void __rcu **slot = NULL;
void *entry;
entry = __radix_tree_lookup(root, index, &node, &slot);
+ if (!slot)
+ return NULL;
if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
get_slot_offset(node, slot))))
return NULL;
@@ -2106,14 +2106,6 @@ void idr_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_preload);
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp: memory allocation flags
- *
- * This function should be called before calling ida_get_new_above(). If it
- * is unable to allocate memory, it will return %0. On success, it returns %1.
- */
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
/*
@@ -2134,7 +2126,6 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
return 1;
}
-EXPORT_SYMBOL(ida_pre_get);
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
@@ -2285,6 +2276,7 @@ void __init radix_tree_init(void)
int ret;
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
+ BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index f01b1cb04f91..3de0d8921286 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -4,3 +4,4 @@ int*.c
tables.c
neon?.c
s390vx?.c
+vpermxor*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4add700ddfe3..2f8b61dfd9b0 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,9 +5,9 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
int8.o int16.o int32.o
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
-raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
-raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
@@ -91,6 +91,30 @@ $(obj)/altivec8.c: UNROLL := 8
$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
+CFLAGS_vpermxor1.o += $(altivec_flags)
+targets += vpermxor1.c
+$(obj)/vpermxor1.c: UNROLL := 1
+$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor2.o += $(altivec_flags)
+targets += vpermxor2.c
+$(obj)/vpermxor2.c: UNROLL := 2
+$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor4.o += $(altivec_flags)
+targets += vpermxor4.c
+$(obj)/vpermxor4.c: UNROLL := 4
+$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor8.o += $(altivec_flags)
+targets += vpermxor8.c
+$(obj)/vpermxor8.c: UNROLL := 8
+$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
CFLAGS_neon1.o += $(NEON_FLAGS)
targets += neon1.c
$(obj)/neon1.c: UNROLL := 1
@@ -115,11 +139,6 @@ $(obj)/neon8.c: UNROLL := 8
$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
-targets += tilegx8.c
-$(obj)/tilegx8.c: UNROLL := 8
-$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
targets += s390vx8.c
$(obj)/s390vx8.c: UNROLL := 8
$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 476994723258..5065b1e7e327 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -74,9 +74,10 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_altivec2,
&raid6_altivec4,
&raid6_altivec8,
-#endif
-#if defined(CONFIG_TILEGX)
- &raid6_tilegx8,
+ &raid6_vpermxor1,
+ &raid6_vpermxor2,
+ &raid6_vpermxor4,
+ &raid6_vpermxor8,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 682aae8a1fef..d20ed0d11411 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,10 +24,13 @@
#include <linux/raid/pq.h>
+#ifdef CONFIG_ALTIVEC
+
#include <altivec.h>
#ifdef __KERNEL__
# include <asm/cputable.h>
# include <asm/switch_to.h>
+#endif /* __KERNEL__ */
/*
* This is the C data type to use. We use a vector of
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 140fa8bb5c23..914ebe98fc21 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
- STORE_DATA(0,$#,&p[d]);
- STORE_DATA(8,$#,&q[d]);
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
- LOAD_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,&p[d]);
XOR(16+$$,16+$$,0+$$);
- STORE_DATA(16,$#,&p[d]);
- LOAD_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
XOR(16+$$,16+$$,8+$$);
- STORE_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 1d2276b007ee..8191e1d0d2fb 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -91,7 +91,7 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -200,9 +200,9 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
- static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
+static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -265,7 +265,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
- }
+}
const struct raid6_calls raid6_sse2x2 = {
raid6_sse22_gen_syndrome,
@@ -366,9 +366,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
- static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
+static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -471,7 +471,7 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
- }
+}
const struct raid6_calls raid6_sse2x4 = {
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index be1010bdc435..5d73f5cb4d8a 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -45,15 +45,14 @@ else ifeq ($(HAS_NEON),yes)
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
- gcc -c -x c - >&/dev/null && \
- rm ./-.o && echo yes)
+ gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
ifeq ($(HAS_ALTIVEC),yes)
- OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
+ CFLAGS += -I../../../arch/powerpc/include
+ CFLAGS += -DCONFIG_ALTIVEC
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
endif
endif
-ifeq ($(ARCH),tilegx)
-OBJS += tilegx8.o
-endif
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
@@ -98,6 +97,18 @@ altivec4.c: altivec.uc ../unroll.awk
altivec8.c: altivec.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
+vpermxor1.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
+
+vpermxor2.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
+
+vpermxor4.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
+
+vpermxor8.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
+
int1.c: int.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=1 < int.uc > $@
@@ -116,15 +127,11 @@ int16.c: int.uc ../unroll.awk
int32.c: int.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=32 < int.uc > $@
-tilegx8.c: tilegx.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
-
tables.c: mktables
./mktables > tables.c
clean:
- rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
- rm -f tilegx*.c
+ rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
spotless: clean
rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
deleted file mode 100644
index 2dd291a11264..000000000000
--- a/lib/raid6/tilegx.uc
+++ /dev/null
@@ -1,87 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- * Copyright 2012 Tilera Corporation - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * tilegx$#.c
- *
- * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
- *
- * This file is postprocessed using unroll.awk.
- *
- */
-
-#include <linux/raid/pq.h>
-
-/* Create 8 byte copies of constant byte */
-# define NBYTES(x) (__insn_v1addi(0, x))
-# define NSIZE 8
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ u64 SHLBYTE(u64 v)
-{
- /* Vector One Byte Shift Left Immediate. */
- return __insn_v1shli(v, 1);
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ u64 MASK(u64 v)
-{
- /* Vector One Byte Shift Right Signed Immediate. */
- return __insn_v1shrsi(v, 7);
-}
-
-
-void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u64 *p, *q;
- int d, z, z0;
-
- u64 wd$$, wq$$, wp$$, w1$$, w2$$;
- u64 x1d = NBYTES(0x1d);
- u64 * z0ptr;
-
- z0 = disks - 3; /* Highest data disk */
- p = (u64 *)dptr[z0+1]; /* XOR parity */
- q = (u64 *)dptr[z0+2]; /* RS syndrome */
-
- z0ptr = (u64 *)&dptr[z0][0];
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *z0ptr++;
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
- wp$$ = wp$$ ^ wd$$;
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ = w2$$ & x1d;
- w1$$ = w1$$ ^ w2$$;
- wq$$ = w1$$ ^ wd$$;
- }
- *p++ = wp$$;
- *q++ = wq$$;
- }
-}
-
-const struct raid6_calls raid6_tilegx$# = {
- raid6_tilegx$#_gen_syndrome,
- NULL, /* XOR not yet implemented */
- NULL,
- "tilegx$#",
- 0
-};
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
new file mode 100644
index 000000000000..10475dc423c1
--- /dev/null
+++ b/lib/raid6/vpermxor.uc
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2017, Matt Brown, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * vpermxor$#.c
+ *
+ * Based on H. Peter Anvin's paper - The mathematics of RAID-6
+ *
+ * $#-way unrolled VPERMXOR/Altivec implementation of the RAID-6 Q syndrome
+ * This file is postprocessed using unroll.awk
+ *
+ * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
+ * syndrome calculations.
+ * This can be run on systems which have both the Altivec and the vpermxor instructions.
+ *
+ * This instruction was introduced in POWER8 - ISA v2.07.
+ */
+
+#include <linux/raid/pq.h>
+#ifdef CONFIG_ALTIVEC
+
+#include <altivec.h>
+#ifdef __KERNEL__
+#include <asm/cputable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/switch_to.h>
+#endif
+
+typedef vector unsigned char unative_t;
+#define NSIZE sizeof(unative_t)
+
+static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
+ 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
+ 0x06, 0x04, 0x02, 0x00};
+static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
+ 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
+ 0x60, 0x40, 0x20, 0x00};
+
+static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
+ void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+ unative_t wp$$, wq$$, wd$$;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for (d = 0; d < bytes; d += NSIZE*$#) {
+ wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+
+ for (z = z0-1; z>=0; z--) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ /* P syndrome */
+ wp$$ = vec_xor(wp$$, wd$$);
+
+ /* Q syndrome */
+ asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
+ wq$$ = vec_xor(wq$$, wd$$);
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+}
+
+static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+
+ raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);
+
+ disable_kernel_altivec();
+ preempt_enable();
+}
+
+int raid6_have_altivec_vpermxor(void);
+#if $# == 1
+int raid6_have_altivec_vpermxor(void)
+{
+ /* Check if arch has both altivec and the vpermxor instructions */
+# ifdef __KERNEL__
+ return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S));
+# else
+ return 1;
+# endif
+}
+#endif
+
+const struct raid6_calls raid6_vpermxor$# = {
+ raid6_vpermxor$#_gen_syndrome,
+ NULL,
+ raid6_have_altivec_vpermxor,
+ "vpermxor$#",
+ 0
+};
+#endif
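For background on the table constants above: each inner-loop step computes wq = 2*wq ^ wd over GF(2^8) with the RAID-6 polynomial 0x11d. Because multiplication distributes over XOR, 2*(16*h ^ l) = (2*16*h) ^ (2*l), so the two 16-entry tables gf_high and gf_low cover the high and low nibbles and a single VPERMXOR performs both lookups plus the XOR (unroll.awk then expands the $$ / $# markers into the unrolled copies). A scalar reference for the same per-byte multiply-by-2, shown only for comparison:

#include <linux/types.h>

/* Multiply by 2 in GF(2^8) modulo the RAID-6 polynomial 0x11d. */
static inline u8 gf256_mul2(u8 q)
{
	return (q << 1) ^ ((q & 0x80) ? 0x1d : 0);
}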
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 7d36c1e27ff6..b7055b2a07d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -247,7 +247,7 @@ static int __init rbtree_test_init(void)
cycles_t time1, time2, time;
struct rb_node *node;
- nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index fcb4ce682c6f..bf043258fa00 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
return R;
}
EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+ struct reciprocal_value_adv R;
+ u32 l, post_shift;
+ u64 mhigh, mlow;
+
+ /* ceil(log2(d)) */
+ l = fls(d - 1);
+ /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+ * be handled before calling "reciprocal_value_adv", please see the
+ * comment at include/linux/reciprocal_div.h.
+ */
+ WARN(l == 32,
+ "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+ d, __func__);
+ post_shift = l;
+ mlow = 1ULL << (32 + l);
+ do_div(mlow, d);
+ mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+ do_div(mhigh, d);
+
+ for (; post_shift > 0; post_shift--) {
+ u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+ if (lo >= hi)
+ break;
+
+ mlow = lo;
+ mhigh = hi;
+ }
+
+ R.m = (u32)mhigh;
+ R.sh = post_shift;
+ R.exp = l;
+ R.is_wide_m = mhigh > U32_MAX;
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
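As a usage illustration, assuming the multiply-and-shift scheme documented in include/linux/reciprocal_div.h: when is_wide_m is false, the quotient is ((u64)n * m) >> 32 >> sh. The helper name below is hypothetical.

#include <linux/types.h>
#include <linux/reciprocal_div.h>

/* Sketch only: divide n by the d that was passed to reciprocal_value_adv(),
 * valid for the narrow-multiplier case (R->is_wide_m == false).
 */
static u32 div_by_reciprocal_adv(u32 n, const struct reciprocal_value_adv *R)
{
	return (u32)(((u64)n * R->m) >> 32) >> R->sh;
}

As a quick check, d = 3 with prec = 32 yields m = 0xAAAAAAAB and sh = 1, so n = 9 evaluates to ((9 * 0xAAAAAAAB) >> 32) >> 1 = 3.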
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 0ec3f257ffdf..1db74eb098d0 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -1,22 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/decode_rs.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
* Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
- *
- */
-
-/* Generic data width independent code which is included by the
- * wrappers.
+ * Generic data width independent code which is included by the wrappers.
*/
{
+ struct rs_codec *rs = rsc->codec;
int deg_lambda, el, deg_omega;
int i, j, r, k, pad;
int nn = rs->nn;
@@ -27,16 +21,22 @@
uint16_t *alpha_to = rs->alpha_to;
uint16_t *index_of = rs->index_of;
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
- /* Err+Eras Locator poly and syndrome poly The maximum value
- * of nroots is 8. So the necessary stack size will be about
- * 220 bytes max.
- */
- uint16_t lambda[nroots + 1], syn[nroots];
- uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1];
- uint16_t root[nroots], reg[nroots + 1], loc[nroots];
int count = 0;
uint16_t msk = (uint16_t) rs->nn;
+ /*
+ * The decoder buffers are in the rs control struct. They are
+ * arrays sized [nroots + 1]
+ */
+ uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1);
+ uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1);
+ uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1);
+ uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1);
+ uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1);
+ uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1);
+ uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1);
+ uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1);
+
/* Check length parameter for validity */
pad = nn - nroots - len;
BUG_ON(pad < 0 || pad >= nn);
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c
index 0b5b1a6728ec..9112d46e869e 100644
--- a/lib/reed_solomon/encode_rs.c
+++ b/lib/reed_solomon/encode_rs.c
@@ -1,23 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/encode_rs.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
* Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $
- *
- */
-
-/* Generic data width independent code which is included by the
- * wrappers.
- * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
+ * Generic data width independent code which is included by the wrappers.
*/
{
+ struct rs_codec *rs = rsc->codec;
int i, j, pad;
int nn = rs->nn;
int nroots = rs->nroots;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 06d04cfa9339..e5fdc8b9e856 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -1,43 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/reed_solomon.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* Reed Solomon code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
*
- * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Description:
*
* The generic Reed Solomon library provides runtime configurable
* encoding / decoding of RS codes.
- * Each user must call init_rs to get a pointer to a rs_control
- * structure for the given rs parameters. This structure is either
- * generated or a already available matching control structure is used.
- * If a structure is generated then the polynomial arrays for
- * fast encoding / decoding are built. This can take some time so
- * make sure not to call this function from a time critical path.
- * Usually a module / driver should initialize the necessary
- * rs_control structure on module / driver init and release it
- * on exit.
- * The encoding puts the calculated syndrome into a given syndrome
- * buffer.
- * The decoding is a two step process. The first step calculates
- * the syndrome over the received (data + syndrome) and calls the
- * second stage, which does the decoding / error correction itself.
- * Many hw encoders provide a syndrome calculation over the received
- * data + syndrome and can call the second stage directly.
*
+ * Each user must call init_rs to get a pointer to a rs_control structure
+ * for the given rs parameters. The control struct is unique per instance.
+ * It points to a codec which can be shared by multiple control structures.
+ * If a codec is newly allocated then the polynomial arrays for fast
+ * encoding / decoding are built. This can take some time so make sure not
+ * to call this function from a time critical path. Usually a module /
+ * driver should initialize the necessary rs_control structure on module /
+ * driver init and release it on exit.
+ *
+ * The encoding puts the calculated syndrome into a given syndrome buffer.
+ *
+ * The decoding is a two step process. The first step calculates the
+ * syndrome over the received (data + syndrome) and calls the second stage,
+ * which does the decoding / error correction itself. Many hw encoders
+ * provide a syndrome calculation over the received data + syndrome and can
+ * call the second stage directly.
*/
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -46,32 +37,44 @@
#include <linux/slab.h>
#include <linux/mutex.h>
-/* This list holds all currently allocated rs control structures */
-static LIST_HEAD (rslist);
+enum {
+ RS_DECODE_LAMBDA,
+ RS_DECODE_SYN,
+ RS_DECODE_B,
+ RS_DECODE_T,
+ RS_DECODE_OMEGA,
+ RS_DECODE_ROOT,
+ RS_DECODE_REG,
+ RS_DECODE_LOC,
+ RS_DECODE_NUM_BUFFERS
+};
+
+/* This list holds all currently allocated rs codec structures */
+static LIST_HEAD(codec_list);
/* Protection for the list */
static DEFINE_MUTEX(rslistlock);
/**
- * rs_init - Initialize a Reed-Solomon codec
+ * codec_init - Initialize a Reed-Solomon codec
* @symsize: symbol size, bits (1-8)
* @gfpoly: Field generator polynomial coefficients
* @gffunc: Field generator function
* @fcr: first root of RS code generator polynomial, index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
*
- * Allocate a control structure and the polynom arrays for faster
+ * Allocate a codec structure and the polynom arrays for faster
* en/decoding. Fill the arrays according to the given parameters.
*/
-static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
- int fcr, int prim, int nroots)
+static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
+ int fcr, int prim, int nroots, gfp_t gfp)
{
- struct rs_control *rs;
int i, j, sr, root, iprim;
+ struct rs_codec *rs;
- /* Allocate the control structure */
- rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
- if (rs == NULL)
+ rs = kzalloc(sizeof(*rs), gfp);
+ if (!rs)
return NULL;
INIT_LIST_HEAD(&rs->list);
@@ -85,17 +88,17 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
rs->gffunc = gffunc;
/* Allocate the arrays */
- rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->alpha_to == NULL)
- goto errrs;
+ goto err;
- rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->index_of == NULL)
- goto erralp;
+ goto err;
- rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
+ rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp);
if(rs->genpoly == NULL)
- goto erridx;
+ goto err;
/* Generate Galois field lookup tables */
rs->index_of[0] = rs->nn; /* log(zero) = -inf */
@@ -120,7 +123,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
}
/* If it's not primitive, exit */
if(sr != rs->alpha_to[0])
- goto errpol;
+ goto err;
/* Find prim-th root of 1, used in decoding */
for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
@@ -148,42 +151,52 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
/* convert rs->genpoly[] to index form for quicker encoding */
for (i = 0; i <= nroots; i++)
rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+
+ rs->users = 1;
+ list_add(&rs->list, &codec_list);
return rs;
- /* Error exit */
-errpol:
+err:
kfree(rs->genpoly);
-erridx:
kfree(rs->index_of);
-erralp:
kfree(rs->alpha_to);
-errrs:
kfree(rs);
return NULL;
}
/**
- * free_rs - Free the rs control structure, if it is no longer used
- * @rs: the control structure which is not longer used by the
+ * free_rs - Free the rs control structure
+ * @rs: The control structure which is no longer used by the
* caller
+ *
+ * Free the control structure. If @rs is the last user of the associated
+ * codec, free the codec as well.
*/
void free_rs(struct rs_control *rs)
{
+ struct rs_codec *cd;
+
+ if (!rs)
+ return;
+
+ cd = rs->codec;
mutex_lock(&rslistlock);
- rs->users--;
- if(!rs->users) {
- list_del(&rs->list);
- kfree(rs->alpha_to);
- kfree(rs->index_of);
- kfree(rs->genpoly);
- kfree(rs);
+ cd->users--;
+ if(!cd->users) {
+ list_del(&cd->list);
+ kfree(cd->alpha_to);
+ kfree(cd->index_of);
+ kfree(cd->genpoly);
+ kfree(cd);
}
mutex_unlock(&rslistlock);
+ kfree(rs);
}
+EXPORT_SYMBOL_GPL(free_rs);
/**
- * init_rs_internal - Find a matching or allocate a new rs control structure
+ * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
@@ -191,55 +204,69 @@ void free_rs(struct rs_control *rs)
* @gffunc: pointer to function to generate the next field element,
* or the multiplicative identity element if given 0. Used
* instead of gfpoly if gfpoly is 0
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
*/
static struct rs_control *init_rs_internal(int symsize, int gfpoly,
- int (*gffunc)(int), int fcr,
- int prim, int nroots)
+ int (*gffunc)(int), int fcr,
+ int prim, int nroots, gfp_t gfp)
{
- struct list_head *tmp;
- struct rs_control *rs;
+ struct list_head *tmp;
+ struct rs_control *rs;
+ unsigned int bsize;
/* Sanity checks */
if (symsize < 1)
return NULL;
if (fcr < 0 || fcr >= (1<<symsize))
- return NULL;
+ return NULL;
if (prim <= 0 || prim >= (1<<symsize))
- return NULL;
+ return NULL;
if (nroots < 0 || nroots >= (1<<symsize))
return NULL;
+ /*
+ * The decoder needs buffers in each control struct instance to
+ * avoid variable-size or large fixed-size allocations on the
+ * stack. Size the buffers as arrays of [nroots + 1].
+ */
+ bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1);
+ rs = kzalloc(sizeof(*rs) + bsize, gfp);
+ if (!rs)
+ return NULL;
+
mutex_lock(&rslistlock);
/* Walk through the list and look for a matching entry */
- list_for_each(tmp, &rslist) {
- rs = list_entry(tmp, struct rs_control, list);
- if (symsize != rs->mm)
+ list_for_each(tmp, &codec_list) {
+ struct rs_codec *cd = list_entry(tmp, struct rs_codec, list);
+
+ if (symsize != cd->mm)
continue;
- if (gfpoly != rs->gfpoly)
+ if (gfpoly != cd->gfpoly)
continue;
- if (gffunc != rs->gffunc)
+ if (gffunc != cd->gffunc)
continue;
- if (fcr != rs->fcr)
+ if (fcr != cd->fcr)
continue;
- if (prim != rs->prim)
+ if (prim != cd->prim)
continue;
- if (nroots != rs->nroots)
+ if (nroots != cd->nroots)
continue;
/* We have a matching one already */
- rs->users++;
+ cd->users++;
+ rs->codec = cd;
goto out;
}
/* Create a new one */
- rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots);
- if (rs) {
- rs->users = 1;
- list_add(&rs->list, &rslist);
+ rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp);
+ if (!rs->codec) {
+ kfree(rs);
+ rs = NULL;
}
out:
mutex_unlock(&rslistlock);
@@ -247,45 +274,48 @@ out:
}
/**
- * init_rs - Find a matching or allocate a new rs control structure
+ * init_rs_gfp - Create a RS control struct and initialize it
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
* must be primitive;
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: Memory allocation flags.
*/
-struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
- int nroots)
+struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
+ int nroots, gfp_t gfp)
{
- return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots);
+ return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp);
}
+EXPORT_SYMBOL_GPL(init_rs_gfp);
/**
- * init_rs_non_canonical - Find a matching or allocate a new rs control
- * structure, for fields with non-canonical
- * representation
+ * init_rs_non_canonical - Allocate rs control struct for fields with
+ * non-canonical representation
* @symsize: the symbol size (number of bits)
* @gffunc: pointer to function to generate the next field element,
* or the multiplicative identity element if given 0. Used
* instead of gfpoly if gfpoly is 0
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
*/
struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
- int fcr, int prim, int nroots)
+ int fcr, int prim, int nroots)
{
- return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots);
+ return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots,
+ GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(init_rs_non_canonical);
#ifdef CONFIG_REED_SOLOMON_ENC8
/**
* encode_rs8 - Calculate the parity for data values (8bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
@@ -295,7 +325,7 @@ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
* symbol size > 8. The calling code must take care of encoding of the
* syndrome result for storage itself.
*/
-int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
+int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
@@ -306,7 +336,7 @@ EXPORT_SYMBOL_GPL(encode_rs8);
#ifdef CONFIG_REED_SOLOMON_DEC8
/**
* decode_rs8 - Decode codeword (8bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
@@ -319,9 +349,14 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* The syndrome and parity uses a uint16_t data type to enable
* symbol size > 8. The calling code must take care of decoding of the
* syndrome result and the received parity before calling this code.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
* Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
*/
-int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
@@ -333,7 +368,7 @@ EXPORT_SYMBOL_GPL(decode_rs8);
#ifdef CONFIG_REED_SOLOMON_ENC16
/**
* encode_rs16 - Calculate the parity for data values (16bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
@@ -341,7 +376,7 @@ EXPORT_SYMBOL_GPL(decode_rs8);
*
* Each field in the data array contains up to symbol size bits of valid data.
*/
-int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
@@ -352,7 +387,7 @@ EXPORT_SYMBOL_GPL(encode_rs16);
#ifdef CONFIG_REED_SOLOMON_DEC16
/**
* decode_rs16 - Decode codeword (16bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
@@ -363,9 +398,14 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* @corr: buffer to store correction bitmask on eras_pos
*
* Each field in the data array contains up to symbol size bits of valid data.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
* Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
*/
-int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
@@ -374,10 +414,6 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
EXPORT_SYMBOL_GPL(decode_rs16);
#endif
-EXPORT_SYMBOL_GPL(init_rs);
-EXPORT_SYMBOL_GPL(init_rs_non_canonical);
-EXPORT_SYMBOL_GPL(free_rs);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
diff --git a/lib/refcount.c b/lib/refcount.c
index 0eb48353abe3..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
*
*/
+#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -58,7 +58,7 @@
*
* Return: false if the passed refcount is 0, true otherwise
*/
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
{
- WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
/**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
*
* Return: true if the increment was successful, false otherwise
*/
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
/**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
{
- WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
/**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
/**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return refcount_sub_and_test_checked(1, r);
}
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
/**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
{
- WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
@@ -350,3 +349,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
}
EXPORT_SYMBOL(refcount_dec_and_lock);
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock_irqsave(lock, *flags);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock_irqrestore(lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
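The new helper slots into the usual put-path pattern; the struct and field names below are hypothetical, only the call sequence follows the kernel-doc above.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	refcount_t ref;
	spinlock_t lock;		/* protects list membership */
	struct list_head node;
};

static void foo_put(struct foo *f)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&f->ref, &f->lock, &flags))
		return;

	/* Last reference dropped: lock held, IRQs off; tear down and free. */
	list_del(&f->node);
	spin_unlock_irqrestore(&f->lock, flags);
	kfree(f);
}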
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 47de025b6245..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -115,8 +115,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ bool leaf)
{
union nested_table *ntbl;
int i;
@@ -127,10 +126,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) {
- for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +154,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ false)) {
kfree(tbl);
return NULL;
}
@@ -175,17 +173,15 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- if (gfp != GFP_KERNEL)
- tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
- else
- tbl = kvzalloc(size, gfp);
+ tbl = kvzalloc(size, gfp);
size = nbuckets;
- if (tbl == NULL && gfp != GFP_KERNEL) {
+ if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
+
if (tbl == NULL)
return NULL;
@@ -206,7 +202,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
@@ -227,8 +223,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct bucket_table *new_tbl = rhashtable_last_table(ht,
- rht_dereference_rcu(old_tbl->future_tbl, ht));
+ struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
int err = -EAGAIN;
struct rhash_head *head, *next, *entry;
@@ -298,21 +293,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
struct bucket_table *old_tbl,
struct bucket_table *new_tbl)
{
- /* Protect future_tbl using the first bucket lock. */
- spin_lock_bh(old_tbl->locks);
-
- /* Did somebody beat us to it? */
- if (rcu_access_pointer(old_tbl->future_tbl)) {
- spin_unlock_bh(old_tbl->locks);
- return -EEXIST;
- }
-
/* Make insertions go into the new, empty table right away. Deletions
* and lookups will be attempted in both tables until we synchronize.
+ * As cmpxchg() provides strong barriers, we do not need
+ * rcu_assign_pointer().
*/
- rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- spin_unlock_bh(old_tbl->locks);
+ if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+ return -EEXIST;
return 0;
}
@@ -333,6 +321,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
err = rhashtable_rehash_chain(ht, old_hash);
if (err)
return err;
+ cond_resched();
}
/* Publish the new table pointer. */
@@ -458,7 +447,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
err = -ENOMEM;
- new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+ new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
if (new_tbl == NULL)
goto fail;
@@ -474,7 +463,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
fail:
/* Do not fail the insert if someone else did a rehash. */
- if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ if (likely(rcu_access_pointer(tbl->future_tbl)))
return 0;
/* Schedule async rehash to retry allocation in process context. */
@@ -547,7 +536,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
return ERR_CAST(data);
- new_tbl = rcu_dereference(tbl->future_tbl);
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (new_tbl)
return new_tbl;
@@ -606,7 +595,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
break;
spin_unlock_bh(lock);
- tbl = rcu_dereference(tbl->future_tbl);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
}
data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -667,8 +656,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* For a completely stable walk you should construct your own data
* structure outside the hash table.
*
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
*/
@@ -725,6 +715,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
__acquires(RCU)
{
struct rhashtable *ht = iter->ht;
+ bool rhlist = ht->rhlist;
rcu_read_lock();
@@ -733,11 +724,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker.tbl && !iter->end_of_table) {
+ if (iter->end_of_table)
+ return 0;
+ if (!iter->walker.tbl) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+ iter->slot = 0;
+ iter->skip = 0;
return -EAGAIN;
}
+ if (iter->p && !rhlist) {
+ /*
+ * We need to validate that 'p' is still in the table, and
+ * if so, update 'skip'
+ */
+ struct rhash_head *p;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ skip++;
+ if (p == iter->p) {
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ iter->p = NULL;
+ } else if (iter->p && rhlist) {
+ /* Need to validate that 'list' is still in the table, and
+ * if so, update 'skip' and 'p'.
+ */
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ for (list = container_of(p, struct rhlist_head, rhead);
+ list;
+ list = rcu_dereference(list->next)) {
+ skip++;
+ if (list == iter->list) {
+ iter->p = p;
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ }
+ iter->p = NULL;
+ }
+found:
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
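For reference, the walker API these changes harden is typically driven as in the sketch below (rhashtable_walk_start() being the wrapper that ignores the return value of the function above); this is a hedged usage example, not part of the patch.

#include <linux/err.h>
#include <linux/rhashtable.h>

static unsigned int count_entries(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	unsigned int n = 0;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us */
			break;
		}
		n++;
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return n;
}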
@@ -913,8 +945,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
iter->walker.tbl = NULL;
spin_unlock(&ht->lock);
- iter->p = NULL;
-
out:
rcu_read_unlock();
}
@@ -922,8 +952,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- (unsigned long)params->min_size);
+ size_t retsize;
+
+ if (params->nelem_hint)
+ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ (unsigned long)params->min_size);
+ else
+ retsize = max(HASH_DEFAULT_SIZE,
+ (unsigned long)params->min_size);
+
+ return retsize;
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -952,7 +990,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = jhash,
- * .nulls_base = (1U << RHT_BASE_SHIFT),
* };
*
* Configuration Example 2: Variable length keys
@@ -980,15 +1017,10 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *tbl;
size_t size;
- size = HASH_DEFAULT_SIZE;
-
if ((!params->key_len && !params->obj_hashfn) ||
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
- if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
- return -EINVAL;
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -1008,8 +1040,7 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
- if (params->nelem_hint)
- size = rounded_hashtable_size(&ht->p);
+ size = rounded_hashtable_size(&ht->p);
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1026,9 +1057,16 @@ int rhashtable_init(struct rhashtable *ht,
}
}
+ /*
+ * This is api initialization and thus we need to guarantee the
+ * initial rhashtable allocation. Upon failure, retry with the
+ * smallest possible size with __GFP_NOFAIL semantics.
+ */
tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
- if (tbl == NULL)
- return -ENOMEM;
+ if (unlikely(tbl == NULL)) {
+ size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
+ }
atomic_set(&ht->nelems, 0);
@@ -1053,10 +1091,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- /* No rhlist NULLs marking for now. */
- if (params->nulls_base)
- return -EINVAL;
-
err = rhashtable_init(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
@@ -1101,17 +1135,19 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg)
{
- struct bucket_table *tbl;
+ struct bucket_table *tbl, *next_tbl;
unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
+restart:
if (free_fn) {
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
+ cond_resched();
for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
@@ -1123,7 +1159,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
}
}
+ next_tbl = rht_dereference(tbl->future_tbl, ht);
bucket_table_free(tbl);
+ if (next_tbl) {
+ tbl = next_tbl;
+ goto restart;
+ }
mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
@@ -1173,25 +1214,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- unsigned int shifted;
- unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
hash >>= tbl->nest;
- nhash = index;
- shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift));
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
- shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift));
}
if (!ntbl)
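With .nulls_base gone from rhashtable_params, a minimal parameter block, mirroring the configuration example quoted in the code but using a hypothetical test_obj, might look like this; automatic_shrinking is optional and shown only for illustration.

#include <linux/rhashtable.h>

struct test_obj {
	int			key;
	struct rhash_head	node;
};

static const struct rhashtable_params test_params = {
	.head_offset	= offsetof(struct test_obj, node),
	.key_offset	= offsetof(struct test_obj, key),
	.key_len	= sizeof(int),
	.automatic_shrinking = true,
	/* no .nulls_base: the nulls marker is no longer per-table */
};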
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 42b5ca0acf93..fdd1b8aa8ac6 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -52,7 +52,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
return 0;
}
- sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
if (!sb->map)
return -ENOMEM;
@@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return -1;
}
- if (!test_and_set_bit(nr, word))
+ if (!test_and_set_bit_lock(nr, word))
break;
hint = nr + 1;
@@ -270,18 +270,33 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
-static unsigned int sbq_calc_wake_batch(unsigned int depth)
+static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
{
unsigned int wake_batch;
+ unsigned int shallow_depth;
/*
* For each batch, we wake up one queue. We need to make sure that our
- * batch size is small enough that the full depth of the bitmap is
- * enough to wake up all of the queues.
+ * batch size is small enough that the full depth of the bitmap,
+ * potentially limited by a shallow depth, is enough to wake up all of
+ * the queues.
+ *
+ * Each full word of the bitmap has bits_per_word bits, and there might
+ * be a partial word. There are depth / bits_per_word full words and
+ * depth % bits_per_word bits left over. In bitwise arithmetic:
+ *
+ * bits_per_word = 1 << shift
+ * depth / bits_per_word = depth >> shift
+ * depth % bits_per_word = depth & ((1 << shift) - 1)
+ *
+ * Each word can be limited to sbq->min_shallow_depth bits.
*/
- wake_batch = SBQ_WAKE_BATCH;
- if (wake_batch > depth / SBQ_WAIT_QUEUES)
- wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);
+ shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
+ depth = ((depth >> sbq->sb.shift) * shallow_depth +
+ min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
+ wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
+ SBQ_WAKE_BATCH);
return wake_batch;
}
@@ -307,7 +322,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
}
- sbq->wake_batch = sbq_calc_wake_batch(depth);
+ sbq->min_shallow_depth = UINT_MAX;
+ sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
@@ -327,21 +343,28 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
-void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
{
- unsigned int wake_batch = sbq_calc_wake_batch(depth);
+ unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
int i;
if (sbq->wake_batch != wake_batch) {
WRITE_ONCE(sbq->wake_batch, wake_batch);
/*
- * Pairs with the memory barrier in sbq_wake_up() to ensure that
- * the batch size is updated before the wait counts.
+ * Pairs with the memory barrier in sbitmap_queue_wake_up()
+ * to ensure that the batch size is updated before the wait
+ * counts.
*/
smp_mb__before_atomic();
for (i = 0; i < SBQ_WAIT_QUEUES; i++)
atomic_set(&sbq->ws[i].wait_cnt, 1);
}
+}
+
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+{
+ sbitmap_queue_update_wake_batch(sbq, depth);
sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
@@ -380,6 +403,8 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int hint, depth;
int nr;
+ WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
+
hint = this_cpu_read(*sbq->alloc_hint);
depth = READ_ONCE(sbq->sb.depth);
if (unlikely(hint >= depth)) {
@@ -403,6 +428,14 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ unsigned int min_shallow_depth)
+{
+ sbq->min_shallow_depth = min_shallow_depth;
+ sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
+
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
@@ -425,52 +458,67 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
return NULL;
}
-static void sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
struct sbq_wait_state *ws;
unsigned int wake_batch;
int wait_cnt;
- /*
- * Pairs with the memory barrier in set_current_state() to ensure the
- * proper ordering of clear_bit()/waitqueue_active() in the waker and
- * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
- * the comment on waitqueue_active(). This is __after_atomic because we
- * just did clear_bit() in the caller.
- */
- smp_mb__after_atomic();
-
ws = sbq_wake_ptr(sbq);
if (!ws)
- return;
+ return false;
wait_cnt = atomic_dec_return(&ws->wait_cnt);
if (wait_cnt <= 0) {
+ int ret;
+
wake_batch = READ_ONCE(sbq->wake_batch);
+
/*
* Pairs with the memory barrier in sbitmap_queue_resize() to
* ensure that we see the batch size update before the wait
* count is reset.
*/
smp_mb__before_atomic();
+
/*
- * If there are concurrent callers to sbq_wake_up(), the last
- * one to decrement the wait count below zero will bump it back
- * up. If there is a concurrent resize, the count reset will
- * either cause the cmpxchg to fail or overwrite after the
- * cmpxchg.
+ * For concurrent callers of this, the one that failed the
+ * atomic_cmpxchg() race should call this function again
+ * to wake up a new batch on a different 'ws'.
*/
- atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
- sbq_index_atomic_inc(&sbq->wake_index);
- wake_up_nr(&ws->wait, wake_batch);
+ ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
+ if (ret == wait_cnt) {
+ sbq_index_atomic_inc(&sbq->wake_index);
+ wake_up_nr(&ws->wait, wake_batch);
+ return false;
+ }
+
+ return true;
}
+
+ return false;
+}
+
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+{
+ while (__sbq_wake_up(sbq))
+ ;
}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
- sbitmap_clear_bit(&sbq->sb, nr);
- sbq_wake_up(sbq);
+ sbitmap_clear_bit_unlock(&sbq->sb, nr);
+ /*
+ * Pairs with the memory barrier in set_current_state() to ensure the
+ * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
+ * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
+ * waiter. See the comment on waitqueue_active().
+ */
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq);
+
if (likely(!sbq->round_robin && nr < sbq->sb.depth))
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
@@ -482,7 +530,7 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
/*
* Pairs with the memory barrier in set_current_state() like in
- * sbq_wake_up().
+ * sbitmap_queue_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
@@ -528,5 +576,6 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_puts(m, "}\n");
seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+ seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
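A standalone sketch of the reshaped wake-batch arithmetic, assuming SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8 (hedged constants, not restated in these hunks):

#include <linux/kernel.h>

static unsigned int wake_batch_example(unsigned int depth, unsigned int shift,
				       unsigned int min_shallow_depth)
{
	unsigned int shallow = min(1U << shift, min_shallow_depth);
	unsigned int usable  = (depth >> shift) * shallow +
			       min(depth & ((1U << shift) - 1), shallow);

	/*
	 * Example: depth=256, shift=6 (64-bit words), min_shallow_depth=16
	 *   -> shallow=16, usable = 4*16 + 0 = 64, batch = clamp(64/8,1,8) = 8.
	 * With min_shallow_depth left at UINT_MAX the shallow clamp is a no-op.
	 */
	return clamp_t(unsigned int, usable / 8, 1, 8);
}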
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 53728d391d3a..7c6096a71704 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -24,9 +24,6 @@
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
if (sg_is_last(sg))
return NULL;
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
for_each_sg(sgl, sg, nents, i)
ret = sg;
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
-#endif
return ret;
}
EXPORT_SYMBOL(sg_last);
@@ -132,14 +126,7 @@ EXPORT_SYMBOL(sg_last);
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
- {
- unsigned int i;
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
- }
-#endif
- sg_mark_end(&sgl[nents - 1]);
+ sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
@@ -177,7 +164,8 @@ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
return ptr;
} else
- return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+ return kmalloc_array(nents, sizeof(struct scatterlist),
+ gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
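sg_init_marker() itself is not shown in this hunk; the sketch below is an assumption about its minimal shape, factored out of sg_init_table() so that callers which have already zeroed the array (for example, per-request preallocated sglists) can mark the end of the list without paying for a second memset.

/* Assumed shape of the helper split out of sg_init_table(). */
static inline void sg_init_marker(struct scatterlist *sgl,
				  unsigned int nents)
{
	sg_mark_end(&sgl[nents - 1]);
}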
diff --git a/lib/sha256.c b/lib/sha256.c
new file mode 100644
index 000000000000..4400c832e2aa
--- /dev/null
+++ b/lib/sha256.c
@@ -0,0 +1,283 @@
+/*
+ * SHA-256, as specified in
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ *
+ * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/sha256.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+static inline u32 Ch(u32 x, u32 y, u32 z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline u32 Maj(u32 x, u32 y, u32 z)
+{
+ return (x & y) | (z & (x | y));
+}
+
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+ W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+ W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+}
+
+static void sha256_transform(u32 *state, const u8 *input)
+{
+ u32 a, b, c, d, e, f, g, h, t1, t2;
+ u32 W[64];
+ int i;
+
+ /* load the input */
+ for (i = 0; i < 16; i++)
+ LOAD_OP(i, W, input);
+
+ /* now blend */
+ for (i = 16; i < 64; i++)
+ BLEND_OP(i, W);
+
+ /* load the state into our registers */
+ a = state[0]; b = state[1]; c = state[2]; d = state[3];
+ e = state[4]; f = state[5]; g = state[6]; h = state[7];
+
+ /* now iterate */
+ t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+ state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+
+ /* clear any sensitive info... */
+ a = b = c = d = e = f = g = h = t1 = t2 = 0;
+ memset(W, 0, 64 * sizeof(u32));
+}
+
+int sha256_init(struct sha256_state *sctx)
+{
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+{
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
+
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buf + partial, src, len - done);
+
+ return 0;
+}
+
+int sha256_final(struct sha256_state *sctx, u8 *out)
+{
+ __be32 *dst = (__be32 *)out;
+ __be64 bits;
+ unsigned int index, pad_len;
+ int i;
+ static const u8 padding[64] = { 0x80, };
+
+ /* Save number of bits */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha256_update(sctx, padding, pad_len);
+
+ /* Append length (before padding) */
+ sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
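A minimal sketch of driving the new lib/sha256 helpers; the buffer names are arbitrary, and the return values are ignored since all three functions above currently return 0.

#include <linux/sha256.h>

static void example_digest(const u8 *buf, unsigned int len, u8 out[32])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, buf, len);	/* may be called repeatedly */
	sha256_final(&sctx, out);	/* writes 32 bytes and zeroizes sctx */
}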
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
deleted file mode 100644
index c43ec2271469..000000000000
--- a/lib/swiotlb.c
+++ /dev/null
@@ -1,1135 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is a fallback for platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
- * Copyright (C) 2000, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
- * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
- * unnecessary i-cache flushing.
- * 04/07/.. ak Better overflow handling. Assorted fixes.
- * 05/09/10 linville Add support for syncing ranges, support syncing for
- * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
- * 08/12/11 beckyb Add highmem support
- */
-
-#include <linux/cache.h>
-#include <linux/dma-direct.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/swiotlb.h>
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/iommu-helper.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/swiotlb.h>
-
-#define OFFSET(val,align) ((unsigned long) \
- ( (val) & ( (align) - 1)))
-
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-
-/*
- * Minimum IO TLB size to bother booting with. Systems with mainly
- * 64bit capable cards will only lightly use the swiotlb. If we can't
- * allocate a contiguous 1MB, we're probably in trouble anyway.
- */
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-
-enum swiotlb_force swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static phys_addr_t io_tlb_start, io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
-
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * Max segment that we can provide which (if pages are contiguous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-unsigned int max_segment;
-
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-static phys_addr_t *io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int late_alloc;
-
-static int __init
-setup_io_tlb_npages(char *str)
-{
- if (isdigit(*str)) {
- io_tlb_nslabs = simple_strtoul(str, &str, 0);
- /* avoid tail segment of size < IO_TLB_SEGSIZE */
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
- if (*str == ',')
- ++str;
- if (!strcmp(str, "force")) {
- swiotlb_force = SWIOTLB_FORCE;
- } else if (!strcmp(str, "noforce")) {
- swiotlb_force = SWIOTLB_NO_FORCE;
- io_tlb_nslabs = 1;
- }
-
- return 0;
-}
-early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
-
-unsigned long swiotlb_nr_tbl(void)
-{
- return io_tlb_nslabs;
-}
-EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-
-unsigned int swiotlb_max_segment(void)
-{
- return max_segment;
-}
-EXPORT_SYMBOL_GPL(swiotlb_max_segment);
-
-void swiotlb_set_max_segment(unsigned int val)
-{
- if (swiotlb_force == SWIOTLB_FORCE)
- max_segment = 1;
- else
- max_segment = rounddown(val, PAGE_SIZE);
-}
-
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-unsigned long swiotlb_size_or_default(void)
-{
- unsigned long size;
-
- size = io_tlb_nslabs << IO_TLB_SHIFT;
-
- return size ? size : (IO_TLB_DEFAULT_SIZE);
-}
-
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
- phys_addr_t address)
-{
- return __sme_clr(phys_to_dma(hwdev, address));
-}
-
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
- volatile void *address)
-{
- return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
-static bool no_iotlb_memory;
-
-void swiotlb_print_info(void)
-{
- unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
- unsigned char *vstart, *vend;
-
- if (no_iotlb_memory) {
- pr_warn("software IO TLB: No low mem\n");
- return;
- }
-
- vstart = phys_to_virt(io_tlb_start);
- vend = phys_to_virt(io_tlb_end);
-
- printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
- (unsigned long long)io_tlb_start,
- (unsigned long long)io_tlb_end,
- bytes >> 20, vstart, vend - 1);
-}
-
-/*
- * Early SWIOTLB allocation may be too early to allow an architecture to
- * perform the desired operations. This function allows the architecture to
- * call SWIOTLB when the operations are possible. It needs to be called
- * before the SWIOTLB memory is used.
- */
-void __init swiotlb_update_mem_attributes(void)
-{
- void *vaddr;
- unsigned long bytes;
-
- if (no_iotlb_memory || late_alloc)
- return;
-
- vaddr = phys_to_virt(io_tlb_start);
- bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
- swiotlb_set_mem_attributes(vaddr, bytes);
- memset(vaddr, 0, bytes);
-
- vaddr = phys_to_virt(io_tlb_overflow_buffer);
- bytes = PAGE_ALIGN(io_tlb_overflow);
- swiotlb_set_mem_attributes(vaddr, bytes);
- memset(vaddr, 0, bytes);
-}
-
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
-{
- void *v_overflow_buffer;
- unsigned long i, bytes;
-
- bytes = nslabs << IO_TLB_SHIFT;
-
- io_tlb_nslabs = nslabs;
- io_tlb_start = __pa(tlb);
- io_tlb_end = io_tlb_start + bytes;
-
- /*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = memblock_virt_alloc_low_nopanic(
- PAGE_ALIGN(io_tlb_overflow),
- PAGE_SIZE);
- if (!v_overflow_buffer)
- return -ENOMEM;
-
- io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
- /*
- * Allocate and initialize the free list array. This array is used
- * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
- * between io_tlb_start and io_tlb_end.
- */
- io_tlb_list = memblock_virt_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
- PAGE_SIZE);
- io_tlb_orig_addr = memblock_virt_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
- PAGE_SIZE);
- for (i = 0; i < io_tlb_nslabs; i++) {
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- io_tlb_index = 0;
-
- if (verbose)
- swiotlb_print_info();
-
- swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
- return 0;
-}
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init(int verbose)
-{
- size_t default_size = IO_TLB_DEFAULT_SIZE;
- unsigned char *vstart;
- unsigned long bytes;
-
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
- /* Get IO TLB memory from the low pages */
- vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
- return;
-
- if (io_tlb_start)
- memblock_free_early(io_tlb_start,
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- pr_warn("Cannot allocate SWIOTLB buffer");
- no_iotlb_memory = true;
-}
-
-/*
- * Systems with larger DMA zones (those that don't support ISA) can
- * initialize the swiotlb later using the slab allocator if needed.
- * This should be just like above, but with some error catching.
- */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
-{
- unsigned long bytes, req_nslabs = io_tlb_nslabs;
- unsigned char *vstart = NULL;
- unsigned int order;
- int rc = 0;
-
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- /*
- * Get IO TLB memory from the low pages
- */
- order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
- io_tlb_nslabs = SLABS_PER_PAGE << order;
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
- order);
- if (vstart)
- break;
- order--;
- }
-
- if (!vstart) {
- io_tlb_nslabs = req_nslabs;
- return -ENOMEM;
- }
- if (order != get_order(bytes)) {
- printk(KERN_WARNING "Warning: only able to allocate %ld MB "
- "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
- io_tlb_nslabs = SLABS_PER_PAGE << order;
- }
- rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
- if (rc)
- free_pages((unsigned long)vstart, order);
-
- return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
- unsigned long i, bytes;
- unsigned char *v_overflow_buffer;
-
- bytes = nslabs << IO_TLB_SHIFT;
-
- io_tlb_nslabs = nslabs;
- io_tlb_start = virt_to_phys(tlb);
- io_tlb_end = io_tlb_start + bytes;
-
- swiotlb_set_mem_attributes(tlb, bytes);
- memset(tlb, 0, bytes);
-
- /*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
- get_order(io_tlb_overflow));
- if (!v_overflow_buffer)
- goto cleanup2;
-
- swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
- memset(v_overflow_buffer, 0, io_tlb_overflow);
- io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
- /*
- * Allocate and initialize the free list array. This array is used
- * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
- * between io_tlb_start and io_tlb_end.
- */
- io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(int)));
- if (!io_tlb_list)
- goto cleanup3;
-
- io_tlb_orig_addr = (phys_addr_t *)
- __get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs *
- sizeof(phys_addr_t)));
- if (!io_tlb_orig_addr)
- goto cleanup4;
-
- for (i = 0; i < io_tlb_nslabs; i++) {
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- io_tlb_index = 0;
-
- swiotlb_print_info();
-
- late_alloc = 1;
-
- swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-
- return 0;
-
-cleanup4:
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
- io_tlb_list = NULL;
-cleanup3:
- free_pages((unsigned long)v_overflow_buffer,
- get_order(io_tlb_overflow));
- io_tlb_overflow_buffer = 0;
-cleanup2:
- io_tlb_end = 0;
- io_tlb_start = 0;
- io_tlb_nslabs = 0;
- max_segment = 0;
- return -ENOMEM;
-}
-
-void __init swiotlb_exit(void)
-{
- if (!io_tlb_orig_addr)
- return;
-
- if (late_alloc) {
- free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
- get_order(io_tlb_overflow));
- free_pages((unsigned long)io_tlb_orig_addr,
- get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
- free_pages((unsigned long)phys_to_virt(io_tlb_start),
- get_order(io_tlb_nslabs << IO_TLB_SHIFT));
- } else {
- memblock_free_late(io_tlb_overflow_buffer,
- PAGE_ALIGN(io_tlb_overflow));
- memblock_free_late(__pa(io_tlb_orig_addr),
- PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
- memblock_free_late(__pa(io_tlb_list),
- PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
- memblock_free_late(io_tlb_start,
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- }
- io_tlb_nslabs = 0;
- max_segment = 0;
-}
-
-int is_swiotlb_buffer(phys_addr_t paddr)
-{
- return paddr >= io_tlb_start && paddr < io_tlb_end;
-}
-
-/*
- * Bounce: copy the swiotlb buffer back to the original dma location
- */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(orig_addr);
- unsigned char *vaddr = phys_to_virt(tlb_addr);
-
- if (PageHighMem(pfn_to_page(pfn))) {
- /* The buffer does not have a mapping. Map it in and copy */
- unsigned int offset = orig_addr & ~PAGE_MASK;
- char *buffer;
- unsigned int sz = 0;
- unsigned long flags;
-
- while (size) {
- sz = min_t(size_t, PAGE_SIZE - offset, size);
-
- local_irq_save(flags);
- buffer = kmap_atomic(pfn_to_page(pfn));
- if (dir == DMA_TO_DEVICE)
- memcpy(vaddr, buffer + offset, sz);
- else
- memcpy(buffer + offset, vaddr, sz);
- kunmap_atomic(buffer);
- local_irq_restore(flags);
-
- size -= sz;
- pfn++;
- vaddr += sz;
- offset = 0;
- }
- } else if (dir == DMA_TO_DEVICE) {
- memcpy(vaddr, phys_to_virt(orig_addr), size);
- } else {
- memcpy(phys_to_virt(orig_addr), vaddr, size);
- }
-}
-
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- unsigned long flags;
- phys_addr_t tlb_addr;
- unsigned int nslots, stride, index, wrap;
- int i;
- unsigned long mask;
- unsigned long offset_slots;
- unsigned long max_slots;
-
- if (no_iotlb_memory)
- panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
- if (mem_encrypt_active())
- pr_warn_once("%s is active and system is using DMA bounce buffers\n",
- sme_active() ? "SME" : "SEV");
-
- mask = dma_get_seg_boundary(hwdev);
-
- tbl_dma_addr &= mask;
-
- offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-
- /*
- * Carefully handle integer overflow which can occur when mask == ~0UL.
- */
- max_slots = mask + 1
- ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
- : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
-
- /*
- * For mappings greater than or equal to a page, we limit the stride
- * (and hence alignment) to a page size.
- */
- nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size >= PAGE_SIZE)
- stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
- else
- stride = 1;
-
- BUG_ON(!nslots);
-
- /*
- * Find suitable number of IO TLB entries size that will fit this
- * request and allocate a buffer from that IO TLB pool.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- index = ALIGN(io_tlb_index, stride);
- if (index >= io_tlb_nslabs)
- index = 0;
- wrap = index;
-
- do {
- while (iommu_is_span_boundary(index, nslots, offset_slots,
- max_slots)) {
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- if (index == wrap)
- goto not_found;
- }
-
- /*
- * If we find a slot that indicates we have 'nslots' number of
- * contiguous buffers, we allocate the buffers from that slot
- * and mark the entries as '0' indicating unavailable.
- */
- if (io_tlb_list[index] >= nslots) {
- int count = 0;
-
- for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[i] = 0;
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
- /*
- * Update the indices to avoid searching in the next
- * round.
- */
- io_tlb_index = ((index + nslots) < io_tlb_nslabs
- ? (index + nslots) : 0);
-
- goto found;
- }
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- } while (index != wrap);
-
-not_found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
- return SWIOTLB_MAP_ERROR;
-found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-
- /*
- * Save away the mapping from the original address to the DMA address.
- * This is needed when we sync the memory. Then we sync the buffer if
- * needed.
- */
- for (i = 0; i < nslots; i++)
- io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
-
- return tlb_addr;
-}
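To make the slot accounting above concrete, a worked example under the usual constants (IO_TLB_SHIFT == 11, i.e. 2 KiB slabs, and PAGE_SHIFT == 12; both are assumptions, since neither is spelled out in this hunk):

/*
 * Mapping size = 9000 bytes:
 *   nslots = ALIGN(9000, 1 << 11) >> 11 = 10240 >> 11 = 5
 *   size >= PAGE_SIZE, so stride = 1 << (12 - 11)     = 2
 * The search therefore probes every second index for a run of five free
 * 2 KiB slabs whose span does not cross the device's segment boundary
 * (max_slots), wrapping around once before giving up.
 */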
-
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_addr_t start_dma_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE) {
- dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
- &phys);
- return SWIOTLB_MAP_ERROR;
- }
-
- start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
- return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
- dir, attrs);
-}
-
-/*
- * dma_addr is the kernel virtual address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- unsigned long flags;
- int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
-
- /*
- * Return the buffer to the free list by setting the corresponding
- * entries to indicate the number of contiguous entries available.
- * While returning the entries to the free list, we merge the entries
- * with slots below and above the pool being returned.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- {
- count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
- io_tlb_list[index + nslots] : 0);
- /*
- * Step 1: return the slots to the free list, merging the
- * slots with succeeding slots
- */
- for (i = index + nslots - 1; i >= index; i--) {
- io_tlb_list[i] = ++count;
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- /*
- * Step 2: merge the returned slots with the preceding slots,
- * if available (non-zero)
- */
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- }
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
-
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- if (orig_addr == INVALID_PHYS_ADDR)
- return;
- orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
-
- switch (target) {
- case SYNC_FOR_CPU:
- if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr,
- size, DMA_FROM_DEVICE);
- else
- BUG_ON(dir != DMA_TO_DEVICE);
- break;
- case SYNC_FOR_DEVICE:
- if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr,
- size, DMA_TO_DEVICE);
- else
- BUG_ON(dir != DMA_FROM_DEVICE);
- break;
- default:
- BUG();
- }
-}
-
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
- size_t size)
-{
- u64 mask = DMA_BIT_MASK(32);
-
- if (dev && dev->coherent_dma_mask)
- mask = dev->coherent_dma_mask;
- return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned long attrs)
-{
- phys_addr_t phys_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- goto out_warn;
-
- phys_addr = swiotlb_tbl_map_single(dev,
- swiotlb_phys_to_dma(dev, io_tlb_start),
- 0, size, DMA_FROM_DEVICE, 0);
- if (phys_addr == SWIOTLB_MAP_ERROR)
- goto out_warn;
-
- *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
- if (dma_coherent_ok(dev, *dma_handle, size))
- goto out_unmap;
-
- memset(phys_to_virt(phys_addr), 0, size);
- return phys_to_virt(phys_addr);
-
-out_unmap:
- dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
- (unsigned long long)*dma_handle);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
- if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
- dev_warn(dev,
- "swiotlb: coherent allocation failed, size=%zu\n",
- size);
- dump_stack();
- }
- return NULL;
-}
-
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
-{
- int order = get_order(size);
- unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
- void *ret;
-
- ret = (void *)__get_free_pages(flags, order);
- if (ret) {
- *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
- if (dma_coherent_ok(hwdev, *dma_handle, size)) {
- memset(ret, 0, size);
- return ret;
- }
- free_pages((unsigned long)ret, order);
- }
-
- return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
-}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
- dma_addr_t dma_addr)
-{
- phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
- WARN_ON_ONCE(irqs_disabled());
-
- if (!is_swiotlb_buffer(phys_addr))
- return false;
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- return true;
-}
-
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr)
-{
- if (!swiotlb_free_buffer(hwdev, size, dev_addr))
- free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
- int do_panic)
-{
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- return;
-
- /*
- * Ran out of IOMMU space for this operation. This is very bad.
- * Unfortunately drivers cannot handle this operation properly
- * unless they check for dma_mapping_error() (most don't).
- * When the mapping is small enough, return a static buffer to limit
- * the damage, or panic when the transfer is too big.
- */
- dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
- size);
-
- if (size <= io_tlb_overflow || !do_panic)
- return;
-
- if (dir == DMA_BIDIRECTIONAL)
- panic("DMA: Random memory could be DMA accessed\n");
- if (dir == DMA_FROM_DEVICE)
- panic("DMA: Random memory could be DMA written\n");
- if (dir == DMA_TO_DEVICE)
- panic("DMA: Random memory could be DMA read\n");
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode. The
- * physical address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
- */
-dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = phys_to_dma(dev, phys);
-
- BUG_ON(dir == DMA_NONE);
- /*
- * If the address happens to be in the device's DMA window,
- * we can safely return the device addr and not worry about bounce
- * buffering it.
- */
- if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
- return dev_addr;
-
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
- /* Oh well, have to allocate and map a bounce buffer. */
- map = map_single(dev, phys, size, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- swiotlb_full(dev, size, dir, 1);
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
- }
-
- dev_addr = swiotlb_phys_to_dma(dev, map);
-
- /* Ensure that the address returned is DMA'ble */
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
-}
-
-/*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_page call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
- return;
- }
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- /*
- * phys_to_virt doesn't work with highmem pages, but we could
- * call dma_mark_clean() with a highmem page here. However, we
- * are fine since dma_mark_clean() is a no-op on POWERPC. We can
- * make dma_mark_clean() take a physical address if necessary.
- */
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
-static void
-swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
- return;
- }
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for swiotlb_map_page are the
- * same here.
- */
-int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i) {
- phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
- if (swiotlb_force == SWIOTLB_FORCE ||
- !dma_capable(hwdev, dev_addr, sg->length)) {
- phys_addr_t map = map_single(hwdev, sg_phys(sg),
- sg->length, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- swiotlb_full(hwdev, sg->length, dir, 0);
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
- attrs);
- sg_dma_len(sgl) = 0;
- return 0;
- }
- sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
- } else
- sg->dma_address = dev_addr;
- sg_dma_len(sg) = sg->length;
- }
- return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i)
- unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
- attrs);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i)
- swiotlb_sync_single(hwdev, sg->dma_address,
- sg_dma_len(sg), dir, target);
-}
-
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
-}
-
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.
- */
-int
-swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
- return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
-}
-
-#ifdef CONFIG_DMA_DIRECT_OPS
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- void *vaddr;
-
- /* temporary workaround: */
- if (gfp & __GFP_NOWARN)
- attrs |= DMA_ATTR_NO_WARN;
-
- /*
- * Don't print a warning when the first allocation attempt fails.
- * swiotlb_alloc_coherent() will print a warning when the DMA memory
- * allocation ultimately failed.
- */
- gfp |= __GFP_NOWARN;
-
- vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
- if (!vaddr)
- vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
- return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- if (!swiotlb_free_buffer(dev, size, dma_addr))
- dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
- .mapping_error = swiotlb_dma_mapping_error,
- .alloc = swiotlb_alloc,
- .free = swiotlb_free,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .dma_supported = swiotlb_dma_supported,
-};
-#endif /* CONFIG_DMA_DIRECT_OPS */
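The swiotlb code removed above sits behind the generic streaming DMA API: drivers never call swiotlb_map_page() directly, they call dma_map_page()/dma_unmap_page() and are bounced through the IO TLB only when the device cannot address the buffer. A minimal sketch of that driver-side pattern follows; the helper name, fixed direction and error handling are illustrative assumptions, not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: stream one page to a device, bouncing if needed. */
static int example_dma_to_device(struct device *dev, struct page *page,
                                 size_t len)
{
        dma_addr_t handle;

        /* swiotlb may transparently substitute a bounce buffer here. */
        handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand 'handle' to the device and wait for completion ... */

        dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}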
diff --git a/lib/test_bitfield.c b/lib/test_bitfield.c
new file mode 100644
index 000000000000..5b8f4108662d
--- /dev/null
+++ b/lib/test_bitfield.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ if (_res != res) { \
+ pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
+ (u64)_res); \
+ return -EINVAL; \
+ } \
+ if (u##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_le##tp(res)) { \
+ pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (le##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_be##tp(res)) { \
+ pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (be##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static int test_constants(void)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+
+ return 0;
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
+ return -EINVAL; \
+ } while (0)
+
+static int test_variables(void)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+
+ return 0;
+}
+
+static int __init test_bitfields(void)
+{
+ int ret = test_constants();
+
+ if (ret) {
+ pr_warn("constant tests failed!\n");
+ return ret;
+ }
+
+ ret = test_variables();
+ if (ret) {
+ pr_warn("variable tests failed!\n");
+ return ret;
+ }
+
+#ifdef TEST_BITFIELD_COMPILE
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+#endif
+
+ pr_info("tests passed\n");
+
+ return 0;
+}
+module_init(test_bitfields)
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
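The helpers exercised by test_bitfield.c come from <linux/bitfield.h>: the {u8,u16,u32,u64,le*,be*}_encode_bits() functions shift a value into a field described by a contiguous mask, and the matching *_get_bits() extract it again. A minimal usage sketch, with a register layout invented purely for illustration:

#include <linux/bitfield.h>

/* Hypothetical register layout; masks chosen only for illustration. */
#define EXAMPLE_REG_MODE        0x0000000f      /* bits 3:0 */
#define EXAMPLE_REG_CHANNEL     0x00000ff0      /* bits 11:4 */

static u32 example_pack(u8 mode, u8 channel)
{
        /* Shift each value into place under its (contiguous) mask. */
        return u32_encode_bits(mode, EXAMPLE_REG_MODE) |
               u32_encode_bits(channel, EXAMPLE_REG_CHANNEL);
}

static u8 example_channel(u32 reg)
{
        /* Pull the field back out of the packed register value. */
        return u32_get_bits(reg, EXAMPLE_REG_CHANNEL);
}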
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index b3f235baa05d..6cd7d0740005 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
+ {-EINVAL, "0-31:", NULL, 8, 0},
+ {-EINVAL, "0-31:0", NULL, 8, 0},
+ {-EINVAL, "0-31:0/0", NULL, 8, 0},
+ {-EINVAL, "0-31:1/0", NULL, 8, 0},
{-EINVAL, "0-31:10/1", NULL, 8, 0},
};
@@ -292,15 +296,17 @@ static void __init test_bitmap_parselist(void)
}
}
+#define EXP_BYTES (sizeof(exp) * 8)
+
static void __init test_bitmap_arr32(void)
{
- unsigned int nbits, next_bit, len = sizeof(exp) * 8;
+ unsigned int nbits, next_bit;
u32 arr[sizeof(exp) / 4];
- DECLARE_BITMAP(bmap2, len);
+ DECLARE_BITMAP(bmap2, EXP_BYTES);
memset(arr, 0xa5, sizeof(arr));
- for (nbits = 0; nbits < len; ++nbits) {
+ for (nbits = 0; nbits < EXP_BYTES; ++nbits) {
bitmap_to_arr32(arr, exp, nbits);
bitmap_from_arr32(bmap2, arr, nbits);
expect_eq_bitmap(bmap2, exp, nbits);
@@ -312,7 +318,7 @@ static void __init test_bitmap_arr32(void)
" tail is not safely cleared: %d\n",
nbits, next_bit);
- if (nbits < len - 32)
+ if (nbits < EXP_BYTES - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
0xa5a5a5a5);
}
@@ -325,23 +331,32 @@ static void noinline __init test_mem_optimisations(void)
unsigned int start, nbits;
for (start = 0; start < 1024; start += 8) {
- memset(bmap1, 0x5a, sizeof(bmap1));
- memset(bmap2, 0x5a, sizeof(bmap2));
for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+ memset(bmap1, 0x5a, sizeof(bmap1));
+ memset(bmap2, 0x5a, sizeof(bmap2));
+
bitmap_set(bmap1, start, nbits);
__bitmap_set(bmap2, start, nbits);
- if (!bitmap_equal(bmap1, bmap2, 1024))
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
printk("set not equal %d %d\n", start, nbits);
- if (!__bitmap_equal(bmap1, bmap2, 1024))
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
printk("set not __equal %d %d\n", start, nbits);
+ failed_tests++;
+ }
bitmap_clear(bmap1, start, nbits);
__bitmap_clear(bmap2, start, nbits);
- if (!bitmap_equal(bmap1, bmap2, 1024))
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
printk("clear not equal %d %d\n", start, nbits);
- if (!__bitmap_equal(bmap1, bmap2, 1024))
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
printk("clear not __equal %d %d\n", start,
nbits);
+ failed_tests++;
+ }
}
}
}
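The arr32 test above round-trips a bitmap through a u32 array; outside the test, bitmap_to_arr32()/bitmap_from_arr32() are what you use to exchange bitmaps with fixed-width, word-order-independent structures. A minimal sketch, with the bit count and set range picked arbitrarily:

#include <linux/kernel.h>
#include <linux/bitmap.h>

#define EXAMPLE_NBITS 72        /* arbitrary; spans three u32 words */

static void example_bitmap_roundtrip(void)
{
        DECLARE_BITMAP(src, EXAMPLE_NBITS);
        DECLARE_BITMAP(dst, EXAMPLE_NBITS);
        u32 arr[DIV_ROUND_UP(EXAMPLE_NBITS, 32)];

        bitmap_zero(src, EXAMPLE_NBITS);
        bitmap_set(src, 5, 20);                         /* set bits 5..24 */

        bitmap_to_arr32(arr, src, EXAMPLE_NBITS);       /* bitmap -> u32 words */
        bitmap_from_arr32(dst, arr, EXAMPLE_NBITS);     /* u32 words -> bitmap */

        WARN_ON(!bitmap_equal(src, dst, EXAMPLE_NBITS));
}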
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3e9335493fe4..08d3d59dca17 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -356,29 +356,22 @@ static int bpf_fill_maxinsns11(struct bpf_test *self)
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
-static int bpf_fill_ja(struct bpf_test *self)
-{
- /* Hits exactly 11 passes on x86_64 JIT. */
- return __bpf_fill_ja(self, 12, 9);
-}
-
-static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+static int bpf_fill_maxinsns12(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
- int i;
+ int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- for (i = 0; i < len - 1; i += 2) {
- insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
- insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
- SKF_AD_OFF + SKF_AD_CPU);
- }
+ insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
- insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -386,50 +379,22 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
return 0;
}
-#define PUSH_CNT 68
-/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
-static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+static int bpf_fill_maxinsns13(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
- int i = 0, j, k = 0;
+ struct sock_filter *insn;
+ int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- insn[i++] = BPF_MOV64_REG(R6, R1);
-loop:
- for (j = 0; j < PUSH_CNT; j++) {
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
- i++;
- insn[i++] = BPF_MOV64_REG(R1, R6);
- insn[i++] = BPF_MOV64_IMM(R2, 1);
- insn[i++] = BPF_MOV64_IMM(R3, 2);
- insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_push_proto.func - __bpf_call_base);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
- i++;
- }
-
- for (j = 0; j < PUSH_CNT; j++) {
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
- i++;
- insn[i++] = BPF_MOV64_REG(R1, R6);
- insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_pop_proto.func - __bpf_call_base);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
- i++;
- }
- if (++k < 5)
- goto loop;
-
- for (; i < len - 1; i++)
- insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+ for (i = 0; i < len - 3; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
- insn[len - 1] = BPF_EXIT_INSN();
+ insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
+ insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -437,58 +402,29 @@ loop:
return 0;
}
-static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+static int bpf_fill_ja(struct bpf_test *self)
{
- struct bpf_insn *insn;
-
- insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
- if (!insn)
- return -ENOMEM;
-
- /* Due to func address being non-const, we need to
- * assemble this here.
- */
- insn[0] = BPF_MOV64_REG(R6, R1);
- insn[1] = BPF_LD_ABS(BPF_B, 0);
- insn[2] = BPF_LD_ABS(BPF_H, 0);
- insn[3] = BPF_LD_ABS(BPF_W, 0);
- insn[4] = BPF_MOV64_REG(R7, R6);
- insn[5] = BPF_MOV64_IMM(R6, 0);
- insn[6] = BPF_MOV64_REG(R1, R7);
- insn[7] = BPF_MOV64_IMM(R2, 1);
- insn[8] = BPF_MOV64_IMM(R3, 2);
- insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_push_proto.func - __bpf_call_base);
- insn[10] = BPF_MOV64_REG(R6, R7);
- insn[11] = BPF_LD_ABS(BPF_B, 0);
- insn[12] = BPF_LD_ABS(BPF_H, 0);
- insn[13] = BPF_LD_ABS(BPF_W, 0);
- insn[14] = BPF_MOV64_IMM(R0, 42);
- insn[15] = BPF_EXIT_INSN();
-
- self->u.ptr.insns = insn;
- self->u.ptr.len = 16;
-
- return 0;
+ /* Hits exactly 11 passes on x86_64 JIT. */
+ return __bpf_fill_ja(self, 12, 9);
}
-static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
- int i = 0;
+ struct sock_filter *insn;
+ int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- insn[i++] = BPF_MOV64_REG(R6, R1);
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
- i++;
- while (i < len - 1)
- insn[i++] = BPF_LD_ABS(BPF_B, 1);
- insn[i] = BPF_EXIT_INSN();
+ for (i = 0; i < len - 1; i += 2) {
+ insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+ insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -1988,40 +1924,6 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
- "INT: DIV + ABS",
- .u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, R2, 2),
- BPF_ALU32_REG(BPF_DIV, R0, R2),
- BPF_ALU64_REG(BPF_MOV, R8, R0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, R8, R0),
- BPF_LD_IND(BPF_B, R8, -70),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 10, 20, 30, 40, 50 },
- { { 4, 0 }, { 5, 10 } }
- },
- {
- /* This one doesn't go through verifier, but is just raw insn
- * as opposed to cBPF tests from here. Thus div by 0 tests are
- * done in test_verifier in BPF kselftests.
- */
- "INT: DIV by -1",
- .u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_ALU64_IMM(BPF_MOV, R7, -1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU32_REG(BPF_DIV, R0, R7),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 10, 20, 30, 40, 50 },
- { { 3, 0 }, { 4, 0 } }
- },
- {
"check: missing ret",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
@@ -2383,50 +2285,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } }
},
- {
- "nmap reduced",
- .u.insns_int = {
- BPF_MOV64_REG(R6, R1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
- BPF_MOV32_IMM(R0, 18),
- BPF_STX_MEM(BPF_W, R10, R0, -64),
- BPF_LDX_MEM(BPF_W, R7, R10, -64),
- BPF_LD_IND(BPF_W, R7, 14),
- BPF_STX_MEM(BPF_W, R10, R0, -60),
- BPF_MOV32_IMM(R0, 280971478),
- BPF_STX_MEM(BPF_W, R10, R0, -56),
- BPF_LDX_MEM(BPF_W, R7, R10, -56),
- BPF_LDX_MEM(BPF_W, R0, R10, -60),
- BPF_ALU32_REG(BPF_SUB, R0, R7),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
- BPF_MOV32_IMM(R0, 22),
- BPF_STX_MEM(BPF_W, R10, R0, -56),
- BPF_LDX_MEM(BPF_W, R7, R10, -56),
- BPF_LD_IND(BPF_H, R7, 14),
- BPF_STX_MEM(BPF_W, R10, R0, -52),
- BPF_MOV32_IMM(R0, 17366),
- BPF_STX_MEM(BPF_W, R10, R0, -48),
- BPF_LDX_MEM(BPF_W, R7, R10, -48),
- BPF_LDX_MEM(BPF_W, R0, R10, -52),
- BPF_ALU32_REG(BPF_SUB, R0, R7),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
- BPF_MOV32_IMM(R0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(R0, 0),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
- { { 38, 256 } },
- .stack_depth = 64,
- },
/* BPF_ALU | BPF_MOV | BPF_X */
{
"ALU_MOV_X: dst = 2",
@@ -5424,21 +5282,31 @@ static struct bpf_test tests[] = {
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
CLASSIC,
+#endif
{ },
{
{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
},
.fill_helper = bpf_fill_maxinsns6,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
CLASSIC | FLAG_NO_DATA,
+#endif
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
@@ -5478,28 +5346,39 @@ static struct bpf_test tests[] = {
.expected_errcode = -ENOTSUPP,
},
{
- "BPF_MAXINSNS: ld_abs+get_processor_id",
+ "BPF_MAXINSNS: jump over MSH",
{ },
- CLASSIC,
- { },
- { { 1, 0xbee } },
- .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xabababab } },
+ .fill_helper = bpf_fill_maxinsns12,
+ .expected_errcode = -EINVAL,
},
{
- "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+ "BPF_MAXINSNS: exec all MSH",
{ },
- INTERNAL,
- { 0x34 },
- { { ETH_HLEN, 0xbef } },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
+ CLASSIC,
+#endif
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xababab83 } },
+ .fill_helper = bpf_fill_maxinsns13,
+ .expected_errcode = -ENOTSUPP,
},
{
- "BPF_MAXINSNS: jump around ld_abs",
+ "BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
- INTERNAL,
- { 10, 11 },
- { { 2, 10 } },
- .fill_helper = bpf_fill_jump_around_ld_abs,
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
+ CLASSIC,
+#endif
+ { },
+ { { 1, 0xbee } },
+ .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ .expected_errcode = -ENOTSUPP,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
@@ -5683,6 +5562,53 @@ static struct bpf_test tests[] = {
{ {0x40, 0x05 } },
},
{
+ "LD_IND byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_IND byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
+ "LD_IND byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
"LD_IND halfword positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5731,6 +5657,39 @@ static struct bpf_test tests[] = {
{ {0x40, 0x66cc } },
},
{
+ "LD_IND halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_IND halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
"LD_IND word positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5821,6 +5780,39 @@ static struct bpf_test tests[] = {
{ {0x40, 0x66cc77dd } },
},
{
+ "LD_IND word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_IND word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
"LD_ABS byte",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
@@ -5838,6 +5830,68 @@ static struct bpf_test tests[] = {
{ {0x40, 0xcc } },
},
{
+ "LD_ABS byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_ABS byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS byte negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
"LD_ABS halfword",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
@@ -5872,6 +5926,55 @@ static struct bpf_test tests[] = {
{ {0x40, 0x99ff } },
},
{
+ "LD_ABS halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_ABS halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS halfword negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x1982 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
"LD_ABS word",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
@@ -5939,6 +6042,140 @@ static struct bpf_test tests[] = {
},
{ {0x40, 0x88ee99ff } },
},
+ {
+ "LD_ABS word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_ABS word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS word negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x25051982 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0xffeebbaa }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x175e9d63 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 1",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x14 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
/*
* verify that the interpreter or JIT correctly sets A and X
* to 0.
@@ -6127,14 +6364,6 @@ static struct bpf_test tests[] = {
{},
{ {0x1, 0x42 } },
},
- {
- "LD_ABS with helper changing skb data",
- { },
- INTERNAL,
- { 0x34 },
- { { ETH_HLEN, 42 } },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
- },
/* Checking interpreter vs JIT wrt signed extended imms. */
{
"JNE signed compare, test 1",
@@ -6574,6 +6803,93 @@ static bool exclude_test(int test_id)
return test_id < test_range[0] || test_id > test_range[1];
}
+static __init struct sk_buff *build_test_skb(void)
+{
+ u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
+ struct sk_buff *skb[2];
+ struct page *page[2];
+ int i, data_size = 8;
+
+ for (i = 0; i < 2; i++) {
+ page[i] = alloc_page(GFP_KERNEL);
+ if (!page[i]) {
+ if (i == 0)
+ goto err_page0;
+ else
+ goto err_page1;
+ }
+
+ /* this will set skb[i]->head_frag */
+ skb[i] = dev_alloc_skb(headroom + data_size);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], data_size);
+ skb[i]->protocol = htons(ETH_P_IP);
+ skb_reset_network_header(skb[i]);
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+
+ skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
+ // skb_headlen(skb[i]): 8, skb[i]->head_frag = 1
+ }
+
+ /* setup shinfo */
+ skb_shinfo(skb[0])->gso_size = 1448;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->data_len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ __free_page(page[1]);
+err_page1:
+ kfree_skb(skb[0]);
+err_skb0:
+ __free_page(page[0]);
+err_page0:
+ return NULL;
+}
+
+static __init int test_skb_segment(void)
+{
+ netdev_features_t features;
+ struct sk_buff *skb, *segs;
+ int ret = -1;
+
+ features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ features |= NETIF_F_RXCSUM;
+ skb = build_test_skb();
+ if (!skb) {
+ pr_info("%s: failed to build_test_skb", __func__);
+ goto done;
+ }
+
+ segs = skb_segment(skb, features);
+ if (!IS_ERR(segs)) {
+ kfree_skb_list(segs);
+ ret = 0;
+ pr_info("%s: success in skb_segment!", __func__);
+ } else {
+ pr_info("%s: failed in skb_segment!", __func__);
+ }
+ kfree_skb(skb);
+done:
+ return ret;
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
@@ -6632,9 +6948,11 @@ static int __init test_bpf_init(void)
return ret;
ret = test_bpf();
-
destroy_bpf_tests();
- return ret;
+ if (ret)
+ return ret;
+
+ return test_skb_segment();
}
static void __exit test_bpf_exit(void)
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index b9cdeecc19dc..d5a06addeb27 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -15,7 +15,7 @@ struct foo {
unsigned int bar;
};
-struct foo *foo;
+static struct foo *foo;
static int __init test_debug_virtual_init(void)
{
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 078a61480573..b984806d7d7b 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/vmalloc.h>
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
@@ -617,8 +618,9 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
mutex_lock(&test_fw_mutex);
- test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
- test_fw_config->num_requests * 2);
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out_unlock;
@@ -719,8 +721,9 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
mutex_lock(&test_fw_mutex);
- test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
- test_fw_config->num_requests * 2);
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out;
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 3f415d8101f3..626f580b4ff7 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -18,7 +18,7 @@ static const unsigned char data_b[] = {
static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
-static const char * const test_data_1_le[] __initconst = {
+static const char * const test_data_1[] __initconst = {
"be", "32", "db", "7b", "0a", "18", "93", "b2",
"70", "ba", "c4", "24", "7d", "83", "34", "9b",
"a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
@@ -32,16 +32,33 @@ static const char * const test_data_2_le[] __initconst = {
"d14c", "9919", "b143", "0caf",
};
+static const char * const test_data_2_be[] __initconst = {
+ "be32", "db7b", "0a18", "93b2",
+ "70ba", "c424", "7d83", "349b",
+ "a69c", "31ad", "9c0f", "ace9",
+ "4cd1", "1999", "43b1", "af0c",
+};
+
static const char * const test_data_4_le[] __initconst = {
"7bdb32be", "b293180a", "24c4ba70", "9b34837d",
"ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
};
+static const char * const test_data_4_be[] __initconst = {
+ "be32db7b", "0a1893b2", "70bac424", "7d83349b",
+ "a69c31ad", "9c0face9", "4cd11999", "43b1af0c",
+};
+
static const char * const test_data_8_le[] __initconst = {
"b293180a7bdb32be", "9b34837d24c4ba70",
"e9ac0f9cad319ca6", "0cafb1439919d14c",
};
+static const char * const test_data_8_be[] __initconst = {
+ "be32db7b0a1893b2", "70bac4247d83349b",
+ "a69c31ad9c0face9", "4cd1199943b1af0c",
+};
+
#define FILL_CHAR '#'
static unsigned total_tests __initdata;
@@ -56,6 +73,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
size_t l = len;
int gs = groupsize, rs = rowsize;
unsigned int i;
+ const bool is_be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
if (rs != 16 && rs != 32)
rs = 16;
@@ -67,13 +85,13 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
gs = 1;
if (gs == 8)
- result = test_data_8_le;
+ result = is_be ? test_data_8_be : test_data_8_le;
else if (gs == 4)
- result = test_data_4_le;
+ result = is_be ? test_data_4_be : test_data_4_le;
else if (gs == 2)
- result = test_data_2_le;
+ result = is_be ? test_data_2_be : test_data_2_le;
else
- result = test_data_1_le;
+ result = test_data_1;
/* hex dump */
p = test;
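The *_be tables added above are the expected hex_dump_to_buffer() output on big-endian kernels, where group sizes larger than one print the bytes of each group in the opposite order. A minimal sketch of the call being tested; the buffer contents and line size are arbitrary:

#include <linux/printk.h>

static void example_hexdump(const void *buf, size_t len)
{
        char line[200];

        /* One row: at most 16 bytes, grouped as 4-byte words, no ASCII column. */
        hex_dump_to_buffer(buf, len, 16, 4, line, sizeof(line), false);
        pr_info("dump: %s\n", line);
}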
diff --git a/lib/test_ida.c b/lib/test_ida.c
new file mode 100644
index 000000000000..2d1637d8136b
--- /dev/null
+++ b/lib/test_ida.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_ida.c: Test the IDA API
+ * Copyright (c) 2016-2018 Microsoft Corporation
+ * Copyright (c) 2018 Oracle Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifdef __KERNEL__
+void ida_dump(struct ida *ida) { }
+#endif
+#define IDA_BUG_ON(ida, x) do { \
+ tests_run++; \
+ if (x) { \
+ ida_dump(ida); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+
+/*
+ * Straightforward checks that allocating and freeing IDs work.
+ */
+static void ida_check_alloc(struct ida *ida)
+{
+ int i, id;
+
+ for (i = 0; i < 10000; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+
+ ida_free(ida, 20);
+ ida_free(ida, 21);
+ for (i = 0; i < 3; i++) {
+ id = ida_alloc(ida, GFP_KERNEL);
+ IDA_BUG_ON(ida, id < 0);
+ if (i == 2)
+ IDA_BUG_ON(ida, id != 10000);
+ }
+
+ for (i = 0; i < 5000; i++)
+ ida_free(ida, i);
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
+ ida_destroy(ida);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Destroy an IDA with a single entry at @base */
+static void ida_check_destroy_1(struct ida *ida, unsigned int base)
+{
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Check that ida_destroy and ida_is_empty work */
+static void ida_check_destroy(struct ida *ida)
+{
+ /* Destroy an already-empty IDA */
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ ida_check_destroy_1(ida, 0);
+ ida_check_destroy_1(ida, 1);
+ ida_check_destroy_1(ida, 1023);
+ ida_check_destroy_1(ida, 1024);
+ ida_check_destroy_1(ida, 12345678);
+}
+
+/*
+ * Check what happens when we fill a leaf and then delete it. This may
+ * discover mishandling of IDR_FREE.
+ */
+static void ida_check_leaf(struct ida *ida, unsigned int base)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_free(ida, 0);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
+ * Allocating up to 2^31-1 should succeed, and then allocating the next one
+ * should fail.
+ */
+static void ida_check_max(struct ida *ida)
+{
+ unsigned long i, j;
+
+ for (j = 1; j < 65537; j *= 2) {
+ unsigned long base = (1UL << 31) - j;
+ for (i = 0; i < j; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ -ENOSPC);
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+}
+
+/*
+ * Check handling of conversions between exceptional entries and full bitmaps.
+ */
+static void ida_check_conv(struct ida *ida)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
+ GFP_KERNEL) != i + BITS_PER_LONG);
+ ida_free(ida, i + 1);
+ ida_free(ida, i + BITS_PER_LONG);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+static int ida_checks(void)
+{
+ DEFINE_IDA(ida);
+
+ IDA_BUG_ON(&ida, !ida_is_empty(&ida));
+ ida_check_alloc(&ida);
+ ida_check_destroy(&ida);
+ ida_check_leaf(&ida, 0);
+ ida_check_leaf(&ida, 1024);
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
+}
+
+static void ida_exit(void)
+{
+}
+
+module_init(ida_checks);
+module_exit(ida_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
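test_ida.c targets the simplified IDA interface (ida_alloc(), ida_alloc_min(), ida_free(), ida_destroy()) rather than the older ida_simple_*() wrappers. A minimal sketch of typical caller-side use; the 'example' names are placeholders, not code from this patch:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
        /* Returns the smallest free ID >= 0, or a negative errno. */
        return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_free(&example_ida, id);
}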
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 98854a64b014..ec657105edbf 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -567,7 +567,15 @@ static noinline void __init kmem_cache_invalid_free(void)
return;
}
+ /* Trigger invalid free, the object doesn't get freed */
kmem_cache_free(cache, p + 1);
+
+ /*
+ * Properly free the object to prevent the "Objects remaining in
+ * test_cache on __kmem_cache_shutdown" BUG failure.
+ */
+ kmem_cache_free(cache, p);
+
kmem_cache_destroy(cache);
}
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 0e5b7a61460b..e3ddd836491f 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -779,8 +779,9 @@ static int kmod_config_sync_info(struct kmod_test_device *test_dev)
struct test_config *config = &test_dev->config;
free_test_dev_info(test_dev);
- test_dev->info = vzalloc(config->num_threads *
- sizeof(struct kmod_test_device_info));
+ test_dev->info =
+ vzalloc(array_size(sizeof(struct kmod_test_device_info),
+ config->num_threads));
if (!test_dev->info)
return -ENOMEM;
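This hunk and the test_firmware.c hunk earlier switch their allocations to the <linux/overflow.h> helpers that lib/test_overflow.c below exercises: array_size()/array3_size() multiply with overflow checking and saturate to SIZE_MAX, so the allocation fails cleanly instead of silently coming back too small. A minimal sketch with made-up callers:

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical: allocate 'nr' fixed-size records. */
static void *example_alloc_records(size_t nr, size_t rec_size)
{
        /* array_size() returns SIZE_MAX on overflow, so kmalloc() just fails. */
        return kmalloc(array_size(nr, rec_size), GFP_KERNEL);
}

/* Hypothetical: the underlying check_*_overflow() primitives used directly. */
static int example_checked_mul(size_t a, size_t b, size_t *out)
{
        if (check_mul_overflow(a, b, out))
                return -EOVERFLOW;
        return 0;
}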
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
new file mode 100644
index 000000000000..fc680562d8b6
--- /dev/null
+++ b/lib/test_overflow.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Test cases for arithmetic overflow checks.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define DEFINE_TEST_ARRAY(t) \
+ static const struct test_ ## t { \
+ t a, b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t ## _tests[] __initconst
+
+DEFINE_TEST_ARRAY(u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U8_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
+ {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
+ {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
+ {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
+ {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
+
+ {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
+ {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
+
+ {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
+ {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
+ {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
+ {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
+
+ {48, 32, 80, 16, 0, false, false, true},
+ {128, 128, 0, 0, 0, true, false, true},
+ {123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U16_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
+ {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
+ {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
+ {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
+ {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
+
+ {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
+ {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
+
+ {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
+ {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
+ {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
+ {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
+
+ {123, 234, 357, 65425, 28782, false, true, false},
+ {1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U32_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
+ {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
+ {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
+ {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
+ {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
+
+ {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
+ {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
+
+ {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
+ {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
+ {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
+ {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
+
+ {-2U, 1U, -1U, -3U, -2U, false, false, false},
+ {-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(u64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U64_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
+ {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
+ {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
+ {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
+ {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
+
+ {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
+ {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
+
+ {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
+ {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
+ {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
+ {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
+ {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
+ 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
+ false, true, false},
+ {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+
+DEFINE_TEST_ARRAY(s8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
+ {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
+ {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
+ {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
+
+ {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
+ {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
+ {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
+ {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
+ {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
+ {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
+
+ {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
+ {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
+ {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
+ {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
+
+ {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
+ {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
+
+ {-4, -32, -36, 28, -128, false, false, true},
+ {-4, 32, 28, -36, -128, false, false, false},
+};
+
+DEFINE_TEST_ARRAY(s16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
+ {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
+ {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
+ {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
+
+ {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
+ {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
+ {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
+ {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
+ {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
+ {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
+
+ {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
+ {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
+ {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
+ {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
+
+ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
+ {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
+ {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
+ {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
+ {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
+
+ {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
+ {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
+ {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
+ {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
+ {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
+ {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
+
+ {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
+ {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
+ {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
+ {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
+
+ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
+ {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
+ {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
+ {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
+ {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
+
+ {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
+ {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
+ {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
+ {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
+ {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
+ {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
+
+ {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
+ {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
+ {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
+ {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
+
+ {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
+ {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
+
+ {-1, -1, -2, 0, 1, false, false, false},
+ {-1, -128, -129, 127, 128, false, false, false},
+ {-128, -1, -129, -127, 128, false, false, false},
+ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
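
For readers skimming these tables: each row lists the two operands a and b, then the expected wrapped sum, difference and product, then the flags s_of, d_of and p_of saying whether check_add_overflow(), check_sub_overflow() and check_mul_overflow() are expected to report overflow (the field names show up in DEFINE_TEST_FUNC() below). A minimal, illustrative check of a single row against <linux/overflow.h> (not part of the patch) might look like:

#include <linux/overflow.h>
#include <linux/types.h>

/* Illustrative only: verify the "sum" column of one u8 row. */
static bool __init u8_row_holds(u8 a, u8 b, u8 sum, bool s_of)
{
	u8 result;

	/* check_add_overflow() returns true when the addition wrapped. */
	if (check_add_overflow(a, b, &result) != s_of)
		return false;
	return result == sum;
}

/* For example, u8_row_holds(U8_MAX, 1, 0, true) holds for the row above. */
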
+
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ t _r; \
+ bool _of; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ if (_of != of) { \
+ pr_warn("expected "fmt" "sym" "fmt \
+ " to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ err = 1; \
+ } \
+ if (_r != r) { \
+ pr_warn("expected "fmt" "sym" "fmt" == " \
+ fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ err = 1; \
+ } \
+} while (0)
+
+#define DEFINE_TEST_FUNC(t, fmt) \
+static int __init do_test_ ## t(const struct test_ ## t *p) \
+{ \
+ int err = 0; \
+ \
+ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
+ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
+ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
+ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
+ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ \
+ return err; \
+} \
+ \
+static int __init test_ ## t ## _overflow(void) { \
+ int err = 0; \
+ unsigned i; \
+ \
+ pr_info("%-3s: %zu arithmetic tests\n", #t, \
+ ARRAY_SIZE(t ## _tests)); \
+ for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
+ err |= do_test_ ## t(&t ## _tests[i]); \
+ return err; \
+}
+
+DEFINE_TEST_FUNC(u8, "%d");
+DEFINE_TEST_FUNC(s8, "%d");
+DEFINE_TEST_FUNC(u16, "%d");
+DEFINE_TEST_FUNC(s16, "%d");
+DEFINE_TEST_FUNC(u32, "%u");
+DEFINE_TEST_FUNC(s32, "%d");
+#if BITS_PER_LONG == 64
+DEFINE_TEST_FUNC(u64, "%llu");
+DEFINE_TEST_FUNC(s64, "%lld");
+#endif
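
Spelled out for one type, DEFINE_TEST_FUNC(u8, "%d") generates roughly the following (a trimmed sketch; check_one_op() is left unexpanded and the pr_info() banner is omitted):

static int __init do_test_u8(const struct test_u8 *p)
{
	int err = 0;

	/* Addition and multiplication are exercised with operands both ways round. */
	check_one_op(u8, "%d", add, "+", p->a, p->b, p->sum, p->s_of);
	check_one_op(u8, "%d", add, "+", p->b, p->a, p->sum, p->s_of);
	check_one_op(u8, "%d", sub, "-", p->a, p->b, p->diff, p->d_of);
	check_one_op(u8, "%d", mul, "*", p->a, p->b, p->prod, p->p_of);
	check_one_op(u8, "%d", mul, "*", p->b, p->a, p->prod, p->p_of);

	return err;
}

static int __init test_u8_overflow(void)
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(u8_tests); ++i)
		err |= do_test_u8(&u8_tests[i]);
	return err;
}
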
+
+static int __init test_overflow_calculation(void)
+{
+ int err = 0;
+
+ err |= test_u8_overflow();
+ err |= test_s8_overflow();
+ err |= test_u16_overflow();
+ err |= test_s16_overflow();
+ err |= test_u32_overflow();
+ err |= test_s32_overflow();
+#if BITS_PER_LONG == 64
+ err |= test_u64_overflow();
+ err |= test_s64_overflow();
+#endif
+
+ return err;
+}
+
+static int __init test_overflow_shift(void)
+{
+ int err = 0;
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
+ int __failed = 0; \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ __failed = 1; \
+ } else if (!__of && __d != __e) { \
+ pr_warn("expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ pr_warn("got %lld\n", (s64)__d); \
+ else \
+ pr_warn("got %llu\n", (u64)__d); \
+ __failed = 1; \
+ } \
+ if (!__failed) \
+ pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
+ of ? "overflow" : #expect); \
+ __failed; \
+})
+
+ /* Sane shifts. */
+ err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
+ 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ /* Overflow: shifted the bit off the end. */
+ err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ /* Overflow: values larger than destination type. */
+ err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Nonsense: negative initial value. */
+ err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ return err;
+}
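
Outside of the test macro, check_shl_overflow() is used the same way as the other helpers: the overflow decision is driven by the destination type, and the call returns true when the shifted value does not fit. A minimal sketch (function and variable names invented here):

static void __init shl_example(void)
{
	u16 wide;
	u8 narrow;

	/* 0xFF << 8 still fits in a u16, so no overflow is reported... */
	if (check_shl_overflow(0xFFU, 8, &wide))
		pr_warn("unexpected overflow into u16\n");

	/* ...but the same value cannot be represented in a u8 after the shift. */
	if (check_shl_overflow(0xFFU, 8, &narrow))
		pr_info("overflow into u8 detected, as expected\n");
}
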
+
+/*
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
+#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc000(alloc, arg, sz) alloc(sz)
+#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define free0(free, arg, ptr) free(ptr)
+#define free1(free, arg, ptr) free(arg, ptr)
+
+/* Wrap around to 8K */
+#define TEST_SIZE (9 << PAGE_SHIFT)
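
For concreteness, assuming a 64-bit size_t and 4 KiB pages (PAGE_SHIFT == 12): TEST_SIZE is 36864, b below becomes SIZE_MAX / 36864 + 1, and a * b works out to exactly 2^64 + 8192. The product therefore wraps to 8192 bytes, the "8K" the comment refers to, so the open-coded multiplication succeeds but yields a buffer far smaller than intended; that is precisely the bug class the saturating helpers guard against.
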
+
+#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+static int __init test_ ## func (void *arg) \
+{ \
+ volatile size_t a = TEST_SIZE; \
+ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
+ void *ptr; \
+ \
+ /* Tiny allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
+ if (!ptr) { \
+ pr_warn(#func " failed regular allocation?!\n"); \
+ return 1; \
+ } \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Wrapped allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ a * b); \
+ if (!ptr) { \
+ pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \
+ return 1; \
+ } \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Saturated allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ array_size(a, b)); \
+ if (ptr) { \
+ pr_warn(#func " missed saturation!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ return 1; \
+ } \
+ pr_info(#func " detected saturation\n"); \
+ return 0; \
+}
+
+/*
+ * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
+ * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
+ * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
+ * | | |
+ */
+DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0);
+DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1);
+DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0);
+DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1);
+DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
+DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
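
As a concrete reading of the bit mapping above: DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0) selects alloc110() and free1(), so its saturated-allocation branch boils down to roughly the following (a sketch, with the function name invented here):

static int __init test_devm_kmalloc_saturated(void *arg)
{
	volatile size_t a = TEST_SIZE;
	volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1;
	void *ptr;

	/* array_size() saturates to SIZE_MAX, so this allocation must fail. */
	ptr = devm_kmalloc(arg, array_size(a, b), GFP_KERNEL);
	if (ptr) {
		pr_warn("devm_kmalloc missed saturation!\n");
		devm_kfree(arg, ptr);
		return 1;
	}
	return 0;
}
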
+
+static int __init test_overflow_allocation(void)
+{
+ const char device_name[] = "overflow-test";
+ struct device *dev;
+ int err = 0;
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+ dev = root_device_register(device_name);
+ if (IS_ERR(dev)) {
+ pr_warn("Cannot register test device\n");
+ return 1;
+ }
+
+ err |= test_kmalloc(NULL);
+ err |= test_kmalloc_node(NULL);
+ err |= test_kzalloc(NULL);
+ err |= test_kzalloc_node(NULL);
+ err |= test_kvmalloc(NULL);
+ err |= test_kvmalloc_node(NULL);
+ err |= test_kvzalloc(NULL);
+ err |= test_kvzalloc_node(NULL);
+ err |= test_vmalloc(NULL);
+ err |= test_vmalloc_node(NULL);
+ err |= test_vzalloc(NULL);
+ err |= test_vzalloc_node(NULL);
+ err |= test_devm_kmalloc(dev);
+ err |= test_devm_kzalloc(dev);
+
+ device_unregister(dev);
+
+ return err;
+}
+
+static int __init test_module_init(void)
+{
+ int err = 0;
+
+ err |= test_overflow_calculation();
+ err |= test_overflow_shift();
+ err |= test_overflow_allocation();
+
+ if (err) {
+ pr_warn("FAIL!\n");
+ err = -EINVAL;
+ } else {
+ pr_info("all tests passed\n");
+ }
+
+ return err;
+}
+
+static void __exit test_module_exit(void)
+{ }
+
+module_init(test_module_init);
+module_exit(test_module_exit);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 71ebfa43ad05..53527ea822b5 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -204,8 +204,9 @@ test_string(void)
#if BITS_PER_LONG == 64
#define PTR_WIDTH 16
-#define PTR ((void *)0xffff0123456789ab)
+#define PTR ((void *)0xffff0123456789abUL)
#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
static int __init
@@ -216,7 +217,16 @@ plain_format(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
return -1;
return 0;
@@ -227,6 +237,7 @@ plain_format(void)
#define PTR_WIDTH 8
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
static int __init
plain_format(void)
@@ -245,7 +256,16 @@ plain_hash(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
return -1;
return 0;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f4000c137dbe..82ac39ce5310 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
const struct test_obj_rhl *obj = data;
- return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+ return (obj->value.id % 10);
}
static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
- .nulls_base = (3U << RHT_BASE_SHIFT),
};
static struct rhashtable_params test_rht_params_dup = {
@@ -285,17 +284,17 @@ static int __init test_rhltable(unsigned int entries)
if (entries == 0)
entries = 1;
- rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries);
+ rhl_test_objects = vzalloc(array_size(entries,
+ sizeof(*rhl_test_objects)));
if (!rhl_test_objects)
return -ENOMEM;
ret = -ENOMEM;
- obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long));
+ obj_in_table = vzalloc(array_size(sizeof(unsigned long),
+ BITS_TO_LONGS(entries)));
if (!obj_in_table)
goto out_free;
- /* nulls_base not supported in rhlist interface */
- test_rht_params.nulls_base = 0;
err = rhltable_init(&rhlt, &test_rht_params);
if (WARN_ON(err))
goto out_free;
@@ -499,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
unsigned int i, cnt = 0;
ht = &rhlt->ht;
+ /* Take the mutex to avoid RCU warning */
+ mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
@@ -532,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+ mutex_unlock(&ht->mutex);
return cnt;
}
@@ -706,7 +708,8 @@ static int __init test_rht_init(void)
test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
test_rht_params.nelem_hint = size;
- objs = vzalloc((test_rht_params.max_size + 1) * sizeof(struct test_obj));
+ objs = vzalloc(array_size(sizeof(struct test_obj),
+ test_rht_params.max_size + 1));
if (!objs)
return -ENOMEM;
@@ -753,10 +756,10 @@ static int __init test_rht_init(void)
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
sema_init(&prestart_sem, 1 - tcount);
- tdata = vzalloc(tcount * sizeof(struct thread_data));
+ tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
- objs = vzalloc(tcount * entries * sizeof(struct test_obj));
+ objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
if (!objs) {
vfree(tdata);
return -ENOMEM;
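
The conversions in this file all follow the same pattern: open-coded count * size multiplications are replaced with array_size()/array3_size() from <linux/overflow.h>, which saturate to SIZE_MAX when the product would wrap, so vzalloc() fails outright instead of returning a buffer that is silently too small. An illustrative helper for the three-factor form (names invented here):

#include <linux/overflow.h>
#include <linux/vmalloc.h>

static struct test_obj *alloc_obj_grid(size_t threads, size_t entries)
{
	/* Saturates to SIZE_MAX (and thus fails) if the product overflows. */
	return vzalloc(array3_size(sizeof(struct test_obj), threads, entries));
}
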
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
new file mode 100644
index 000000000000..280f4979d00e
--- /dev/null
+++ b/lib/test_ubsan.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+typedef void(*test_ubsan_fp)(void);
+
+static void test_ubsan_add_overflow(void)
+{
+ volatile int val = INT_MAX;
+
+ val += 2;
+}
+
+static void test_ubsan_sub_overflow(void)
+{
+ volatile int val = INT_MIN;
+ volatile int val2 = 2;
+
+ val -= val2;
+}
+
+static void test_ubsan_mul_overflow(void)
+{
+ volatile int val = INT_MAX / 2;
+
+ val *= 3;
+}
+
+static void test_ubsan_negate_overflow(void)
+{
+ volatile int val = INT_MIN;
+
+ val = -val;
+}
+
+static void test_ubsan_divrem_overflow(void)
+{
+ volatile int val = 16;
+ volatile int val2 = 0;
+
+ val /= val2;
+}
+
+static void test_ubsan_vla_bound_not_positive(void)
+{
+ volatile int size = -1;
+ char buf[size];
+
+ (void)buf;
+}
+
+static void test_ubsan_shift_out_of_bounds(void)
+{
+ volatile int val = -1;
+ int val2 = 10;
+
+ val2 <<= val;
+}
+
+static void test_ubsan_out_of_bounds(void)
+{
+ volatile int i = 4, j = 5;
+ volatile int arr[i];
+
+ arr[j] = i;
+}
+
+static void test_ubsan_load_invalid_value(void)
+{
+ volatile char *dst, *src;
+ bool val, val2, *ptr;
+ char c = 4;
+
+ dst = (char *)&val;
+ src = &c;
+ *dst = *src;
+
+ ptr = &val2;
+ val2 = val;
+}
+
+static void test_ubsan_null_ptr_deref(void)
+{
+ volatile int *ptr = NULL;
+ int val;
+
+ val = *ptr;
+}
+
+static void test_ubsan_misaligned_access(void)
+{
+ volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
+ volatile int *ptr, val = 6;
+
+ ptr = (int *)(arr + 1);
+ *ptr = val;
+}
+
+static void test_ubsan_object_size_mismatch(void)
+{
+	/* "((aligned(8)))" helps this not be misaligned for ptr-access. */
+ volatile int val __aligned(8) = 4;
+ volatile long long *ptr, val2;
+
+ ptr = (long long *)&val;
+ val2 = *ptr;
+}
+
+static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_add_overflow,
+ test_ubsan_sub_overflow,
+ test_ubsan_mul_overflow,
+ test_ubsan_negate_overflow,
+ test_ubsan_divrem_overflow,
+ test_ubsan_vla_bound_not_positive,
+ test_ubsan_shift_out_of_bounds,
+ test_ubsan_out_of_bounds,
+ test_ubsan_load_invalid_value,
+	//test_ubsan_null_ptr_deref, /* excluded because it would crash the kernel */
+ test_ubsan_misaligned_access,
+ test_ubsan_object_size_mismatch,
+};
+
+static int __init test_ubsan_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
+ test_ubsan_array[i]();
+
+	(void)test_ubsan_null_ptr_deref; /* to avoid unused-function warning */
+ return 0;
+}
+module_init(test_ubsan_init);
+
+static void __exit test_ubsan_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_ubsan_exit);
+
+MODULE_AUTHOR("Jinbum Park <jinb.park7@gmail.com>");
+MODULE_LICENSE("GPL v2");
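
Extending the module follows the same shape as the cases above: add one small function that performs a single undefined operation on volatile locals (so the compiler cannot fold it away) and append it to test_ubsan_array[]. A hypothetical extra case, not part of the patch, could cover a shift wider than the type:

static void test_ubsan_shift_too_wide(void)
{
	volatile int val = 1;
	volatile int shift = 40;

	/* Shifting a 32-bit int by 40 bits is undefined behaviour. */
	val <<= shift;
}
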
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 4621db801b23..e161f0498f42 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,11 +31,8 @@
* their capability at compile-time, we just have to opt-out certain archs.
*/
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_BLACKFIN) && \
- !defined(CONFIG_M32R) && \
!defined(CONFIG_M68K) && \
!defined(CONFIG_MICROBLAZE) && \
- !defined(CONFIG_MN10300) && \
!defined(CONFIG_NIOS2) && \
!defined(CONFIG_PPC32) && \
!defined(CONFIG_SUPERH))
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0b79908dfe89..5939549c0e7b 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -10,7 +10,10 @@
* Pablo Neira Ayuso <pablo@netfilter.org>
*
* ==========================================================================
- *
+ */
+
+/**
+ * DOC: ts_intro
* INTRODUCTION
*
* The textsearch infrastructure provides text searching facilities for
@@ -19,7 +22,9 @@
*
* ARCHITECTURE
*
- * User
+ * .. code-block:: none
+ *
+ * User
* +----------------+
* | finish()|<--------------(6)-----------------+
* |get_next_block()|<--------------(5)---------------+ |
@@ -33,21 +38,21 @@
* | (3)|----->| find()/next() |-----------+ |
* | (7)|----->| destroy() |----------------------+
* +----------------+ +---------------+
- *
- * (1) User configures a search by calling _prepare() specifying the
- * search parameters such as the pattern and algorithm name.
+ *
+ * (1) User configures a search by calling textsearch_prepare() specifying
+ * the search parameters such as the pattern and algorithm name.
* (2) Core requests the algorithm to allocate and initialize a search
* configuration according to the specified parameters.
- * (3) User starts the search(es) by calling _find() or _next() to
- * fetch subsequent occurrences. A state variable is provided
- * to the algorithm to store persistent variables.
+ * (3) User starts the search(es) by calling textsearch_find() or
+ * textsearch_next() to fetch subsequent occurrences. A state variable
+ * is provided to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
* (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched in block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
- * (7) User destroys the configuration by calling _destroy().
+ * (7) User destroys the configuration by calling textsearch_destroy().
* (8) Core notifies the algorithm to destroy algorithm specific
* allocations. (Optional)
*
@@ -62,9 +67,10 @@
* amount of times and even in parallel as long as a separate struct
* ts_state variable is provided to every instance.
*
- * The actual search is performed by either calling textsearch_find_-
- * continuous() for linear data or by providing an own get_next_block()
- * implementation and calling textsearch_find(). Both functions return
+ * The actual search is performed by either calling
+ * textsearch_find_continuous() for linear data or by providing
+ * its own get_next_block() implementation and
+ * calling textsearch_find(). Both functions return
* the position of the first occurrence of the pattern or UINT_MAX if
* no match was found. Subsequent occurrences can be found by calling
* textsearch_next() regardless of the linearity of the data.
@@ -72,7 +78,7 @@
* Once you're done using a configuration it must be given back via
* textsearch_destroy.
*
- * EXAMPLE
+ * EXAMPLE::
*
* int pos;
* struct ts_config *conf;
@@ -87,13 +93,13 @@
* goto errout;
* }
*
- * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
+ * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
* if (pos != UINT_MAX)
- * panic("Oh my god, dancing chickens at %d\n", pos);
+ * panic("Oh my god, dancing chickens at \%d\n", pos);
*
* textsearch_destroy(conf);
- * ==========================================================================
*/
+/* ========================================================================== */
#include <linux/module.h>
#include <linux/types.h>
@@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
*
* Returns the position of first occurrence of the pattern or
* %UINT_MAX if no occurrence was found.
- */
+ */
unsigned int textsearch_find_continuous(struct ts_config *conf,
struct ts_state *state,
const void *data, unsigned int len)
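
Since the introduction above is now proper kernel-doc, a slightly fuller version of its usage example may be helpful. This is a hedged sketch built only from the calls the documentation names (module boilerplate and error logging kept minimal):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/textsearch.h>

static int __init ts_example(void)
{
	static const char haystack[] = "the quick brown fox";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* Prepare a KMP search for the 5-byte pattern "brown". */
	conf = textsearch_prepare("kmp", "brown", 5, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, haystack,
					 strlen(haystack));
	if (pos != UINT_MAX)
		pr_info("pattern found at offset %u\n", pos);

	textsearch_destroy(conf);
	return 0;
}
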
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c
index 25ca2d4c1e19..597998169a96 100644
--- a/lib/ucmpdi2.c
+++ b/lib/ucmpdi2.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/libgcc.h>
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
{
const DWunion au = {.ll = a};
const DWunion bu = {.ll = b};
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index d7e06b28de38..0a559a42359b 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -112,3 +112,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
return j;
}
EXPORT_SYMBOL(ucs2_as_utf8);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d7a708f82559..d5b3a3f95c01 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -336,7 +336,7 @@ char *put_dec(char *buf, unsigned long long n)
*
* If speed is not important, use snprintf(). It's easy to read the code.
*/
-int num_to_str(char *buf, int size, unsigned long long num)
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
@@ -350,11 +350,21 @@ int num_to_str(char *buf, int size, unsigned long long num)
len = put_dec(tmp, num) - tmp;
}
- if (len > size)
+ if (len > size || width > size)
return 0;
+
+ if (width > len) {
+ width = width - len;
+ for (idx = 0; idx < width; idx++)
+ buf[idx] = ' ';
+ } else {
+ width = 0;
+ }
+
for (idx = 0; idx < len; ++idx)
- buf[idx] = tmp[len - idx - 1];
- return len;
+ buf[idx + width] = tmp[len - idx - 1];
+
+ return len + width;
}
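
An illustration of the new width argument (names invented here; note that num_to_str() writes raw characters and does not NUL-terminate the buffer):

static void num_to_str_example(void)
{
	char buf[8];
	int written;

	/* "42" with a requested width of 4 becomes "  42": two leading */
	/* spaces, then the digits; the return value is 4.              */
	written = num_to_str(buf, sizeof(buf), 42, 4);
	(void)written;
}
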
#define SIGN 1 /* unsigned/signed, must be 1 */
@@ -693,6 +703,22 @@ char *symbol_string(char *buf, char *end, void *ptr,
#endif
}
+static const struct printf_spec default_str_spec = {
+ .field_width = -1,
+ .precision = -1,
+};
+
+static const struct printf_spec default_flag_spec = {
+ .base = 16,
+ .precision = -1,
+ .flags = SPECIAL | SMALL,
+};
+
+static const struct printf_spec default_dec_spec = {
+ .base = 10,
+ .precision = -1,
+};
+
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
@@ -722,21 +748,11 @@ char *resource_string(char *buf, char *end, struct resource *res,
.precision = -1,
.flags = SMALL | ZEROPAD,
};
- static const struct printf_spec dec_spec = {
- .base = 10,
- .precision = -1,
- .flags = 0,
- };
static const struct printf_spec str_spec = {
.field_width = -1,
.precision = 10,
.flags = LEFT,
};
- static const struct printf_spec flag_spec = {
- .base = 16,
- .precision = -1,
- .flags = SPECIAL | SMALL,
- };
/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
* 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
@@ -760,10 +776,10 @@ char *resource_string(char *buf, char *end, struct resource *res,
specp = &mem_spec;
} else if (res->flags & IORESOURCE_IRQ) {
p = string(p, pend, "irq ", str_spec);
- specp = &dec_spec;
+ specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_DMA) {
p = string(p, pend, "dma ", str_spec);
- specp = &dec_spec;
+ specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_BUS) {
p = string(p, pend, "bus ", str_spec);
specp = &bus_spec;
@@ -793,7 +809,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
p = string(p, pend, " disabled", str_spec);
} else {
p = string(p, pend, " flags ", str_spec);
- p = number(p, pend, res->flags, flag_spec);
+ p = number(p, pend, res->flags, default_flag_spec);
}
*p++ = ']';
*p = '\0';
@@ -903,9 +919,6 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
int cur, rbot, rtop;
bool first = true;
- /* reused to print numbers */
- spec = (struct printf_spec){ .base = 10 };
-
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
@@ -920,13 +933,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
}
first = false;
- buf = number(buf, end, rbot, spec);
+ buf = number(buf, end, rbot, default_dec_spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
- buf = number(buf, end, rtop, spec);
+ buf = number(buf, end, rtop, default_dec_spec);
}
rbot = cur;
@@ -1344,11 +1357,9 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
-int kptr_restrict __read_mostly;
-
static noinline_for_stack
-char *restricted_pointer(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
{
spec.base = 16;
spec.flags |= SMALL;
@@ -1357,6 +1368,15 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
spec.flags |= ZEROPAD;
}
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
switch (kptr_restrict) {
case 0:
/* Always print %pK values */
@@ -1368,8 +1388,11 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi())
+ if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(ptr);
return string(buf, end, "pK-error", spec);
+ }
/*
* Only print the real pointer value if the current
@@ -1394,7 +1417,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
break;
}
- return number(buf, end, (unsigned long)ptr, spec);
+ return pointer_string(buf, end, ptr, spec);
}
static noinline_for_stack
@@ -1446,9 +1469,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
return string(buf, end, NULL, spec);
switch (fmt[1]) {
- case 'r':
- return number(buf, end, clk_get_rate(clk), spec);
-
case 'n':
default:
#ifdef CONFIG_COMMON_CLK
@@ -1464,23 +1484,13 @@ char *format_flags(char *buf, char *end, unsigned long flags,
const struct trace_print_flags *names)
{
unsigned long mask;
- const struct printf_spec strspec = {
- .field_width = -1,
- .precision = -1,
- };
- const struct printf_spec numspec = {
- .flags = SPECIAL|SMALL,
- .field_width = -1,
- .precision = -1,
- .base = 16,
- };
for ( ; flags && names->name; names++) {
mask = names->mask;
if ((flags & mask) != mask)
continue;
- buf = string(buf, end, names->name, strspec);
+ buf = string(buf, end, names->name, default_str_spec);
flags &= ~mask;
if (flags) {
@@ -1491,7 +1501,7 @@ char *format_flags(char *buf, char *end, unsigned long flags,
}
if (flags)
- buf = number(buf, end, flags, numspec);
+ buf = number(buf, end, flags, default_flag_spec);
return buf;
}
@@ -1538,22 +1548,18 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e
{
int depth;
const struct device_node *parent = np->parent;
- static const struct printf_spec strspec = {
- .field_width = -1,
- .precision = -1,
- };
/* special case for root node */
if (!parent)
- return string(buf, end, "/", strspec);
+ return string(buf, end, "/", default_str_spec);
for (depth = 0; parent->parent; depth++)
parent = parent->parent;
for ( ; depth >= 0; depth--) {
- buf = string(buf, end, "/", strspec);
+ buf = string(buf, end, "/", default_str_spec);
buf = string(buf, end, device_node_name_for_depth(np, depth),
- strspec);
+ default_str_spec);
}
return buf;
}
@@ -1645,33 +1651,33 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static noinline_for_stack
-char *pointer_string(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
-{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
- return number(buf, end, (unsigned long int)ptr, spec);
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-static bool have_filled_random_ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
static siphash_key_t ptr_key __read_mostly;
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static void enable_ptr_key_workfn(struct work_struct *work)
{
get_random_bytes(&ptr_key, sizeof(ptr_key));
- /*
- * have_filled_random_ptr_key==true is dependent on get_random_bytes().
- * ptr_to_id() needs to see have_filled_random_ptr_key==true
- * after get_random_bytes() returns.
- */
- smp_mb();
- WRITE_ONCE(have_filled_random_ptr_key, true);
+ /* Needs to run from preemptible context */
+ static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ /* This may be in an interrupt handler. */
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
}
static struct random_ready_callback random_ready = {
@@ -1680,12 +1686,21 @@ static struct random_ready_callback random_ready = {
static int __init initialize_ptr_random(void)
{
- int ret = add_random_ready_callback(&random_ready);
+ int key_size = sizeof(ptr_key);
+ int ret;
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {
- fill_random_ptr_key(&random_ready);
+ /* This is in preemptible context */
+ enable_ptr_key_workfn(&enable_ptr_key_work);
return 0;
}
@@ -1696,13 +1711,19 @@ early_initcall(initialize_ptr_random);
/* Maps a pointer to a 32 bit unique identifier. */
static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
- const int default_width = 2 * sizeof(ptr);
- if (unlikely(!have_filled_random_ptr_key)) {
- spec.field_width = default_width;
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
- return string(buf, end, "(ptrval)", spec);
+ return string(buf, end, str, spec);
}
#ifdef CONFIG_64BIT
@@ -1715,15 +1736,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
#else
hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
-
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = default_width;
- spec.flags |= ZEROPAD;
- }
- spec.base = 16;
-
- return number(buf, end, hashval, spec);
+ return pointer_string(buf, end, (const void *)hashval, spec);
}
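
For callers the behaviour of the two pointer conversions is unchanged by this rework: plain %p still prints a hashed identifier that is stable within one boot (and, with the debug_boot_weak_hash parameter added above, usable even before the random key is ready), while %px prints the raw address. A small illustrative use (names invented here):

static void __maybe_unused ptr_format_example(void)
{
	static int some_object;

	/* %p: hashed identifier, safe for logs; %px: raw address, use sparingly. */
	pr_info("object at %p (unhashed: %px)\n", &some_object, &some_object);
}
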
/*
@@ -1736,10 +1749,10 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
*
* Right now we handle:
*
- * - 'F' For symbolic function descriptor pointers with offset
- * - 'f' For simple symbolic function names without offset
- * - 'S' For symbolic direct pointers with offset
- * - 's' For symbolic direct pointers without offset
+ * - 'S' For symbolic direct pointers (or function descriptors) with offset
+ * - 's' For symbolic direct pointers (or function descriptors) without offset
+ * - 'F' Same as 'S'
+ * - 'f' Same as 's'
* - '[FfSs]R' as above with __builtin_extract_return_addr() translation
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
@@ -1836,10 +1849,6 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
- * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
- * function pointers are really function descriptors, which contain a
- * pointer to the real address.
- *
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*/
@@ -1958,6 +1967,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ break;
case 'x':
return pointer_string(buf, end, ptr, spec);
}
@@ -2115,6 +2125,7 @@ qualifier:
case 'x':
spec->flags |= SMALL;
+ /* fall through */
case 'X':
spec->base = 16;
@@ -2591,6 +2602,8 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
case 's':
case 'F':
case 'f':
+ case 'x':
+ case 'K':
save_arg(void *);
break;
default:
@@ -2765,6 +2778,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case 's':
case 'F':
case 'f':
+ case 'x':
+ case 'K':
process = true;
break;
default:
@@ -3069,8 +3084,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
case 'i':
base = 0;
+ /* fall through */
case 'd':
is_sign = true;
+ /* fall through */
case 'u':
break;
case '%':
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d14fd4c..25a5d87e2e4c 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,6 +15,7 @@
* but they are bigger and use more memory for the lookup table.
*/
+#include <linux/crc32poly.h>
#include "xz_private.h"
/*
@@ -29,7 +30,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
- const uint32_t poly = 0xEDB88320;
+ const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
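
The substitution is purely cosmetic: CRC32_POLY_LE carries the same bit-reflected CRC-32 polynomial, 0xEDB88320, so the generated table is identical. An illustrative compile-time guard (not something the patch adds) would be:

#include <linux/build_bug.h>
#include <linux/crc32poly.h>

static inline void xz_crc32_poly_check(void)
{
	/* Illustrative only: the rename must not change the table contents. */
	BUILD_BUG_ON(CRC32_POLY_LE != 0xEDB88320);
}
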
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index dd0a359c135b..7920cbbfeae9 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -3,16 +3,7 @@ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
ccflags-y += -O3
-# Object files unique to zstd_compress and zstd_decompress
-zstd_compress-y := fse_compress.o huf_compress.o compress.o
-zstd_decompress-y := huf_decompress.o decompress.o
-
-# These object files are shared between the modules.
-# Always add them to zstd_compress.
-# Unless both zstd_compress and zstd_decompress are built in
-# then also add them to zstd_decompress.
-zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
-
-ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
- zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
-endif
+zstd_compress-y := fse_compress.o huf_compress.o compress.o \
+ entropy_common.o fse_decompress.o zstd_common.o
+zstd_decompress-y := huf_decompress.o decompress.o \
+ entropy_common.o fse_decompress.o zstd_common.o