Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.kasan         |  7
-rw-r--r--  lib/crypto/mpi/mpi-mul.c  |  2
-rw-r--r--  lib/iov_iter.c            | 25
-rw-r--r--  lib/objpool.c             | 18
-rw-r--r--  lib/slub_kunit.c          |  2
5 files changed, 31 insertions(+), 23 deletions(-)
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 233ab2096924..98016e137b7f 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -22,11 +22,8 @@ config ARCH_DISABLE_KASAN_INLINE
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
-# GCC appears to ignore no_sanitize_address when -fsanitize=kernel-hwaddress
-# is passed. See https://bugzilla.kernel.org/show_bug.cgi?id=218854 (and
-# the linked LKML thread) for more details.
config CC_HAS_KASAN_SW_TAGS
- def_bool !CC_IS_GCC && $(cc-option, -fsanitize=kernel-hwaddress)
+ def_bool $(cc-option, -fsanitize=kernel-hwaddress)
# This option is only required for software KASAN modes.
# Old GCC versions do not have proper support for no_sanitize_address.
@@ -101,7 +98,7 @@ config KASAN_SW_TAGS
help
Enables Software Tag-Based KASAN.
- Requires Clang.
+ Requires GCC 11+ or Clang.
Supported only on arm64 CPUs and relies on Top Byte Ignore.
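The !CC_IS_GCC guard being dropped here existed because, per the removed comment, GCC used to ignore the no_sanitize_address attribute when -fsanitize=kernel-hwaddress was passed. A minimal illustrative snippet of the pattern that depends on that attribute being honored (hypothetical function, not from this patch):

    /* Hypothetical example: under software tag-based KASAN this store must
     * stay uninstrumented, which only works if the compiler honors the
     * attribute while building with -fsanitize=kernel-hwaddress. */
    static __attribute__((no_sanitize_address)) void poke_raw(char *p)
    {
            *p = 0;
    }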
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 892a246216b9..7e6ff1ce3e9b 100644
--- a/lib/crypto/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -21,7 +21,7 @@ int mpi_mul(MPI w, MPI u, MPI v)
int usign, vsign, sign_product;
int assign_wp = 0;
mpi_ptr_t tmp_limb = NULL;
- int err;
+ int err = 0;
if (u->nlimbs < v->nlimbs) {
/* Swap U and V. */
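The mpi_mul() change initializes err so the function cannot return an indeterminate value on a path that never assigns it. A small self-contained sketch of that bug class (hypothetical function, not the MPI code):

    #include <errno.h>

    /* Hypothetical illustration: if the loop body never executes, "err"
     * must already hold a defined value before the final return. */
    static int sum_nonnegative(const int *v, int n, long *out)
    {
            int err = 0;    /* without "= 0", n == 0 would return garbage */
            long acc = 0;

            for (int i = 0; i < n; i++) {
                    if (v[i] < 0) {
                            err = -EINVAL;
                            break;
                    }
                    acc += v[i];
            }
            if (!err)
                    *out = acc;
            return err;
    }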
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1abb32c0da50..908e75a28d90 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -461,6 +461,8 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i)
{
size_t n, copied = 0;
+ bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
+ PageHighMem(page);
if (!page_copy_sane(page, offset, bytes))
return 0;
@@ -471,7 +473,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
char *p;
n = bytes - copied;
- if (PageHighMem(page)) {
+ if (uses_kmap) {
page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
n = min_t(size_t, n, PAGE_SIZE - offset);
@@ -482,7 +484,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
kunmap_atomic(p);
copied += n;
offset += n;
- } while (PageHighMem(page) && copied != bytes && n > 0);
+ } while (uses_kmap && copied != bytes && n > 0);
return copied;
}
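These copy_page_from_iter_atomic() hunks extend the page-at-a-time path to CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP builds: when every page, not just highmem, is accessed through a temporary mapping, each mapping covers a single page, so the copy must be chunked at page boundaries. A simplified userspace sketch of that chunking (names and sizes invented for illustration):

    #include <stddef.h>
    #include <string.h>

    #define PG_SIZE 4096UL

    /* Copy "bytes" bytes into an array of separately "mapped" pages,
     * never letting one memcpy() cross a page boundary -- the same shape
     * as the kmap-based loop above. */
    static size_t copy_to_pages(char *pages[], size_t offset,
                                const char *src, size_t bytes)
    {
            size_t copied = 0;

            while (copied < bytes) {
                    size_t in_page = offset % PG_SIZE;
                    size_t n = bytes - copied;

                    if (n > PG_SIZE - in_page)
                            n = PG_SIZE - in_page;
                    memcpy(pages[offset / PG_SIZE] + in_page, src + copied, n);
                    copied += n;
                    offset += n;
            }
            return copied;
    }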
@@ -1021,15 +1023,18 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
- part = umin(part, umin(maxsize - extracted, fsize - offset));
- count -= part;
- iov_offset += part;
- extracted += part;
+ if (offset < fsize) {
+ part = umin(part, umin(maxsize - extracted, fsize - offset));
+ count -= part;
+ iov_offset += part;
+ extracted += part;
+
+ *pages = folio_page(folio, offset / PAGE_SIZE);
+ get_page(*pages);
+ pages++;
+ maxpages--;
+ }
- *pages = folio_page(folio, offset / PAGE_SIZE);
- get_page(*pages);
- pages++;
- maxpages--;
if (maxpages == 0 || extracted >= maxsize)
break;
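The iter_folioq_get_pages() hunk adds an offset < fsize guard: when the iterator's offset already sits at the end of the current folio there is nothing left to extract from it, yet the old code still computed a page pointer from that offset and took a reference on it. A small array-based sketch of the guarded pattern (hypothetical stand-in, not the folio code):

    #include <stddef.h>

    struct chunk { const char *p; size_t len; };

    /* Only hand out a chunk while the cursor is still inside the current
     * buffer; offset == size means "advance to the next buffer", not
     * "grab one more element past the end". */
    static int get_chunk(const char *buf, size_t size, size_t offset,
                         size_t want, struct chunk *out)
    {
            if (offset < size) {
                    out->p = buf + offset;
                    out->len = want < size - offset ? want : size - offset;
                    return 1;       /* produced a chunk */
            }
            return 0;               /* fully consumed, caller moves on */
    }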
diff --git a/lib/objpool.c b/lib/objpool.c
index 234f9d0bd081..b998b720c732 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
* warm caches and TLB hits. in default vmalloc is used to
* reduce the pressure of kernel slab system. as we know,
* mimimal size of vmalloc is one page since vmalloc would
- * always align the requested size to page size
+ * always align the requested size to page size.
+ * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
+ * allocate percpu slot with kmalloc.
*/
- if (pool->gfp & GFP_ATOMIC)
- slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
- else
+ slot = NULL;
+
+ if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
cpu_to_node(i), __builtin_return_address(0));
- if (!slot)
- return -ENOMEM;
+
+ if (!slot) {
+ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+ if (!slot)
+ return -ENOMEM;
+ }
memset(slot, 0, size);
pool->cpu_slots[i] = slot;
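The objpool change turns the old either/or choice into vmalloc-first with a kmalloc fallback, so a vmalloc failure no longer aborts initialization and atomic callers still get a usable slot. A stripped-down sketch of that policy, assuming kernel context and using gfpflags_allow_blocking() as a simpler stand-in for the patch's exact flag test (this is not the objpool code itself):

    #include <linux/gfp.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Illustrative helper: prefer vmalloc for the per-CPU slot so large
     * allocations do not pressure the slab allocator, but vmalloc may
     * sleep, so fall back to kmalloc when it fails or the caller cannot
     * block. */
    static void *alloc_slot(size_t size, gfp_t gfp, int node)
    {
            void *slot = NULL;

            if (gfpflags_allow_blocking(gfp))
                    slot = __vmalloc_node(size, sizeof(void *), gfp, node,
                                          __builtin_return_address(0));
            if (!slot)
                    slot = kmalloc_node(size, gfp, node);
            return slot;
    }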
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 80e39f003344..33564f965958 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -141,7 +141,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
- u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
+ u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
kasan_disable_current();