Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--   arch/s390/kernel/asm-offsets.c       1
-rw-r--r--   arch/s390/kernel/debug.c            12
-rw-r--r--   arch/s390/kernel/diag/diag310.c      2
-rw-r--r--   arch/s390/kernel/diag/diag324.c      6
-rw-r--r--   arch/s390/kernel/early.c             3
-rw-r--r--   arch/s390/kernel/hiperdispatch.c     2
-rw-r--r--   arch/s390/kernel/perf_pai_crypto.c 106
-rw-r--r--   arch/s390/kernel/process.c           2
-rw-r--r--   arch/s390/kernel/setup.c             2
-rw-r--r--   arch/s390/kernel/skey.c              2
-rw-r--r--   arch/s390/kernel/smp.c               2
-rw-r--r--   arch/s390/kernel/topology.c         20
-rw-r--r--   arch/s390/kernel/uv.c               16
-rw-r--r--   arch/s390/kernel/vmlinux.lds.S      10
14 files changed, 79 insertions, 107 deletions
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 95ecad9c7d7d..a8915663e917 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
+#define COMPILE_OFFSETS
#include <linux/kbuild.h>
#include <linux/sched.h>
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c62100dc62c8..6a26f202441d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1416,18 +1416,12 @@ static inline char *debug_get_user_string(const char __user *user_buf,
{
char *buffer;
- buffer = kmalloc(user_len + 1, GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(buffer, user_buf, user_len) != 0) {
- kfree(buffer);
- return ERR_PTR(-EFAULT);
- }
+ buffer = memdup_user_nul(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return buffer;
/* got the string, now strip linefeed. */
if (buffer[user_len - 1] == '\n')
buffer[user_len - 1] = 0;
- else
- buffer[user_len] = 0;
return buffer;
}
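
The debug.c change above replaces an open-coded kmalloc()/copy_from_user() pair with memdup_user_nul(), which allocates user_len + 1 bytes, copies the user buffer, NUL-terminates it, and returns an ERR_PTR() on failure, so the explicit termination of the no-newline case can go away. A minimal sketch of the same pattern (the helper name is illustrative, not from the patch):

#include <linux/string.h>	/* memdup_user_nul() */
#include <linux/err.h>		/* IS_ERR() */

/* Hypothetical helper: duplicate a user-supplied line and strip the
 * trailing newline, mirroring debug_get_user_string() above.
 */
static char *copy_user_line(const char __user *ubuf, size_t len)
{
	char *buf;

	buf = memdup_user_nul(ubuf, len);	/* kmalloc + copy + NUL */
	if (IS_ERR(buf))
		return buf;
	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	return buf;
}
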
diff --git a/arch/s390/kernel/diag/diag310.c b/arch/s390/kernel/diag/diag310.c
index d6a34454aa5a..f411562aa7f6 100644
--- a/arch/s390/kernel/diag/diag310.c
+++ b/arch/s390/kernel/diag/diag310.c
@@ -66,7 +66,7 @@ static inline unsigned long diag310(unsigned long subcode, unsigned long size, v
union register_pair rp = { .even = (unsigned long)addr, .odd = size };
diag_stat_inc(DIAG_STAT_X310);
- asm volatile("diag %[rp],%[subcode],0x310\n"
+ asm volatile("diag %[rp],%[subcode],0x310"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");
diff --git a/arch/s390/kernel/diag/diag324.c b/arch/s390/kernel/diag/diag324.c
index 7fa4c0b7eb6c..fe325c2a2d0d 100644
--- a/arch/s390/kernel/diag/diag324.c
+++ b/arch/s390/kernel/diag/diag324.c
@@ -101,7 +101,7 @@ static unsigned long diag324(unsigned long subcode, void *addr)
union register_pair rp = { .even = (unsigned long)addr };
diag_stat_inc(DIAG_STAT_X324);
- asm volatile("diag %[rp],%[subcode],0x324\n"
+ asm volatile("diag %[rp],%[subcode],0x324"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");
@@ -116,7 +116,7 @@ static void pibwork_handler(struct work_struct *work)
mutex_lock(&pibmutex);
timedout = ktime_add_ns(data->expire, PIBWORK_DELAY);
if (ktime_before(ktime_get(), timedout)) {
- mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
+ mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
goto out;
}
vfree(data->pib);
@@ -174,7 +174,7 @@ long diag324_pibbuf(unsigned long arg)
pib_update(data);
data->sequence++;
data->expire = ktime_add_ns(ktime_get(), tod_to_ns(data->pib->intv));
- mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
+ mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
first = false;
}
rc = data->rc;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 9adfbdd377dc..544e5403dd91 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/machine.h>
@@ -65,7 +66,7 @@ static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized\n");
+ kasan_init_generic();
#endif
}
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
index e7b66d046e8d..2507bc3f7757 100644
--- a/arch/s390/kernel/hiperdispatch.c
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -191,7 +191,7 @@ int hd_enable_hiperdispatch(void)
return 0;
if (hd_online_cores <= hd_entitled_cores)
return 0;
- mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
+ mod_delayed_work(system_dfl_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
hd_update_capacities();
return 1;
}
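
Both workqueue hunks above move callers off the old system_wq name: diag324.c queues its delayed work on system_percpu_wq (the per-CPU system workqueue), while hiperdispatch.c uses system_dfl_wq (the default, unbound one). mod_delayed_work() either queues the work or, if it is already pending, re-arms its timer. A small sketch of the re-arming pattern, with an illustrative work item that is not part of the patch:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

/* Runs periodically; re-arms itself one second into the future. */
static void poll_fn(struct work_struct *work)
{
	/* ... sample some state ... */
	mod_delayed_work(system_percpu_wq, &poll_work, msecs_to_jiffies(1000));
}
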
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 9455f213dc20..62bf8a15bf32 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -80,6 +80,15 @@ static int paicrypt_root_alloc(void)
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);
+/* Free all memory allocated for event counting/sampling setup */
+static void paicrypt_free(struct paicrypt_mapptr *mp)
+{
+ free_page((unsigned long)mp->mapptr->page);
+ kvfree(mp->mapptr->save);
+ kfree(mp->mapptr);
+ mp->mapptr = NULL;
+}
+
/* Adjust usage counters and remove allocated memory when all users are
* gone.
*/
@@ -93,15 +102,8 @@ static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
"refcnt %u\n", __func__, event->attr.config,
event->cpu, cpump->active_events,
refcount_read(&cpump->refcnt));
- if (refcount_dec_and_test(&cpump->refcnt)) {
- debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
- __func__, (unsigned long)cpump->page,
- cpump->save);
- free_page((unsigned long)cpump->page);
- kvfree(cpump->save);
- kfree(cpump);
- mp->mapptr = NULL;
- }
+ if (refcount_dec_and_test(&cpump->refcnt))
+ paicrypt_free(mp);
paicrypt_root_free();
mutex_unlock(&pai_reserve_mutex);
}
@@ -175,14 +177,13 @@ static u64 paicrypt_getall(struct perf_event *event)
*
* Allocate the memory for the event.
*/
-static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
+static int paicrypt_alloc_cpu(struct perf_event *event, int cpu)
{
struct paicrypt_map *cpump = NULL;
struct paicrypt_mapptr *mp;
int rc;
mutex_lock(&pai_reserve_mutex);
-
/* Allocate root node */
rc = paicrypt_root_alloc();
if (rc)
@@ -192,58 +193,44 @@ static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
cpump = mp->mapptr;
if (!cpump) { /* Paicrypt_map allocated? */
+ rc = -ENOMEM;
cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
- if (!cpump) {
- rc = -ENOMEM;
- goto free_root;
+ if (!cpump)
+ goto undo;
+ /* Allocate memory for counter page and counter extraction.
+ * Only the first counting event has to allocate a page.
+ */
+ mp->mapptr = cpump;
+ cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ cpump->save = kvmalloc_array(paicrypt_cnt + 1,
+ sizeof(struct pai_userdata),
+ GFP_KERNEL);
+ if (!cpump->page || !cpump->save) {
+ paicrypt_free(mp);
+ goto undo;
}
INIT_LIST_HEAD(&cpump->syswide_list);
- }
-
- /* Allocate memory for counter page and counter extraction.
- * Only the first counting event has to allocate a page.
- */
- if (cpump->page) {
+ refcount_set(&cpump->refcnt, 1);
+ rc = 0;
+ } else {
refcount_inc(&cpump->refcnt);
- goto unlock;
}
- rc = -ENOMEM;
- cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!cpump->page)
- goto free_paicrypt_map;
- cpump->save = kvmalloc_array(paicrypt_cnt + 1,
- sizeof(struct pai_userdata), GFP_KERNEL);
- if (!cpump->save) {
- free_page((unsigned long)cpump->page);
- cpump->page = NULL;
- goto free_paicrypt_map;
+undo:
+ if (rc) {
+ /* Error in allocation of event, decrement anchor. Since
+ * the event is not created, its destroy() function is never
+ * invoked. Adjust the reference counter for the anchor.
+ */
+ paicrypt_root_free();
}
-
- /* Set mode and reference count */
- rc = 0;
- refcount_set(&cpump->refcnt, 1);
- mp->mapptr = cpump;
- debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
- "save %p rc %d\n", __func__, cpump->active_events,
- refcount_read(&cpump->refcnt),
- (unsigned long)cpump->page, cpump->save, rc);
- goto unlock;
-
-free_paicrypt_map:
- /* Undo memory allocation */
- kfree(cpump);
- mp->mapptr = NULL;
-free_root:
- paicrypt_root_free();
unlock:
mutex_unlock(&pai_reserve_mutex);
- return rc ? ERR_PTR(rc) : cpump;
+ return rc;
}
-static int paicrypt_event_init_all(struct perf_event *event)
+static int paicrypt_alloc(struct perf_event *event)
{
- struct paicrypt_map *cpump;
struct cpumask *maskptr;
int cpu, rc = -ENOMEM;
@@ -252,12 +239,11 @@ static int paicrypt_event_init_all(struct perf_event *event)
goto out;
for_each_online_cpu(cpu) {
- cpump = paicrypt_busy(event, cpu);
- if (IS_ERR(cpump)) {
+ rc = paicrypt_alloc_cpu(event, cpu);
+ if (rc) {
for_each_cpu(cpu, maskptr)
paicrypt_event_destroy_cpu(event, cpu);
kfree(maskptr);
- rc = PTR_ERR(cpump);
goto out;
}
cpumask_set_cpu(cpu, maskptr);
@@ -279,7 +265,6 @@ out:
static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
- struct paicrypt_map *cpump;
int rc = 0;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
@@ -301,13 +286,10 @@ static int paicrypt_event_init(struct perf_event *event)
}
}
- if (event->cpu >= 0) {
- cpump = paicrypt_busy(event, event->cpu);
- if (IS_ERR(cpump))
- rc = PTR_ERR(cpump);
- } else {
- rc = paicrypt_event_init_all(event);
- }
+ if (event->cpu >= 0)
+ rc = paicrypt_alloc_cpu(event, event->cpu);
+ else
+ rc = paicrypt_alloc(event);
if (rc) {
free_page(PAI_SAVE_AREA(event));
goto out;
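
The perf_pai_crypto.c rework above folds allocation and teardown into paicrypt_alloc_cpu()/paicrypt_free(): the first event on a CPU allocates the map, its counter page and save area and takes the initial reference with refcount_set(&cpump->refcnt, 1); any later event only does refcount_inc(); the destroy path frees everything once refcount_dec_and_test() reports the last user gone, and an allocation failure drops the root anchor again because destroy() is never invoked for an event that was not created. A reduced sketch of that get/put ownership pattern under a mutex (types and names simplified, not the driver's own):

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

struct percpu_map {
	refcount_t refcnt;
	void *page;
};

static DEFINE_MUTEX(map_lock);
static struct percpu_map *the_map;	/* stands in for mp->mapptr */

static int map_get(void)
{
	int rc = 0;

	mutex_lock(&map_lock);
	if (!the_map) {
		/* First user: allocate and take the initial reference. */
		the_map = kzalloc(sizeof(*the_map), GFP_KERNEL);
		if (!the_map) {
			rc = -ENOMEM;
			goto unlock;
		}
		the_map->page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!the_map->page) {
			kfree(the_map);
			the_map = NULL;
			rc = -ENOMEM;
			goto unlock;
		}
		refcount_set(&the_map->refcnt, 1);
	} else {
		/* Additional user: just bump the reference count. */
		refcount_inc(&the_map->refcnt);
	}
unlock:
	mutex_unlock(&map_lock);
	return rc;
}

static void map_put(void)
{
	mutex_lock(&map_lock);
	if (the_map && refcount_dec_and_test(&the_map->refcnt)) {
		/* Last user gone: release everything. */
		free_page((unsigned long)the_map->page);
		kfree(the_map);
		the_map = NULL;
	}
	mutex_unlock(&map_lock);
}
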
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f55f09cda6f8..b107dbca4ed7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -106,7 +106,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long new_stackp = args->stack;
unsigned long tls = args->tls;
struct fake_frame
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b529868789f..892fce2b7549 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -839,7 +839,7 @@ static void __init setup_control_program_code(void)
return;
diag_stat_inc(DIAG_STAT_X318);
- asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
+ asm volatile("diag %0,0,0x318" : : "d" (diag318_info.val));
}
/*
diff --git a/arch/s390/kernel/skey.c b/arch/s390/kernel/skey.c
index ba049fd103c2..cc869de6e3a5 100644
--- a/arch/s390/kernel/skey.c
+++ b/arch/s390/kernel/skey.c
@@ -11,7 +11,7 @@ static inline unsigned long load_real_address(unsigned long address)
unsigned long real;
asm volatile(
- " lra %[real],0(%[address])\n"
+ " lra %[real],0(%[address])"
: [real] "=d" (real)
: [address] "a" (address)
: "cc");
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e88ebe5339fc..da84c0dc6b7e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -340,7 +340,7 @@ static void pcpu_delegate(struct pcpu *pcpu, int cpu,
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
"1: sigp 0,%1,%3 # sigp stop to current cpu\n"
- " brc 2,1b # busy, try again\n"
+ " brc 2,1b # busy, try again"
: : "d" (pcpu->address), "d" (source_cpu),
"K" (SIGP_RESTART), "K" (SIGP_STOP)
: "0", "1", "cc");
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 46569b8e47dd..1594c80e9bc4 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -509,33 +509,27 @@ int topology_cpu_init(struct cpu *cpu)
return rc;
}
-static const struct cpumask *cpu_thread_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_mask;
-}
-
-
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
}
-static const struct cpumask *cpu_book_mask(int cpu)
+static const struct cpumask *tl_book_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].book_mask;
}
-static const struct cpumask *cpu_drawer_mask(int cpu)
+static const struct cpumask *tl_drawer_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].drawer_mask;
}
static struct sched_domain_topology_level s390_topology[] = {
- SDTL_INIT(cpu_thread_mask, cpu_smt_flags, SMT),
- SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
- SDTL_INIT(cpu_book_mask, NULL, BOOK),
- SDTL_INIT(cpu_drawer_mask, NULL, DRAWER),
- SDTL_INIT(cpu_cpu_mask, NULL, PKG),
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
+ SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
+ SDTL_INIT(tl_book_mask, NULL, BOOK),
+ SDTL_INIT(tl_drawer_mask, NULL, DRAWER),
+ SDTL_INIT(tl_pkg_mask, NULL, PKG),
{ NULL, },
};
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 47f574cd1728..5d17609bcfe1 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
folio_get(folio);
rc = uv_destroy(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
folio_get(folio);
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
* convert_to_secure.
* As secure pages are never large folios, both variants can co-exist.
*/
- if (!test_bit(PG_arch_1, &folio->flags))
+ if (!test_bit(PG_arch_1, &folio->flags.f))
return 0;
rc = uv_pin_shared(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
@@ -866,8 +866,8 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
return -ENOENT;
}
-/*
- * Do the actual search for `uv_get_secret_metadata`.
+/**
+ * uv_find_secret() - search secret metadata for a given secret id.
* @secret_id: search pattern.
* @list: ephemeral buffer space
* @secret: output data, containing the secret's metadata.
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 1c606dfa595d..feecf1a6ddb4 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -209,6 +209,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_end = . ;
+ /* Debugging sections. */
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
/*
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
@@ -239,11 +244,6 @@ SECTIONS
#endif
} :NONE
- /* Debugging sections. */
- STABS_DEBUG
- DWARF_DEBUG
- ELF_DETAILS
-
/*
* Make sure that the .got.plt is either completely empty or it
* contains only the three reserved double words.