Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/legacy_freezer.c |   2
-rw-r--r--  kernel/freezer.c               |   2
-rw-r--r--  kernel/power/console.c         |   8
-rw-r--r--  kernel/power/hibernate.c       |   6
-rw-r--r--  kernel/power/main.c            |  79
-rw-r--r--  kernel/power/power.h           |   1
-rw-r--r--  kernel/power/snapshot.c        |  13
-rw-r--r--  kernel/power/suspend.c         |  12
-rw-r--r--  kernel/power/swap.c            | 256
-rw-r--r--  kernel/power/user.c            |   4
10 files changed, 252 insertions(+), 131 deletions(-)
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index dd9417425d92..915b02f65980 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -63,7 +63,7 @@ static struct freezer *parent_freezer(struct freezer *freezer)
return css_freezer(freezer->css.parent);
}
-bool cgroup_freezing(struct task_struct *task)
+bool cgroup1_freezing(struct task_struct *task)
{
bool ret;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index ddc11a8bd2ea..a76bf957fb32 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,7 +44,7 @@ bool freezing_slow_path(struct task_struct *p)
if (tsk_is_oom_victim(p))
return false;
- if (pm_nosig_freezing || cgroup_freezing(p))
+ if (pm_nosig_freezing || cgroup1_freezing(p))
return true;
if (pm_freezing && !(p->flags & PF_KTHREAD))
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 19c48aa5355d..a906a0ac0f9b 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -44,9 +44,10 @@ static LIST_HEAD(pm_vt_switch_list);
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
-void pm_vt_switch_required(struct device *dev, bool required)
+int pm_vt_switch_required(struct device *dev, bool required)
{
struct pm_vt_switch *entry, *tmp;
+ int ret = 0;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
@@ -58,8 +59,10 @@ void pm_vt_switch_required(struct device *dev, bool required)
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
+ if (!entry) {
+ ret = -ENOMEM;
goto out;
+ }
entry->required = required;
entry->dev = dev;
@@ -67,6 +70,7 @@ void pm_vt_switch_required(struct device *dev, bool required)
list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
+ return ret;
}
EXPORT_SYMBOL(pm_vt_switch_required);
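With pm_vt_switch_required() now returning an int, callers can detect when the registration entry could not be allocated instead of having the request silently dropped. A minimal sketch of a caller checking the new return value; the driver and its probe function are hypothetical, and the header providing the declaration is assumed:

#include <linux/device.h>
#include <linux/pm.h>	/* pm_vt_switch_required() declaration assumed here */

/* Hypothetical caller; only pm_vt_switch_required() comes from this patch. */
static int mydrv_probe(struct device *dev)
{
	int ret;

	/* This device requires a VT switch around suspend/resume. */
	ret = pm_vt_switch_required(dev, true);
	if (ret)
		return ret;	/* -ENOMEM: list entry allocation failed */

	return 0;
}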
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 26e45f86b955..af8d07bafe02 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -820,7 +820,10 @@ int hibernate(void)
if (error)
goto Restore;
- ksys_sync_helper();
+ error = pm_sleep_fs_sync();
+ if (error)
+ goto Notify;
+
filesystems_freeze(filesystem_freeze_enabled);
error = freeze_processes();
@@ -891,6 +894,7 @@ int hibernate(void)
freezer_test_done = false;
Exit:
filesystems_thaw();
+ Notify:
pm_notifier_call_chain(PM_POST_HIBERNATION);
Restore:
pm_restore_console();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index a6cbc3f4347a..03b2c5495c77 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -18,6 +18,8 @@
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
#include "power.h"
@@ -92,6 +94,61 @@ void ksys_sync_helper(void)
}
EXPORT_SYMBOL_GPL(ksys_sync_helper);
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
+/* Wakeup event handling resolution, in jiffies, while syncing file systems */
+#define PM_FS_SYNC_WAKEUP_RESOLUTION 5
+
+static atomic_t pm_fs_sync_count = ATOMIC_INIT(0);
+static struct workqueue_struct *pm_fs_sync_wq;
+static DECLARE_WAIT_QUEUE_HEAD(pm_fs_sync_wait);
+
+static bool pm_fs_sync_completed(void)
+{
+ return atomic_read(&pm_fs_sync_count) == 0;
+}
+
+static void pm_fs_sync_work_fn(struct work_struct *work)
+{
+ ksys_sync_helper();
+
+ if (atomic_dec_and_test(&pm_fs_sync_count))
+ wake_up(&pm_fs_sync_wait);
+}
+static DECLARE_WORK(pm_fs_sync_work, pm_fs_sync_work_fn);
+
+/**
+ * pm_sleep_fs_sync() - Sync file systems in an interruptible way
+ *
+ * Return: 0 on successful file system sync, or -EBUSY if the file system sync
+ * was aborted.
+ */
+int pm_sleep_fs_sync(void)
+{
+ pm_wakeup_clear(0);
+
+ /*
+ * Take back-to-back sleeps into account by queuing a subsequent fs sync
+ * only if the previous fs sync is running or is not queued. Multiple fs
+ * syncs increase the likelihood of saving the latest files immediately
+ * before sleep.
+ */
+ if (!work_pending(&pm_fs_sync_work)) {
+ atomic_inc(&pm_fs_sync_count);
+ queue_work(pm_fs_sync_wq, &pm_fs_sync_work);
+ }
+
+ while (!pm_fs_sync_completed()) {
+ if (pm_wakeup_pending())
+ return -EBUSY;
+
+ wait_event_timeout(pm_fs_sync_wait, pm_fs_sync_completed(),
+ PM_FS_SYNC_WAKEUP_RESOLUTION);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_SUSPEND || CONFIG_HIBERNATION */
+
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
@@ -231,10 +288,10 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
power_attr(mem_sleep);
/*
- * sync_on_suspend: invoke ksys_sync_helper() before suspend.
+ * sync_on_suspend: Sync file systems before suspend.
*
- * show() returns whether ksys_sync_helper() is invoked before suspend.
- * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
+ * show() returns whether file system sync before suspend is enabled.
+ * store() accepts 0 or 1. 0 disables file system sync and 1 enables it.
*/
bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
@@ -1066,16 +1123,26 @@ static const struct attribute_group *attr_groups[] = {
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
-static int __init pm_start_workqueue(void)
+static int __init pm_start_workqueues(void)
{
pm_wq = alloc_workqueue("pm", WQ_FREEZABLE | WQ_UNBOUND, 0);
+ if (!pm_wq)
+ return -ENOMEM;
- return pm_wq ? 0 : -ENOMEM;
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
+ pm_fs_sync_wq = alloc_ordered_workqueue("pm_fs_sync", 0);
+ if (!pm_fs_sync_wq) {
+ destroy_workqueue(pm_wq);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
}
static int __init pm_init(void)
{
- int error = pm_start_workqueue();
+ int error = pm_start_workqueues();
if (error)
return error;
hibernate_image_size_init();
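pm_sleep_fs_sync() replaces the blocking ksys_sync_helper() calls in the sleep paths: the sync itself runs on the ordered pm_fs_sync workqueue while the caller polls pm_wakeup_pending() every PM_FS_SYNC_WAKEUP_RESOLUTION jiffies, so an incoming wakeup event can abort the transition instead of waiting for the sync to finish. A minimal sketch of the expected calling pattern; the function name is illustrative, and the real call sites are in the hibernate.c, suspend.c and user.c hunks below:

/* Illustrative only; see the real call sites later in this patch. */
static int example_enter_sleep(void)
{
	int error;

	error = pm_sleep_fs_sync();
	if (error)
		return error;	/* -EBUSY: a wakeup event aborted the sync */

	/* ... freeze processes and enter the sleep state ... */
	return 0;
}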
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 7ccd709af93f..75b63843886e 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -19,6 +19,7 @@ struct swsusp_info {
} __aligned(PAGE_SIZE);
#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
+extern int pm_sleep_fs_sync(void);
extern bool filesystem_freeze_enabled;
#endif
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 645f42e40478..0a946932d5c1 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -2110,22 +2110,20 @@ asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
- pr_info("Creating image:\n");
+ pm_deferred_pr_dbg("Creating image\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
- pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
+ pm_deferred_pr_dbg("Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
- pr_err("Not enough free memory\n");
+ pm_deferred_pr_dbg("Not enough free memory for image creation\n");
return -ENOMEM;
}
- if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
- pr_err("Memory allocation failed\n");
+ if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem))
return -ENOMEM;
- }
/*
* During allocating of suspend pagedir, new cold pages may appear.
@@ -2144,7 +2142,8 @@ asmlinkage __visible int swsusp_save(void)
nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
+ pm_deferred_pr_dbg("Image created (%d pages copied, %d zero pages)\n",
+ nr_copy_pages, nr_zero_pages);
return 0;
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3d4ebedad69f..2da4482bb6eb 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -344,10 +344,14 @@ MODULE_PARM_DESC(pm_test_delay,
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
+ int i;
+
if (pm_test_level == level) {
pr_info("suspend debug: Waiting for %d second(s).\n",
pm_test_delay);
- mdelay(pm_test_delay * 1000);
+ for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++)
+ msleep(1000);
+
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
@@ -589,7 +593,11 @@ static int enter_state(suspend_state_t state)
if (sync_on_suspend_enabled) {
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
- ksys_sync_helper();
+
+ error = pm_sleep_fs_sync();
+ if (error)
+ goto Unlock;
+
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 70ae21f7370d..33a186373bef 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -46,19 +46,18 @@ static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
- * The swap map is a data structure used for keeping track of each page
- * written to a swap partition. It consists of many swap_map_page
- * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
- * These structures are stored on the swap and linked together with the
- * help of the .next_swap member.
+ * The swap map is a data structure used for keeping track of each page
+ * written to a swap partition. It consists of many swap_map_page structures
+ * that each contain an array of MAP_PAGE_ENTRIES swap entries. These
+ * structures are stored on the swap and linked together with the help of the
+ * .next_swap member.
*
- * The swap map is created during suspend. The swap map pages are
- * allocated and populated one at a time, so we only need one memory
- * page to set up the entire structure.
+ * The swap map is created during suspend. The swap map pages are allocated and
+ * populated one at a time, so we only need one memory page to set up the entire
+ * structure.
*
- * During resume we pick up all swap_map_page structures into a list.
+ * During resume we read all swap_map_page structures into a list.
*/
-
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
/*
@@ -89,10 +88,8 @@ struct swap_map_page_list {
};
/*
- * The swap_map_handle structure is used for handling swap in
- * a file-alike way
+ * The swap_map_handle structure is used for handling swap in a file-like way.
*/
-
struct swap_map_handle {
struct swap_map_page *cur;
struct swap_map_page_list *maps;
@@ -117,10 +114,9 @@ struct swsusp_header {
static struct swsusp_header *swsusp_header;
/*
- * The following functions are used for tracing the allocated
- * swap pages, so that they can be freed in case of an error.
+ * The following functions are used for tracking the allocated swap pages, so
+ * that they can be freed in case of an error.
*/
-
struct swsusp_extent {
struct rb_node node;
unsigned long start;
@@ -170,15 +166,14 @@ static int swsusp_extents_insert(unsigned long swap_offset)
return 0;
}
-/*
- * alloc_swapdev_block - allocate a swap page and register that it has
- * been allocated, so that it can be freed in case of an error.
- */
-
sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
+ /*
+ * Allocate a swap page and register that it has been allocated, so that
+ * it can be freed in case of an error.
+ */
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
@@ -189,16 +184,14 @@ sector_t alloc_swapdev_block(int swap)
return 0;
}
-/*
- * free_all_swap_pages - free swap pages allocated for saving image data.
- * It also frees the extents used to register which swap entries had been
- * allocated.
- */
-
void free_all_swap_pages(int swap)
{
struct rb_node *node;
+ /*
+ * Free swap pages allocated for saving image data. It also frees the
+ * extents used to register which swap entries had been allocated.
+ */
while ((node = swsusp_extents.rb_node)) {
struct swsusp_extent *ext;
@@ -303,6 +296,7 @@ static int hib_wait_io(struct hib_bio_batch *hb)
/*
* Saving part
*/
+
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
int error;
@@ -336,16 +330,14 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
*/
unsigned int swsusp_header_flags;
-/**
- * swsusp_swap_check - check if the resume device is a swap device
- * and get its index (if so)
- *
- * This is called before saving image
- */
static int swsusp_swap_check(void)
{
int res;
+ /*
+ * Check if the resume device is a swap device and get its index (if so).
+ * This is called before saving the image.
+ */
if (swsusp_resume_device)
res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
else
@@ -362,13 +354,6 @@ static int swsusp_swap_check(void)
return 0;
}
-/**
- * write_page - Write one page to given swap location.
- * @buf: Address we're writing.
- * @offset: Offset of the swap page we're writing to.
- * @hb: bio completion batch
- */
-
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
@@ -519,17 +504,14 @@ static int swap_writer_finish(struct swap_map_handle *handle,
CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
-/* Maximum number of threads for compression/decompression. */
-#define CMP_THREADS 3
+/* Default number of threads for compression/decompression. */
+#define CMP_THREADS 3
+static unsigned int hibernate_compression_threads = CMP_THREADS;
/* Minimum/maximum number of pages for read buffering. */
#define CMP_MIN_RD_PAGES 1024
#define CMP_MAX_RD_PAGES 8192
-/**
- * save_image - save the suspend image data
- */
-
static int save_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
@@ -585,13 +567,48 @@ struct crc_data {
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
- size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */
- unsigned char *unc[CMP_THREADS]; /* uncompressed data */
+ size_t **unc_len; /* uncompressed lengths */
+ unsigned char **unc; /* uncompressed data */
};
-/*
- * CRC32 update function that runs in its own thread.
- */
+static struct crc_data *alloc_crc_data(int nr_threads)
+{
+ struct crc_data *crc;
+
+ crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ if (!crc)
+ return NULL;
+
+ crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
+ if (!crc->unc)
+ goto err_free_crc;
+
+ crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
+ if (!crc->unc_len)
+ goto err_free_unc;
+
+ return crc;
+
+err_free_unc:
+ kfree(crc->unc);
+err_free_crc:
+ kfree(crc);
+ return NULL;
+}
+
+static void free_crc_data(struct crc_data *crc)
+{
+ if (!crc)
+ return;
+
+ if (crc->thr)
+ kthread_stop(crc->thr);
+
+ kfree(crc->unc_len);
+ kfree(crc->unc);
+ kfree(crc);
+}
+
static int crc32_threadfn(void *data)
{
struct crc_data *d = data;
@@ -616,6 +633,7 @@ static int crc32_threadfn(void *data)
}
return 0;
}
+
/*
* Structure used for data compression.
*/
@@ -637,9 +655,6 @@ struct cmp_data {
/* Indicates the image size after compression */
static atomic64_t compressed_size = ATOMIC_INIT(0);
-/*
- * Compression function that runs in its own thread.
- */
static int compress_threadfn(void *data)
{
struct cmp_data *d = data;
@@ -671,12 +686,6 @@ static int compress_threadfn(void *data)
return 0;
}
-/**
- * save_compressed_image - Save the suspend image data after compression.
- * @handle: Swap map handle to use for saving the image.
- * @snapshot: Image to read data from.
- * @nr_to_write: Number of pages to save.
- */
static int save_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
@@ -703,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) {
@@ -719,7 +728,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -888,11 +897,7 @@ out_finish:
out_clean:
hib_finish_batch(&hb);
- if (crc) {
- if (crc->thr)
- kthread_stop(crc->thr);
- kfree(crc);
- }
+ free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -908,13 +913,6 @@ out_clean:
return ret;
}
-/**
- * enough_swap - Make sure we have enough swap to save the image.
- *
- * Returns TRUE or FALSE after checking the total amount of swap
- * space available from the resume partition.
- */
-
static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
@@ -927,15 +925,16 @@ static int enough_swap(unsigned int nr_pages)
}
/**
- * swsusp_write - Write entire image and metadata.
- * @flags: flags to pass to the "boot" kernel in the image header
+ * swsusp_write - Write entire image and metadata.
+ * @flags: flags to pass to the "boot" kernel in the image header
+ *
+ * It is important _NOT_ to unmount filesystems at this point. We want them
+ * synced (in case something goes wrong), but we do _NOT_ want to mark the
+ * filesystem clean: it is not. (And it does not matter; if we resume
+ * correctly, we will mark the system clean anyway.)
*
- * It is important _NOT_ to umount filesystems at this point. We want
- * them synced (in case something goes wrong) but we DO not want to mark
- * filesystem clean: it is not. (And it does not matter, if we resume
- * correctly, we'll mark system clean, anyway.)
+ * Return: 0 on success, negative error code on failure.
*/
-
int swsusp_write(unsigned int flags)
{
struct swap_map_handle handle;
@@ -978,8 +977,8 @@ out_finish:
}
/*
- * The following functions allow us to read data using a swap map
- * in a file-like way.
+ * The following functions allow us to read data using a swap map in a file-like
+ * way.
*/
static void release_swap_reader(struct swap_map_handle *handle)
@@ -1081,12 +1080,6 @@ static int swap_reader_finish(struct swap_map_handle *handle)
return 0;
}
-/**
- * load_image - load the image using the swap map handle
- * @handle and the snapshot handle @snapshot
- * (assume there are @nr_pages pages to load)
- */
-
static int load_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
@@ -1157,9 +1150,6 @@ struct dec_data {
unsigned char cmp[CMP_SIZE]; /* compressed buffer */
};
-/*
- * Decompression function that runs in its own thread.
- */
static int decompress_threadfn(void *data)
{
struct dec_data *d = data;
@@ -1194,12 +1184,6 @@ static int decompress_threadfn(void *data)
return 0;
}
-/**
- * load_compressed_image - Load compressed image data and decompress it.
- * @handle: Swap map handle to use for loading data.
- * @snapshot: Image to copy uncompressed data into.
- * @nr_to_read: Number of pages to load.
- */
static int load_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
@@ -1227,7 +1211,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
if (!page) {
@@ -1243,7 +1227,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -1510,11 +1494,7 @@ out_clean:
hib_finish_batch(&hb);
for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
- if (crc) {
- if (crc->thr)
- kthread_stop(crc->thr);
- kfree(crc);
- }
+ free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -1533,8 +1513,9 @@ out_clean:
* swsusp_read - read the hibernation image.
* @flags_p: flags passed by the "frozen" kernel in the image header should
* be written into this memory location
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-
int swsusp_read(unsigned int *flags_p)
{
int error;
@@ -1571,8 +1552,9 @@ static void *swsusp_holder;
/**
* swsusp_check - Open the resume device and check for the swsusp signature.
* @exclusive: Open the resume device exclusively.
+ *
+ * Return: 0 if a valid image is found, negative error code otherwise.
*/
-
int swsusp_check(bool exclusive)
{
void *holder = exclusive ? &swsusp_holder : NULL;
@@ -1622,7 +1604,6 @@ put:
/**
* swsusp_close - close resume device.
*/
-
void swsusp_close(void)
{
if (IS_ERR(hib_resume_bdev_file)) {
@@ -1634,9 +1615,10 @@ void swsusp_close(void)
}
/**
- * swsusp_unmark - Unmark swsusp signature in the resume device
+ * swsusp_unmark - Unmark swsusp signature in the resume device
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
@@ -1662,8 +1644,46 @@ int swsusp_unmark(void)
}
#endif
+static ssize_t hibernate_compression_threads_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", hibernate_compression_threads);
+}
+
+static ssize_t hibernate_compression_threads_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int val;
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 1)
+ return -EINVAL;
+
+ hibernate_compression_threads = val;
+ return n;
+}
+power_attr(hibernate_compression_threads);
+
+static struct attribute *g[] = {
+ &hibernate_compression_threads_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = g,
+};
+
static int __init swsusp_header_init(void)
{
+ int error;
+
+ error = sysfs_create_group(power_kobj, &attr_group);
+ if (error)
+ return error;
+
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
panic("Could not allocate memory for swsusp_header\n");
@@ -1671,3 +1691,15 @@ static int __init swsusp_header_init(void)
}
core_initcall(swsusp_header_init);
+
+static int __init hibernate_compression_threads_setup(char *str)
+{
+ int rc = kstrtouint(str, 0, &hibernate_compression_threads);
+
+ if (rc || hibernate_compression_threads < 1)
+ hibernate_compression_threads = CMP_THREADS;
+
+ return 1;
+}
+
+__setup("hibernate_compression_threads=", hibernate_compression_threads_setup);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 3f9e3efb9f6e..4401cfe26e5c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -278,7 +278,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (data->frozen)
break;
- ksys_sync_helper();
+ error = pm_sleep_fs_sync();
+ if (error)
+ break;
error = freeze_processes();
if (error)