Creation of Cybook 2416 (actually Gen4) repository
This commit is contained in:
173
kernel/power/Kconfig
Normal file
173
kernel/power/Kconfig
Normal file
@@ -0,0 +1,173 @@
|
||||
config PM
|
||||
bool "Power Management support"
|
||||
depends on !IA64_HP_SIM
|
||||
---help---
|
||||
"Power Management" means that parts of your computer are shut
|
||||
off or put into a power conserving "sleep" mode if they are not
|
||||
being used. There are two competing standards for doing this: APM
|
||||
and ACPI. If you want to use either one, say Y here and then also
|
||||
to the requisite support below.
|
||||
|
||||
Power Management is most important for battery powered laptop
|
||||
computers; if you have a laptop, check out the Linux Laptop home
|
||||
page on the WWW at <http://www.linux-on-laptops.com/> or
|
||||
Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
|
||||
and the Battery Powered Linux mini-HOWTO, available from
|
||||
<http://www.tldp.org/docs.html#howto>.
|
||||
|
||||
Note that, even if you say N here, Linux on the x86 architecture
|
||||
will issue the hlt instruction if nothing is to be done, thereby
|
||||
sending the processor to sleep and saving power.
|
||||
|
||||
config PM_LEGACY
|
||||
bool "Legacy Power Management API (DEPRECATED)"
|
||||
depends on PM
|
||||
default n
|
||||
---help---
|
||||
Support for pm_register() and friends. This old API is obsoleted
|
||||
by the driver model.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config PM_CPU_MODE
|
||||
bool "PM_CPU_MODE"
|
||||
depends on PM
|
||||
default y
|
||||
---help---
|
||||
This is for the power consumption of CPU mode
|
||||
|
||||
config PM_DEBUG
|
||||
bool "Power Management Debug Support"
|
||||
depends on PM
|
||||
---help---
|
||||
This option enables verbose debugging support in the Power Management
|
||||
code. This is helpful when debugging and reporting various PM bugs,
|
||||
like suspend support.
|
||||
|
||||
config DISABLE_CONSOLE_SUSPEND
|
||||
bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
|
||||
depends on PM && PM_DEBUG
|
||||
default n
|
||||
---help---
|
||||
This option turns off the console suspend mechanism that prevents
|
||||
debug messages from reaching the console during the suspend/resume
|
||||
operations. This may be helpful when debugging device drivers'
|
||||
suspend/resume routines, but may itself lead to problems, for example
|
||||
if netconsole is used.
|
||||
|
||||
config PM_TRACE
|
||||
bool "Suspend/resume event tracing"
|
||||
depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
|
||||
default n
|
||||
---help---
|
||||
This enables some cheesy code to save the last PM event point in the
|
||||
RTC across reboots, so that you can debug a machine that just hangs
|
||||
during suspend (or more commonly, during resume).
|
||||
|
||||
To use this debugging feature you should attempt to suspend the machine,
|
||||
then reboot it, then run
|
||||
|
||||
dmesg -s 1000000 | grep 'hash matches'
|
||||
|
||||
CAUTION: this option will cause your machine's real-time clock to be
|
||||
set to an invalid time after a resume.
|
||||
|
||||
config PM_SYSFS_DEPRECATED
|
||||
bool "Driver model /sys/devices/.../power/state files (DEPRECATED)"
|
||||
depends on PM && SYSFS
|
||||
default n
|
||||
help
|
||||
The driver model started out with a sysfs file intended to provide
|
||||
a userspace hook for device power management. This feature has never
|
||||
worked very well, except for limited testing purposes, and so it will
|
||||
be removed. It's not clear that a generic mechanism could really
|
||||
handle the wide variability of device power states; any replacements
|
||||
are likely to be bus or driver specific.
|
||||
|
||||
config SOFTWARE_SUSPEND
|
||||
bool "Software Suspend"
|
||||
depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
|
||||
---help---
|
||||
Enable the suspend to disk (STD) functionality.
|
||||
|
||||
You can suspend your machine with 'echo disk > /sys/power/state'.
|
||||
Alternatively, you can use the additional userland tools available
|
||||
from <http://suspend.sf.net>.
|
||||
|
||||
In principle it does not require ACPI or APM, although for example
|
||||
ACPI will be used if available.
|
||||
|
||||
It creates an image which is saved in your active swap. Upon the next
|
||||
boot, pass the 'resume=/dev/swappartition' argument to the kernel to
|
||||
have it detect the saved image, restore memory state from it, and
|
||||
continue to run as before. If you do not want the previous state to
|
||||
be reloaded, then use the 'noresume' kernel command line argument.
|
||||
Note, however, that fsck will be run on your filesystems and you will
|
||||
need to run mkswap against the swap partition used for the suspend.
|
||||
|
||||
It also works with swap files to a limited extent (for details see
|
||||
<file:Documentation/power/swsusp-and-swap-files.txt>).
|
||||
|
||||
Right now you may boot without resuming and resume later but in the
|
||||
meantime you cannot use the swap partition(s)/file(s) involved in
|
||||
suspending. Also in this case you must not use the filesystems
|
||||
that were mounted before the suspend. In particular, you MUST NOT
|
||||
MOUNT any journaled filesystems mounted before the suspend or they
|
||||
will get corrupted in a nasty way.
|
||||
|
||||
For more information take a look at <file:Documentation/power/swsusp.txt>.
|
||||
|
||||
config PM_STD_PARTITION
|
||||
string "Default resume partition"
|
||||
depends on SOFTWARE_SUSPEND
|
||||
default ""
|
||||
---help---
|
||||
The default resume partition is the partition that the suspend-
|
||||
to-disk implementation will look for a suspended disk image.
|
||||
|
||||
The partition specified here will be different for almost every user.
|
||||
It should be a valid swap partition (at least for now) that is turned
|
||||
on before suspending.
|
||||
|
||||
The partition specified can be overridden by specifying:
|
||||
|
||||
resume=/dev/<other device>
|
||||
|
||||
which will set the resume partition to the device specified.
|
||||
|
||||
Note there is currently not a way to specify which device to save the
|
||||
suspended image to. It will simply pick the first available swap
|
||||
device.
|
||||
|
||||
config SUSPEND_SMP
|
||||
bool
|
||||
depends on HOTPLUG_CPU && X86 && PM
|
||||
default y
|
||||
|
||||
config APM_EMULATION
|
||||
tristate "Advanced Power Management Emulation"
|
||||
depends on PM && SYS_SUPPORTS_APM_EMULATION
|
||||
help
|
||||
APM is a BIOS specification for saving power using several different
|
||||
techniques. This is mostly useful for battery powered laptops with
|
||||
APM compliant BIOSes. If you say Y here, the system time will be
|
||||
reset after a RESUME operation, the /proc/apm device will provide
|
||||
battery status information, and user-space programs will receive
|
||||
notification of APM "events" (e.g. battery status change).
|
||||
|
||||
In order to use APM, you will need supporting software. For location
|
||||
and more information, read <file:Documentation/pm.txt> and the
|
||||
Battery Powered Linux mini-HOWTO, available from
|
||||
<http://www.tldp.org/docs.html#howto>.
|
||||
|
||||
This driver does not spin down disk drives (see the hdparm(8)
|
||||
manpage ("man 8 hdparm") for that), and it doesn't turn off
|
||||
VESA-compliant "green" monitors.
|
||||
|
||||
Generally, if you don't have a battery in your machine, there isn't
|
||||
much point in using this driver and you should say N. If you get
|
||||
random kernel OOPSes or reboots that don't seem to be related to
|
||||
anything, try disabling/enabling this option (or disabling/enabling
|
||||
APM in your BIOS).
|
||||
|
||||
source "drivers/char/s3c-dvfs/Kconfig"
|
||||
10
kernel/power/Makefile
Normal file
10
kernel/power/Makefile
Normal file
@@ -0,0 +1,10 @@
|
||||
|
||||
# Compile the whole PM core with -DDEBUG when PM debugging is enabled,
# so pr_debug() calls in this directory produce output.
ifeq ($(CONFIG_PM_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif

# Core PM objects are always built.
obj-y := main.o process.o console.o
# Legacy pm_register() API (deprecated).
obj-$(CONFIG_PM_LEGACY) += pm.o
# Suspend-to-disk machinery.
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o

# SysRq-o emergency power off.
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
||||
58
kernel/power/console.c
Normal file
58
kernel/power/console.c
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* drivers/power/process.c - Functions for saving/restoring console.
|
||||
*
|
||||
* Originally from swsusp.
|
||||
*/
|
||||
|
||||
#include <linux/vt_kern.h>
|
||||
#include <linux/kbd_kern.h>
|
||||
#include <linux/console.h>
|
||||
#include "power.h"
|
||||
|
||||
#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
|
||||
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
|
||||
|
||||
static int orig_fgconsole, orig_kmsg;
|
||||
|
||||
/*
 * pm_prepare_console - switch to a dedicated virtual console for suspend.
 *
 * Saves the current foreground console, allocates and switches to the
 * last VC (SUSPEND_CONSOLE), and redirects kernel messages there so
 * suspend/resume output does not disturb the user's screen.
 *
 * Returns 0 on success, 1 on any failure (caller then continues on the
 * current console).
 */
int pm_prepare_console(void)
{
	acquire_console_sem();

	/* Remember where to switch back in pm_restore_console(). */
	orig_fgconsole = fg_console;

	if (vc_allocate(SUSPEND_CONSOLE)) {
		/* we can't have a free VC for now. Too bad,
		 * we don't want to mess the screen for now. */
		release_console_sem();
		return 1;
	}

	if (set_console(SUSPEND_CONSOLE)) {
		/*
		 * We're unable to switch to the SUSPEND_CONSOLE.
		 * Let the calling function know so it can decide
		 * what to do.
		 */
		release_console_sem();
		return 1;
	}
	release_console_sem();

	/* The VC switch completes asynchronously; wait until it is active. */
	if (vt_waitactive(SUSPEND_CONSOLE)) {
		pr_debug("Suspend: Can't switch VCs.");
		return 1;
	}
	/* Redirect kernel messages, saving the old target for restore. */
	orig_kmsg = kmsg_redirect;
	kmsg_redirect = SUSPEND_CONSOLE;
	return 0;
}
|
||||
|
||||
void pm_restore_console(void)
|
||||
{
|
||||
acquire_console_sem();
|
||||
set_console(orig_fgconsole);
|
||||
release_console_sem();
|
||||
kmsg_redirect = orig_kmsg;
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
465
kernel/power/disk.c
Normal file
465
kernel/power/disk.c
Normal file
@@ -0,0 +1,465 @@
|
||||
/*
|
||||
* kernel/power/disk.c - Suspend-to-disk support.
|
||||
*
|
||||
* Copyright (c) 2003 Patrick Mochel
|
||||
* Copyright (c) 2003 Open Source Development Lab
|
||||
* Copyright (c) 2004 Pavel Machek <pavel@suse.cz>
|
||||
*
|
||||
* This file is released under the GPLv2.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
|
||||
static int noresume = 0;
|
||||
char resume_file[256] = CONFIG_PM_STD_PARTITION;
|
||||
dev_t swsusp_resume_device;
|
||||
sector_t swsusp_resume_block;
|
||||
|
||||
/**
|
||||
* platform_prepare - prepare the machine for hibernation using the
|
||||
* platform driver if so configured and return an error code if it fails
|
||||
*/
|
||||
|
||||
static inline int platform_prepare(void)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
if (pm_disk_mode == PM_DISK_PLATFORM) {
|
||||
if (pm_ops && pm_ops->prepare)
|
||||
error = pm_ops->prepare(PM_SUSPEND_DISK);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* power_down - Shut machine down for hibernate.
|
||||
* @mode: Suspend-to-disk mode
|
||||
*
|
||||
* Use the platform driver, if configured so, and return gracefully if it
|
||||
* fails.
|
||||
* Otherwise, try to power off and reboot. If they fail, halt the machine,
|
||||
* there ain't no turning back.
|
||||
*/
|
||||
|
||||
/*
 * power_down - shut the machine down after the hibernation image is written.
 * @mode: selected suspend-to-disk method.
 *
 * Never returns: every path ends in power-off, reboot, or a halt loop.
 */
static void power_down(suspend_disk_method_t mode)
{
	switch(mode) {
	case PM_DISK_PLATFORM:
		if (pm_ops && pm_ops->enter) {
			kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
			pm_ops->enter(PM_SUSPEND_DISK);
			break;
		}
		/* fall through: no platform enter hook, power off instead */
	case PM_DISK_SHUTDOWN:
		kernel_power_off();
		break;
	case PM_DISK_REBOOT:
		kernel_restart(NULL);
		break;
	}
	/* Reached only if the chosen method returned (i.e. failed). */
	kernel_halt();
	/* Valid image is on the disk, if we continue we risk serious data corruption
	   after resume. */
	printk(KERN_CRIT "Please power me down manually\n");
	while(1);
}
|
||||
|
||||
static inline void platform_finish(void)
|
||||
{
|
||||
if (pm_disk_mode == PM_DISK_PLATFORM) {
|
||||
if (pm_ops && pm_ops->finish)
|
||||
pm_ops->finish(PM_SUSPEND_DISK);
|
||||
}
|
||||
}
|
||||
|
||||
/* Undo prepare_processes(): restart userspace tasks and release the
 * suspend console. */
static void unprepare_processes(void)
{
	thaw_processes();
	pm_restore_console();
}
|
||||
|
||||
static int prepare_processes(void)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
pm_prepare_console();
|
||||
if (freeze_processes()) {
|
||||
error = -EBUSY;
|
||||
unprepare_processes();
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_suspend_disk - The granpappy of hibernation power management.
|
||||
*
|
||||
* If we're going through the firmware, then get it over with quickly.
|
||||
*
|
||||
* If not, then call swsusp to do its thing, then figure out how
|
||||
* to power down the system.
|
||||
*/
|
||||
|
||||
/*
 * pm_suspend_disk - top-level suspend-to-disk entry point.
 *
 * Freezes processes, snapshots memory, writes the image, and powers the
 * machine down.  The cleanup labels unwind in strict reverse order of
 * setup; the goto targets below must not be reordered.
 *
 * Returns 0 on success (after a resume from the written image) or a
 * negative error code.
 */
int pm_suspend_disk(void)
{
	int error;

	error = prepare_processes();
	if (error)
		return error;

	/* Debug mode: only exercise the process freezer, then thaw. */
	if (pm_disk_mode == PM_DISK_TESTPROC) {
		printk("swsusp debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		goto Thaw;
	}
	/* Free memory before shutting down devices. */
	error = swsusp_shrink_memory();
	if (error)
		goto Thaw;

	error = platform_prepare();
	if (error)
		goto Thaw;

	suspend_console();
	error = device_suspend(PMSG_FREEZE);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to suspend\n");
		goto Resume_devices;
	}
	error = disable_nonboot_cpus();
	if (error)
		goto Enable_cpus;

	/* Debug mode: stop short of actually snapshotting. */
	if (pm_disk_mode == PM_DISK_TEST) {
		printk("swsusp debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		goto Enable_cpus;
	}

	pr_debug("PM: snapshotting memory.\n");
	in_suspend = 1;
	error = swsusp_suspend();
	if (error)
		goto Enable_cpus;

	/* NOTE(review): execution appears to pass here twice — once right
	 * after taking the snapshot (in_suspend still 1, so the image is
	 * written and the box powers down) and once when the restored image
	 * continues (in_suspend cleared by the resume path) — confirm
	 * against swsusp_suspend()/swsusp_resume(). */
	if (in_suspend) {
		enable_nonboot_cpus();
		platform_finish();
		device_resume();
		resume_console();
		pr_debug("PM: writing image.\n");
		error = swsusp_write();
		if (!error)
			power_down(pm_disk_mode);	/* does not return */
		else {
			swsusp_free();
			goto Thaw;
		}
	} else {
		pr_debug("PM: Image restored successfully.\n");
	}

	swsusp_free();
 Enable_cpus:
	enable_nonboot_cpus();
 Resume_devices:
	platform_finish();
	device_resume();
	resume_console();
 Thaw:
	unprepare_processes();
	return error;
}
|
||||
|
||||
|
||||
/**
|
||||
* software_resume - Resume from a saved image.
|
||||
*
|
||||
* Called as a late_initcall (so all devices are discovered and
|
||||
* initialized), we call swsusp to see if we have a saved image or not.
|
||||
* If so, we quiesce devices, the restore the saved image. We will
|
||||
* return above (in pm_suspend_disk() ) if everything goes well.
|
||||
* Otherwise, we fail gracefully and return to the normally
|
||||
* scheduled program.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
 * software_resume - look for a saved hibernation image and restore it.
 *
 * Runs as a late_initcall and is also invoked from resume_store().
 * Resolves the resume device (from resume_file if no device was set),
 * then checks, reads and restores the image.  On a successful restore
 * swsusp_resume() does not return here; every path that *does* return
 * is a failure or a no-image case, hence the unconditional "failed"
 * message at the end.  Always returns 0 so the initcall succeeds.
 */
static int software_resume(void)
{
	int error;

	mutex_lock(&pm_mutex);
	if (!swsusp_resume_device) {
		if (!strlen(resume_file)) {
			mutex_unlock(&pm_mutex);
			return -ENOENT;
		}
		swsusp_resume_device = name_to_dev_t(resume_file);
		pr_debug("swsusp: Resume From Partition %s\n", resume_file);
	} else {
		pr_debug("swsusp: Resume From Partition %d:%d\n",
			MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
	}

	if (noresume) {
		/**
		 * FIXME: If noresume is specified, we need to find the partition
		 * and reset it back to normal swap space.
		 */
		mutex_unlock(&pm_mutex);
		return 0;
	}

	pr_debug("PM: Checking swsusp image.\n");

	error = swsusp_check();
	if (error)
		goto Done;

	pr_debug("PM: Preparing processes for restore.\n");

	error = prepare_processes();
	if (error) {
		swsusp_close();
		goto Done;
	}

	pr_debug("PM: Reading swsusp image.\n");

	error = swsusp_read();
	if (error) {
		swsusp_free();
		goto Thaw;
	}

	pr_debug("PM: Preparing devices for restore.\n");

	suspend_console();
	error = device_suspend(PMSG_PRETHAW);
	if (error)
		goto Free;

	error = disable_nonboot_cpus();
	if (!error)
		swsusp_resume();	/* does not return on success */

	enable_nonboot_cpus();
 Free:
	swsusp_free();
	device_resume();
	resume_console();
 Thaw:
	printk(KERN_ERR "PM: Restore failed, recovering.\n");
	unprepare_processes();
 Done:
	/* For success case, the suspend path will release the lock */
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Resume from disk failed.\n");
	return 0;
}
|
||||
|
||||
late_initcall(software_resume);
|
||||
|
||||
|
||||
static const char * const pm_disk_modes[] = {
|
||||
[PM_DISK_FIRMWARE] = "firmware",
|
||||
[PM_DISK_PLATFORM] = "platform",
|
||||
[PM_DISK_SHUTDOWN] = "shutdown",
|
||||
[PM_DISK_REBOOT] = "reboot",
|
||||
[PM_DISK_TEST] = "test",
|
||||
[PM_DISK_TESTPROC] = "testproc",
|
||||
};
|
||||
|
||||
/**
|
||||
* disk - Control suspend-to-disk mode
|
||||
*
|
||||
* Suspend-to-disk can be handled in several ways. The greatest
|
||||
* distinction is who writes memory to disk - the firmware or the OS.
|
||||
* If the firmware does it, we assume that it also handles suspending
|
||||
* the system.
|
||||
* If the OS does it, then we have three options for putting the system
|
||||
* to sleep - using the platform driver (e.g. ACPI or other PM registers),
|
||||
* powering off the system or rebooting the system (for testing).
|
||||
*
|
||||
* The system will support either 'firmware' or 'platform', and that is
|
||||
* known a priori (and encoded in pm_ops). But, the user may choose
|
||||
* 'shutdown' or 'reboot' as alternatives.
|
||||
*
|
||||
* show() will display what the mode is currently set to.
|
||||
* store() will accept one of
|
||||
*
|
||||
* 'firmware'
|
||||
* 'platform'
|
||||
* 'shutdown'
|
||||
* 'reboot'
|
||||
*
|
||||
* It will only change to 'firmware' or 'platform' if the system
|
||||
* supports it (as determined from pm_ops->pm_disk_mode).
|
||||
*/
|
||||
|
||||
static ssize_t disk_show(struct subsystem * subsys, char * buf)
|
||||
{
|
||||
return sprintf(buf, "%s\n", pm_disk_modes[pm_disk_mode]);
|
||||
}
|
||||
|
||||
|
||||
/*
 * disk_store - sysfs store for /sys/power/disk.
 *
 * Parses the written string (up to the first newline) as a disk-mode
 * name.  'shutdown', 'reboot', 'test' and 'testproc' are always
 * accepted; 'platform'/'firmware' only when pm_ops advertises that
 * mode.  Returns n on success or -EINVAL.
 */
static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
{
	int error = 0;
	int i;
	int len;
	char *p;
	suspend_disk_method_t mode = 0;	/* 0 = "no match" sentinel */

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	mutex_lock(&pm_mutex);
	for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) {
		/* NOTE(review): comparing only the first `len` bytes means a
		 * strict prefix of a mode name (e.g. "t") also matches the
		 * first mode it prefixes — confirm this laxness is intended. */
		if (!strncmp(buf, pm_disk_modes[i], len)) {
			mode = i;
			break;
		}
	}
	if (mode) {
		if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT ||
		     mode == PM_DISK_TEST || mode == PM_DISK_TESTPROC) {
			pm_disk_mode = mode;
		} else {
			/* Platform/firmware modes need pm_ops backing. */
			if (pm_ops && pm_ops->enter &&
			    (mode == pm_ops->pm_disk_mode))
				pm_disk_mode = mode;
			else
				error = -EINVAL;
		}
	} else {
		error = -EINVAL;
	}

	/* NOTE(review): on no match, mode is still 0 and pm_disk_modes[0]
	 * is printed here — verify index 0 holds a valid string. */
	pr_debug("PM: suspend-to-disk mode set to '%s'\n",
		 pm_disk_modes[mode]);
	mutex_unlock(&pm_mutex);
	return error ? error : n;
}
|
||||
|
||||
power_attr(disk);
|
||||
|
||||
static ssize_t resume_show(struct subsystem * subsys, char *buf)
|
||||
{
|
||||
return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
|
||||
MINOR(swsusp_resume_device));
|
||||
}
|
||||
|
||||
static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n)
|
||||
{
|
||||
unsigned int maj, min;
|
||||
dev_t res;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (sscanf(buf, "%u:%u", &maj, &min) != 2)
|
||||
goto out;
|
||||
|
||||
res = MKDEV(maj,min);
|
||||
if (maj != MAJOR(res) || min != MINOR(res))
|
||||
goto out;
|
||||
|
||||
mutex_lock(&pm_mutex);
|
||||
swsusp_resume_device = res;
|
||||
mutex_unlock(&pm_mutex);
|
||||
printk("Attempting manual resume\n");
|
||||
noresume = 0;
|
||||
software_resume();
|
||||
ret = n;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
power_attr(resume);
|
||||
|
||||
static ssize_t image_size_show(struct subsystem * subsys, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%lu\n", image_size);
|
||||
}
|
||||
|
||||
static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n)
|
||||
{
|
||||
unsigned long size;
|
||||
|
||||
if (sscanf(buf, "%lu", &size) == 1) {
|
||||
image_size = size;
|
||||
return n;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
power_attr(image_size);
|
||||
|
||||
/* Attributes exported under /sys/power/ for suspend-to-disk control. */
static struct attribute * g[] = {
	&disk_attr.attr,
	&resume_attr.attr,
	&image_size_attr.attr,
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = g,
};

/* Register the suspend-to-disk attribute group with the power subsystem. */
static int __init pm_disk_init(void)
{
	return sysfs_create_group(&power_subsys.kset.kobj,&attr_group);
}

core_initcall(pm_disk_init);
|
||||
|
||||
|
||||
/*
 * resume_setup - handle the "resume=" kernel command-line option.
 * @str: the partition name to resume from.
 *
 * Records @str into resume_file for software_resume() unless "noresume"
 * was given earlier on the command line.  Always returns 1 (option
 * consumed).
 *
 * Fix: strncpy() does not NUL-terminate when @str is as long as the
 * buffer, so terminate explicitly and size the copy from the buffer.
 */
static int __init resume_setup(char *str)
{
	if (noresume)
		return 1;

	strncpy(resume_file, str, sizeof(resume_file) - 1);
	resume_file[sizeof(resume_file) - 1] = '\0';
	return 1;
}
|
||||
|
||||
/*
 * resume_offset_setup - handle the "resume_offset=" command-line option.
 * @str: decimal sector offset of the image within the resume device.
 *
 * Ignored when "noresume" was given.  Always returns 1.
 */
static int __init resume_offset_setup(char *str)
{
	unsigned long long offset;

	if (!noresume && sscanf(str, "%llu", &offset) == 1)
		swsusp_resume_block = offset;

	return 1;
}
|
||||
|
||||
/* Handle the "noresume" command-line option: skip image restore on boot.
 * @str is unused; always returns 1 (option consumed). */
static int __init noresume_setup(char *str)
{
	noresume = 1;
	return 1;
}
|
||||
|
||||
__setup("noresume", noresume_setup);
|
||||
__setup("resume_offset=", resume_offset_setup);
|
||||
__setup("resume=", resume_setup);
|
||||
451
kernel/power/main.c
Normal file
451
kernel/power/main.c
Normal file
@@ -0,0 +1,451 @@
|
||||
/*
|
||||
* kernel/power/main.c - PM subsystem core functionality.
|
||||
*
|
||||
* Copyright (c) 2003 Patrick Mochel
|
||||
* Copyright (c) 2003 Open Source Development Lab
|
||||
*
|
||||
* This file is released under the GPLv2
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/resume-trace.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include "power.h"
|
||||
#if 0
|
||||
/* Qisda, ShiYong Lin, 2009/07/18, Send message when sleep{*/
|
||||
#include <linux/input.h>
|
||||
/* Qisda, ShiYong Lin, 2009/07/18, Send message when sleep}*/
|
||||
#endif
|
||||
#ifdef CONFIG_PM_CPU_MODE
|
||||
extern unsigned char pm_cpu_mode;
|
||||
#endif
|
||||
|
||||
/*This is just an arbitrary number */
|
||||
#define FREE_PAGE_NUMBER (100)
|
||||
|
||||
DEFINE_MUTEX(pm_mutex);
|
||||
|
||||
struct pm_ops *pm_ops;
|
||||
suspend_disk_method_t pm_disk_mode = PM_DISK_PLATFORM;
|
||||
/* Qisda, ShiYong Lin, 2009/07/18, Send message when sleep{*/
|
||||
extern void pm_keypad_message_to_ap (void);
|
||||
/* Qisda, ShiYong Lin, 2009/07/18, Send message when sleep}*/
|
||||
|
||||
/**
|
||||
* pm_set_ops - Set the global power method table.
|
||||
* @ops: Pointer to ops structure.
|
||||
*/
|
||||
|
||||
/**
 *	pm_set_ops - Set the global power method table.
 *	@ops:	Pointer to ops structure.
 *
 *	Replaces the platform suspend callbacks under pm_mutex so the
 *	change cannot race with an in-progress suspend.
 */
void pm_set_ops(struct pm_ops * ops)
{
	mutex_lock(&pm_mutex);
	pm_ops = ops;
	mutex_unlock(&pm_mutex);
}
|
||||
|
||||
/* Invoke the platform's post-suspend cleanup hook, if any.
 * NOTE(review): pm_ops itself is dereferenced without a NULL check —
 * callers must have verified pm_ops (suspend_prepare() does); confirm
 * this holds for every call site. */
static inline void pm_finish(suspend_state_t state)
{
	if (pm_ops->finish)
		pm_ops->finish(state);
}
|
||||
|
||||
/**
|
||||
* suspend_prepare - Do prep work before entering low-power state.
|
||||
* @state: State we're entering.
|
||||
*
|
||||
* This is common code that is called for each state that we're
|
||||
* entering. Allocate a console, stop all processes, then make sure
|
||||
* the platform can enter the requested state.
|
||||
*/
|
||||
|
||||
/**
 *	suspend_prepare - Do prep work before entering low-power state.
 *	@state:		State we're entering.
 *
 *	Common setup for every suspend state: grab the suspend console,
 *	freeze processes, make sure some memory is free, run the
 *	platform prepare hook, suspend devices and take non-boot CPUs
 *	offline.  On success returns 0 with everything quiesced; on
 *	failure unwinds whatever was done and returns a negative errno.
 *
 *	Fix: corrected the ungrammatical error message
 *	"PM: No enough memory" -> "PM: Not enough memory".
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;
	unsigned int free_pages;

	/* Cannot suspend without a platform enter hook. */
	if (!pm_ops || !pm_ops->enter)
		return -EPERM;

	pm_prepare_console();

	if (freeze_processes()) {
		error = -EAGAIN;
		goto Thaw;
	}

	/* Try to ensure a minimal pool of free pages before suspending. */
	if ((free_pages = global_page_state(NR_FREE_PAGES))
			< FREE_PAGE_NUMBER) {
		pr_debug("PM: free some memory\n");
		shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
		if (nr_free_pages() < FREE_PAGE_NUMBER) {
			error = -ENOMEM;
			printk(KERN_ERR "PM: Not enough memory\n");
			goto Thaw;
		}
	}

	if (pm_ops->prepare) {
		if ((error = pm_ops->prepare(state)))
			goto Thaw;
	}

	suspend_console();
	error = device_suspend(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "Some devices failed to suspend\n");
		goto Resume_devices;
	}
	error = disable_nonboot_cpus();
	if (!error)
		return 0;

	/* disable_nonboot_cpus() failed: unwind in reverse order. */
	enable_nonboot_cpus();
 Resume_devices:
	pm_finish(state);
	device_resume();
	resume_console();
 Thaw:
	thaw_processes();
	pm_restore_console();
	return error;
}
|
||||
|
||||
|
||||
/*
 * suspend_enter - actually enter the low-power state.
 * @state: target suspend state.
 *
 * Runs with local interrupts disabled: powers devices down, calls the
 * platform enter hook (which returns when the system wakes up), then
 * powers devices back up.  Callers guarantee pm_ops->enter is valid
 * (checked in suspend_prepare()).
 */
int suspend_enter(suspend_state_t state)
{
	int error = 0;
	unsigned long flags;

	local_irq_save(flags);

	if ((error = device_power_down(PMSG_SUSPEND))) {
		printk(KERN_ERR "Some devices failed to power down\n");
		goto Done;
	}
	error = pm_ops->enter(state);
	device_power_up();
 Done:
	local_irq_restore(flags);
	return error;
}
|
||||
|
||||
|
||||
/**
|
||||
* suspend_finish - Do final work before exiting suspend sequence.
|
||||
* @state: State we're coming out of.
|
||||
*
|
||||
* Call platform code to clean up, restart processes, and free the
|
||||
* console that we've allocated. This is not called for suspend-to-disk.
|
||||
*/
|
||||
|
||||
/**
 *	suspend_finish - Do final work before exiting suspend sequence.
 *	@state:		State we're coming out of.
 *
 *	Undo suspend_prepare() in reverse order: bring CPUs back, run the
 *	platform finish hook, resume devices and the console, thaw
 *	processes and release the suspend console.  The call order
 *	mirrors the teardown labels in suspend_prepare() and must not be
 *	changed.  Not called for suspend-to-disk.
 */
static void suspend_finish(suspend_state_t state)
{
	enable_nonboot_cpus();
	pm_finish(state);
	device_resume();
	resume_console();
	thaw_processes();
	pm_restore_console();
}
|
||||
|
||||
|
||||
|
||||
|
||||
/* Names accepted by /sys/power/state, indexed by suspend_state_t.
 * Entries absent from this table (NULL) are rejected by state_store(). */
static const char * const pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM] = "mem",
#ifdef CONFIG_PM_CPU_MODE
	/* Board-specific CPU low-power mode (see enter_pm_cpu_mode()). */
	[PM_SUSPEND_CPU_MODE] = "cpu",
#endif
#ifdef CONFIG_SOFTWARE_SUSPEND
	[PM_SUSPEND_DISK] = "disk",
#endif
};
|
||||
|
||||
/* Return nonzero if @state can be entered on this platform. */
static inline int valid_state(suspend_state_t state)
{
	/* Suspend-to-disk does not really need low-level support.
	 * It can work with reboot if needed. */
	if (state == PM_SUSPEND_DISK)
		return 1;

	/* all other states need lowlevel support and need to be
	 * valid to the lowlevel implementation, no valid callback
	 * implies that all are valid. */
	if (!pm_ops || (pm_ops->valid && !pm_ops->valid(state)))
		return 0;
	return 1;
}
|
||||
|
||||
#ifdef CONFIG_PM_CPU_MODE
|
||||
/* Preparation for the CPU low-power mode path is identical to the
 * regular suspend preparation; delegate. */
static int suspend_pm_cpu_mode_prepare(suspend_state_t state)
{
	return suspend_prepare(state);
}
|
||||
|
||||
|
||||
/*
 * suspend_pm_cpu_mode_enter - enter the CPU low-power mode.
 * @state: target suspend state.
 *
 * Variant of suspend_enter() with the device_power_down() step
 * commented out.
 *
 * NOTE(review): because the power-down block is commented out, the
 * Done: label is never the target of a goto (-Wunused-label warning),
 * and device_power_up() runs without a matching device_power_down() —
 * confirm the unbalanced power-up is intended for this mode.
 */
int suspend_pm_cpu_mode_enter(suspend_state_t state)
{
	int error = 0;
	unsigned long flags;
	printk("suspend_cpu_mode_enter\n");
	local_irq_save(flags);

	/*
	if ((error = device_power_down(PMSG_SUSPEND))) {
		printk(KERN_ERR "Some devices failed to power down\n");
		goto Done;
	}
	*/
	error = pm_ops->enter(state);

	device_power_up();
 Done:
	local_irq_restore(flags);
	return error;
}
|
||||
|
||||
|
||||
/* Teardown for the CPU low-power mode path; mirrors suspend_finish()
 * (a board-specific serial resume call remains disabled below). */
static void suspend_pm_cpu_mode_finish(suspend_state_t state)
{
	enable_nonboot_cpus();
	pm_finish(state);
	device_resume();
//	s3c24xx_cpu_mode_serial_resume();
	resume_console();
	thaw_processes();
	pm_restore_console();
}
|
||||
|
||||
/*
 * enter_pm_cpu_mode - CPU low-power-mode counterpart of enter_state().
 * @state: requested suspend state.
 *
 * Takes pm_mutex non-blockingly (-EBUSY if a suspend is already in
 * flight), prepares, enters and finishes the CPU-mode sleep, then
 * notifies userspace via the keypad sleep message.  Returns 0 or a
 * negative errno.
 */
static int enter_pm_cpu_mode(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_DISK) {
		error = pm_suspend_disk();
		goto Unlock;
	}

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	if ((error = suspend_pm_cpu_mode_prepare(state)))
		goto Unlock;

//	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	error = suspend_pm_cpu_mode_enter(state);
	pr_debug("PM: Finishing wakeup.\n");
	suspend_pm_cpu_mode_finish(state);
 Unlock:
	mutex_unlock(&pm_mutex);
	/* Qisda, ShiYong Lin, 2009/09/28, Add the sleep event message when sleep {*/
	s3c_keypad_pm_sleep_message_to_ap(0);
//	printk(KERN_ERR "Sleep end enter_pm_cpu_mode, %d\n", state);
	/* } Qisda, ShiYong Lin, 2009/09/28, Add the sleep event message when sleep */
	return error;
}
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* enter_state - Do common work of entering low-power state.
|
||||
* @state: pm_state structure for state we're entering.
|
||||
*
|
||||
* Make sure we're the only ones trying to enter a sleep state. Fail
|
||||
* if someone has beat us to it, since we don't want anything weird to
|
||||
* happen when we wake up.
|
||||
* Then, do the setup for suspend, enter the state, and cleaup (after
|
||||
* we've woken up).
|
||||
*/
|
||||
|
||||
/**
 *	enter_state - Do common work of entering low-power state.
 *	@state:		pm_state structure for state we're entering.
 *
 *	Make sure we're the only ones trying to enter a sleep state. Fail
 *	if someone has beat us to it, since we don't want anything weird to
 *	happen when we wake up.
 *	Then, do the setup for suspend, enter the state, and cleanup (after
 *	we've woken up).  On return the keypad sleep message is sent to
 *	notify userspace that the sleep cycle has ended.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;
	/* Non-blocking: a concurrent suspend attempt yields -EBUSY. */
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_DISK) {
		error = pm_suspend_disk();
		goto Unlock;
	}

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	if ((error = suspend_prepare(state)))
		goto Unlock;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	error = suspend_enter(state);

	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish(state);
 Unlock:
	mutex_unlock(&pm_mutex);
	/* Qisda, ShiYong Lin, 2009/09/28, Add the sleep event message when sleep {*/
	s3c_keypad_pm_sleep_message_to_ap(0);
	/* } Qisda, ShiYong Lin, 2009/09/28, Add the sleep event message when sleep */
	return error;
}
|
||||
|
||||
/*
|
||||
* This is main interface to the outside world. It needs to be
|
||||
* called from process context.
|
||||
*/
|
||||
/* Entry point for suspend-to-disk; must be called from process context. */
int software_suspend(void)
{
	return enter_state(PM_SUSPEND_DISK);
}
|
||||
|
||||
|
||||
/**
|
||||
* pm_suspend - Externally visible function for suspending system.
|
||||
* @state: Enumarted value of state to enter.
|
||||
*
|
||||
* Determine whether or not value is within range, get state
|
||||
* structure, and enter (above).
|
||||
*/
|
||||
|
||||
int pm_suspend(suspend_state_t state)
|
||||
{
|
||||
if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
|
||||
return enter_state(state);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(pm_suspend);
|
||||
|
||||
decl_subsys(power,NULL,NULL);
|
||||
|
||||
|
||||
/**
|
||||
* state - control system power state.
|
||||
*
|
||||
* show() returns what states are supported, which is hard-coded to
|
||||
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
|
||||
* 'disk' (Suspend-to-Disk).
|
||||
*
|
||||
* store() accepts one of those strings, translates it into the
|
||||
* proper enumerated value, and initiates a suspend transition.
|
||||
*/
|
||||
|
||||
static ssize_t state_show(struct subsystem * subsys, char * buf)
|
||||
{
|
||||
int i;
|
||||
char * s = buf;
|
||||
|
||||
for (i = 0; i < PM_SUSPEND_MAX; i++) {
|
||||
if (pm_states[i] && valid_state(i))
|
||||
s += sprintf(s,"%s ", pm_states[i]);
|
||||
}
|
||||
s += sprintf(s,"\n");
|
||||
return (s - buf);
|
||||
}
|
||||
|
||||
/*
 * state_store - parse a state name written to /sys/power/state and start
 * the corresponding suspend transition.
 *
 * Returns the number of bytes consumed on success, or a negative errno.
 *
 * Fixes: the CPU-mode trace messages were logged at KERN_ERR even though
 * they are plain debug traces; they are now pr_debug().  The dead
 * commented-out "state = 0;" line is removed.
 */
static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n)
{
	suspend_state_t state = PM_SUSPEND_STANDBY;
	const char * const *s;
	char *p;
	int error;
	int len;

	/* Match only up to (not including) a trailing newline, if any. */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Walk the state-name table looking for a match. */
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (*s && !strncmp(buf, *s, len))
			break;
	}

#ifdef CONFIG_PM_CPU_MODE
	pr_debug("state_store, %d\n", state);
	/* Only 'mem' and the CPU-mode state are accepted in this config. */
	if (state < PM_SUSPEND_MAX && (state == PM_SUSPEND_CPU_MODE ||
				       state == PM_SUSPEND_MEM)) {
		if (pm_cpu_mode)
			error = enter_pm_cpu_mode(state);
		else
			error = enter_state(state);
	} else {
		error = -EINVAL;
	}
	pr_debug("end, leave state_store, %d\n", state);
#else
	if (state < PM_SUSPEND_MAX && *s)
		error = enter_state(state);
	else
		error = -EINVAL;
#endif
	return error ? error : n;
}
|
||||
|
||||
power_attr(state);
|
||||
|
||||
#ifdef CONFIG_PM_TRACE
|
||||
int pm_trace_enabled;
|
||||
|
||||
static ssize_t pm_trace_show(struct subsystem * subsys, char * buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", pm_trace_enabled);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
pm_trace_store(struct subsystem * subsys, const char * buf, size_t n)
|
||||
{
|
||||
int val;
|
||||
|
||||
if (sscanf(buf, "%d", &val) == 1) {
|
||||
pm_trace_enabled = !!val;
|
||||
return n;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
power_attr(pm_trace);
|
||||
|
||||
static struct attribute * g[] = {
|
||||
&state_attr.attr,
|
||||
&pm_trace_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
#else
|
||||
static struct attribute * g[] = {
|
||||
&state_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
#endif /* CONFIG_PM_TRACE */
|
||||
|
||||
static struct attribute_group attr_group = {
|
||||
.attrs = g,
|
||||
};
|
||||
|
||||
|
||||
/* Register the /sys/power subsystem and populate its attribute group. */
static int __init pm_init(void)
{
	int error;

	error = subsystem_register(&power_subsys);
	if (error)
		return error;

	return sysfs_create_group(&power_subsys.kset.kobj, &attr_group);
}
|
||||
|
||||
core_initcall(pm_init);
|
||||
209
kernel/power/pm.c
Normal file
209
kernel/power/pm.c
Normal file
@@ -0,0 +1,209 @@
|
||||
/*
|
||||
* pm.c - Power management interface
|
||||
*
|
||||
* Copyright (C) 2000 Andrew Henroid
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/pm_legacy.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
int pm_active;
|
||||
|
||||
/*
|
||||
* Locking notes:
|
||||
* pm_devs_lock can be a semaphore providing pm ops are not called
|
||||
* from an interrupt handler (already a bad idea so no change here). Each
|
||||
* change must be protected so that an unlink of an entry doesn't clash
|
||||
* with a pm send - which is permitted to sleep in the current architecture
|
||||
*
|
||||
* Module unloads clashing with pm events now work out safely, the module
|
||||
* unload path will block until the event has been sent. It may well block
|
||||
* until a resume but that will be fine.
|
||||
*/
|
||||
|
||||
static DEFINE_MUTEX(pm_devs_lock);
|
||||
static LIST_HEAD(pm_devs);
|
||||
|
||||
/**
|
||||
* pm_register - register a device with power management
|
||||
* @type: device type
|
||||
* @id: device ID
|
||||
* @callback: callback function
|
||||
*
|
||||
* Add a device to the list of devices that wish to be notified about
|
||||
* power management events. A &pm_dev structure is returned on success,
|
||||
* on failure the return is %NULL.
|
||||
*
|
||||
* The callback function will be called in process context and
|
||||
* it may sleep.
|
||||
*/
|
||||
|
||||
struct pm_dev *pm_register(pm_dev_t type,
|
||||
unsigned long id,
|
||||
pm_callback callback)
|
||||
{
|
||||
struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
|
||||
if (dev) {
|
||||
dev->type = type;
|
||||
dev->id = id;
|
||||
dev->callback = callback;
|
||||
|
||||
mutex_lock(&pm_devs_lock);
|
||||
list_add(&dev->entry, &pm_devs);
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
}
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_send - send request to a single device
|
||||
* @dev: device to send to
|
||||
* @rqst: power management request
|
||||
* @data: data for the callback
|
||||
*
|
||||
* Issue a power management request to a given device. The
|
||||
* %PM_SUSPEND and %PM_RESUME events are handled specially. The
|
||||
* data field must hold the intended next state. No call is made
|
||||
* if the state matches.
|
||||
*
|
||||
* BUGS: what stops two power management requests occurring in parallel
|
||||
* and conflicting.
|
||||
*
|
||||
* WARNING: Calling pm_send directly is not generally recommended, in
|
||||
* particular there is no locking against the pm_dev going away. The
|
||||
* caller must maintain all needed locking or have 'inside knowledge'
|
||||
* on the safety. Also remember that this function is not locked against
|
||||
* pm_unregister. This means that you must handle SMP races on callback
|
||||
* execution and unload yourself.
|
||||
*/
|
||||
|
||||
static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
|
||||
{
|
||||
int status = 0;
|
||||
unsigned long prev_state, next_state;
|
||||
|
||||
if (in_interrupt())
|
||||
BUG();
|
||||
|
||||
switch (rqst) {
|
||||
case PM_SUSPEND:
|
||||
case PM_RESUME:
|
||||
prev_state = dev->state;
|
||||
next_state = (unsigned long) data;
|
||||
if (prev_state != next_state) {
|
||||
if (dev->callback)
|
||||
status = (*dev->callback)(dev, rqst, data);
|
||||
if (!status) {
|
||||
dev->state = next_state;
|
||||
dev->prev_state = prev_state;
|
||||
}
|
||||
}
|
||||
else {
|
||||
dev->prev_state = prev_state;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (dev->callback)
|
||||
status = (*dev->callback)(dev, rqst, data);
|
||||
break;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Undo incomplete request
|
||||
*/
|
||||
static void pm_undo_all(struct pm_dev *last)
|
||||
{
|
||||
struct list_head *entry = last->entry.prev;
|
||||
while (entry != &pm_devs) {
|
||||
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
|
||||
if (dev->state != dev->prev_state) {
|
||||
/* previous state was zero (running) resume or
|
||||
* previous state was non-zero (suspended) suspend
|
||||
*/
|
||||
pm_request_t undo = (dev->prev_state
|
||||
? PM_SUSPEND:PM_RESUME);
|
||||
pm_send(dev, undo, (void*) dev->prev_state);
|
||||
}
|
||||
entry = entry->prev;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_send_all - send request to all managed devices
|
||||
* @rqst: power management request
|
||||
* @data: data for the callback
|
||||
*
|
||||
* Issue a power management request to a all devices. The
|
||||
* %PM_SUSPEND events are handled specially. Any device is
|
||||
* permitted to fail a suspend by returning a non zero (error)
|
||||
* value from its callback function. If any device vetoes a
|
||||
* suspend request then all other devices that have suspended
|
||||
* during the processing of this request are restored to their
|
||||
* previous state.
|
||||
*
|
||||
* WARNING: This function takes the pm_devs_lock. The lock is not dropped until
|
||||
* the callbacks have completed. This prevents races against pm locking
|
||||
* functions, races against module unload pm_unregister code. It does
|
||||
* mean however that you must not issue pm_ functions within the callback
|
||||
* or you will deadlock and users will hate you.
|
||||
*
|
||||
* Zero is returned on success. If a suspend fails then the status
|
||||
* from the device that vetoes the suspend is returned.
|
||||
*
|
||||
* BUGS: what stops two power management requests occurring in parallel
|
||||
* and conflicting.
|
||||
*/
|
||||
|
||||
int pm_send_all(pm_request_t rqst, void *data)
|
||||
{
|
||||
struct list_head *entry;
|
||||
|
||||
mutex_lock(&pm_devs_lock);
|
||||
entry = pm_devs.next;
|
||||
while (entry != &pm_devs) {
|
||||
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
|
||||
if (dev->callback) {
|
||||
int status = pm_send(dev, rqst, data);
|
||||
if (status) {
|
||||
/* return devices to previous state on
|
||||
* failed suspend request
|
||||
*/
|
||||
if (rqst == PM_SUSPEND)
|
||||
pm_undo_all(dev);
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
entry = entry->next;
|
||||
}
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(pm_register);
|
||||
EXPORT_SYMBOL(pm_send_all);
|
||||
EXPORT_SYMBOL(pm_active);
|
||||
|
||||
|
||||
179
kernel/power/power.h
Normal file
179
kernel/power/power.h
Normal file
@@ -0,0 +1,179 @@
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/utsname.h>
|
||||
|
||||
struct swsusp_info {
|
||||
struct new_utsname uts;
|
||||
u32 version_code;
|
||||
unsigned long num_physpages;
|
||||
int cpus;
|
||||
unsigned long image_pages;
|
||||
unsigned long pages;
|
||||
unsigned long size;
|
||||
} __attribute__((aligned(PAGE_SIZE)));
|
||||
|
||||
|
||||
|
||||
#ifdef CONFIG_SOFTWARE_SUSPEND
|
||||
extern int pm_suspend_disk(void);
|
||||
|
||||
#else
|
||||
static inline int pm_suspend_disk(void)
|
||||
{
|
||||
return -EPERM;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern struct mutex pm_mutex;
|
||||
|
||||
#define power_attr(_name) \
|
||||
static struct subsys_attribute _name##_attr = { \
|
||||
.attr = { \
|
||||
.name = __stringify(_name), \
|
||||
.mode = 0644, \
|
||||
}, \
|
||||
.show = _name##_show, \
|
||||
.store = _name##_store, \
|
||||
}
|
||||
|
||||
extern struct subsystem power_subsys;
|
||||
|
||||
/* References to section boundaries */
|
||||
extern const void __nosave_begin, __nosave_end;
|
||||
|
||||
/* Preferred image size in bytes (default 500 MB) */
|
||||
extern unsigned long image_size;
|
||||
extern int in_suspend;
|
||||
extern dev_t swsusp_resume_device;
|
||||
extern sector_t swsusp_resume_block;
|
||||
|
||||
extern asmlinkage int swsusp_arch_suspend(void);
|
||||
extern asmlinkage int swsusp_arch_resume(void);
|
||||
|
||||
extern unsigned int count_data_pages(void);
|
||||
|
||||
/**
|
||||
* Auxiliary structure used for reading the snapshot image data and
|
||||
* metadata from and writing them to the list of page backup entries
|
||||
* (PBEs) which is the main data structure of swsusp.
|
||||
*
|
||||
* Using struct snapshot_handle we can transfer the image, including its
|
||||
* metadata, as a continuous sequence of bytes with the help of
|
||||
* snapshot_read_next() and snapshot_write_next().
|
||||
*
|
||||
* The code that writes the image to a storage or transfers it to
|
||||
* the user land is required to use snapshot_read_next() for this
|
||||
* purpose and it should not make any assumptions regarding the internal
|
||||
* structure of the image. Similarly, the code that reads the image from
|
||||
* a storage or transfers it from the user land is required to use
|
||||
* snapshot_write_next().
|
||||
*
|
||||
* This may allow us to change the internal structure of the image
|
||||
* in the future with considerably less effort.
|
||||
*/
|
||||
|
||||
struct snapshot_handle {
|
||||
loff_t offset; /* number of the last byte ready for reading
|
||||
* or writing in the sequence
|
||||
*/
|
||||
unsigned int cur; /* number of the block of PAGE_SIZE bytes the
|
||||
* next operation will refer to (ie. current)
|
||||
*/
|
||||
unsigned int cur_offset; /* offset with respect to the current
|
||||
* block (for the next operation)
|
||||
*/
|
||||
unsigned int prev; /* number of the block of PAGE_SIZE bytes that
|
||||
* was the current one previously
|
||||
*/
|
||||
void *buffer; /* address of the block to read from
|
||||
* or write to
|
||||
*/
|
||||
unsigned int buf_offset; /* location to read from or write to,
|
||||
* given as a displacement from 'buffer'
|
||||
*/
|
||||
int sync_read; /* Set to one to notify the caller of
|
||||
* snapshot_write_next() that it may
|
||||
* need to call wait_on_bio_chain()
|
||||
*/
|
||||
};
|
||||
|
||||
/* This macro returns the address from/to which the caller of
|
||||
* snapshot_read_next()/snapshot_write_next() is allowed to
|
||||
* read/write data after the function returns
|
||||
*/
|
||||
#define data_of(handle) ((handle).buffer + (handle).buf_offset)
|
||||
|
||||
extern unsigned int snapshot_additional_pages(struct zone *zone);
|
||||
extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
|
||||
extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
|
||||
extern void snapshot_write_finalize(struct snapshot_handle *handle);
|
||||
extern int snapshot_image_loaded(struct snapshot_handle *handle);
|
||||
|
||||
/*
|
||||
* This structure is used to pass the values needed for the identification
|
||||
* of the resume swap area from a user space to the kernel via the
|
||||
* SNAPSHOT_SET_SWAP_AREA ioctl
|
||||
*/
|
||||
struct resume_swap_area {
|
||||
loff_t offset;
|
||||
u_int32_t dev;
|
||||
} __attribute__((packed));
|
||||
|
||||
#define SNAPSHOT_IOC_MAGIC '3'
|
||||
#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1)
|
||||
#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2)
|
||||
#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
|
||||
#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4)
|
||||
#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5)
|
||||
#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
|
||||
#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
|
||||
#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
|
||||
#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9)
|
||||
#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
|
||||
#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11)
|
||||
#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
|
||||
#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \
|
||||
struct resume_swap_area)
|
||||
#define SNAPSHOT_IOC_MAXNR 13
|
||||
|
||||
#define PMOPS_PREPARE 1
|
||||
#define PMOPS_ENTER 2
|
||||
#define PMOPS_FINISH 3
|
||||
|
||||
/**
|
||||
* The bitmap is used for tracing allocated swap pages
|
||||
*
|
||||
* The entire bitmap consists of a number of bitmap_page
|
||||
* structures linked with the help of the .next member.
|
||||
* Thus each page can be allocated individually, so we only
|
||||
* need to make 0-order memory allocations to create
|
||||
* the bitmap.
|
||||
*/
|
||||
|
||||
#define BITMAP_PAGE_SIZE (PAGE_SIZE - sizeof(void *))
|
||||
#define BITMAP_PAGE_CHUNKS (BITMAP_PAGE_SIZE / sizeof(long))
|
||||
#define BITS_PER_CHUNK (sizeof(long) * 8)
|
||||
#define BITMAP_PAGE_BITS (BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK)
|
||||
|
||||
struct bitmap_page {
|
||||
unsigned long chunks[BITMAP_PAGE_CHUNKS];
|
||||
struct bitmap_page *next;
|
||||
};
|
||||
|
||||
extern void free_bitmap(struct bitmap_page *bitmap);
|
||||
extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
|
||||
extern sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap);
|
||||
extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
|
||||
|
||||
extern int swsusp_check(void);
|
||||
extern int swsusp_shrink_memory(void);
|
||||
extern void swsusp_free(void);
|
||||
extern int swsusp_suspend(void);
|
||||
extern int swsusp_resume(void);
|
||||
extern int swsusp_read(void);
|
||||
extern int swsusp_write(void);
|
||||
extern void swsusp_close(void);
|
||||
extern int suspend_enter(suspend_state_t state);
|
||||
|
||||
struct timeval;
|
||||
extern void swsusp_show_speed(struct timeval *, struct timeval *,
|
||||
unsigned int, char *);
|
||||
44
kernel/power/poweroff.c
Normal file
44
kernel/power/poweroff.c
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* poweroff.c - sysrq handler to gracefully power down machine.
|
||||
*
|
||||
* This file is released under the GPL v2
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sysrq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/reboot.h>
|
||||
|
||||
/*
 * When the user hits Sys-Rq o to power down the machine this is the
 * callback we use.
 */

/* Work item body: performs the actual power-off via kernel_power_off(),
 * in process context. */
static void do_poweroff(struct work_struct *dummy)
{
	kernel_power_off();
}
|
||||
|
||||
static DECLARE_WORK(poweroff_work, do_poweroff);
|
||||
|
||||
/* Sysrq 'o' handler: defer the power-off to a workqueue rather than
 * calling kernel_power_off() here directly — presumably because the
 * sysrq handler may run in atomic context (TODO confirm). */
static void handle_poweroff(int key, struct tty_struct *tty)
{
	schedule_work(&poweroff_work);
}
|
||||
|
||||
static struct sysrq_key_op sysrq_poweroff_op = {
|
||||
.handler = handle_poweroff,
|
||||
.help_msg = "powerOff",
|
||||
.action_msg = "Power Off",
|
||||
.enable_mask = SYSRQ_ENABLE_BOOT,
|
||||
};
|
||||
|
||||
static int pm_sysrq_init(void)
|
||||
{
|
||||
register_sysrq_key('o', &sysrq_poweroff_op);
|
||||
return 0;
|
||||
}
|
||||
|
||||
subsys_initcall(pm_sysrq_init);
|
||||
219
kernel/power/process.c
Normal file
219
kernel/power/process.c
Normal file
@@ -0,0 +1,219 @@
|
||||
/*
|
||||
* drivers/power/process.c - Functions for starting/stopping processes on
|
||||
* suspend transitions.
|
||||
*
|
||||
* Originally from swsusp.
|
||||
*/
|
||||
|
||||
|
||||
#undef DEBUG
|
||||
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
/*
|
||||
* Timeout for stopping processes
|
||||
*/
|
||||
#define TIMEOUT (20 * HZ)
|
||||
|
||||
#define FREEZER_KERNEL_THREADS 0
|
||||
#define FREEZER_USER_SPACE 1
|
||||
|
||||
static inline int freezeable(struct task_struct * p)
|
||||
{
|
||||
if ((p == current) ||
|
||||
(p->flags & PF_NOFREEZE) ||
|
||||
(p->exit_state == EXIT_ZOMBIE) ||
|
||||
(p->exit_state == EXIT_DEAD))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Refrigerator is place where frozen processes are stored :-). */
void refrigerator(void)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	long save;
	/* Remember the task state so it can be restored after thawing. */
	save = current->state;
	pr_debug("%s entered refrigerator\n", current->comm);

	frozen_process(current);
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* We sent fake signal, clean it up */
	spin_unlock_irq(&current->sighand->siglock);

	/* Sleep uninterruptibly until some other task clears our frozen
	 * flag (thaw). */
	while (frozen(current)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule();
	}
	pr_debug("%s left refrigerator\n", current->comm);
	current->state = save;
}
|
||||
|
||||
/* Ask one task to enter the refrigerator: set its freezing flag and
 * poke it with a (fake) signal so it notices. */
static inline void freeze_process(struct task_struct *p)
{
	unsigned long flags;

	if (!freezing(p)) {
		/* NOTE(review): rmb() presumably orders the freezing/frozen
		 * flag reads against the writer — confirm against the
		 * freezer flag-update side. */
		rmb();
		if (!frozen(p)) {
			if (p->state == TASK_STOPPED)
				force_sig_specific(SIGSTOP, p);

			freeze(p);
			spin_lock_irqsave(&p->sighand->siglock, flags);
			signal_wake_up(p, p->state == TASK_STOPPED);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
		}
	}
}
|
||||
|
||||
/* Withdraw a pending freeze request from @p and clear the fake signal
 * that freeze_process() queued. */
static void cancel_freezing(struct task_struct *p)
{
	unsigned long flags;

	if (freezing(p)) {
		pr_debug("  clean up: %s\n", p->comm);
		do_not_freeze(p);
		spin_lock_irqsave(&p->sighand->siglock, flags);
		recalc_sigpending_tsk(p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
}
|
||||
|
||||
static inline int is_user_space(struct task_struct *p)
|
||||
{
|
||||
return p->mm && !(p->flags & PF_BORROWED_MM);
|
||||
}
|
||||
|
||||
/* Repeatedly sweep the task list asking the selected class of tasks
 * (user space or kernel threads, per @freeze_user_space) to freeze,
 * until either every candidate is frozen or TIMEOUT expires.
 * Returns the number of tasks that refused to freeze (0 on success). */
static unsigned int try_to_freeze_tasks(int freeze_user_space)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!freezeable(p))
				continue;

			if (frozen(p))
				continue;

			/* A traced task whose tracer is already frozen can
			 * never respond; drop the request for it. */
			if (p->state == TASK_TRACED && frozen(p->parent)) {
				cancel_freezing(p);
				continue;
			}
			if (is_user_space(p)) {
				if (!freeze_user_space)
					continue;

				/* Freeze the task unless there is a vfork
				 * completion pending
				 */
				if (!p->vfork_done)
					freeze_process(p);
			} else {
				if (freeze_user_space)
					continue;

				freeze_process(p);
			}
			/* Still at least one unfrozen candidate. */
			todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (todo && time_after(jiffies, end_time))
			break;
	} while (todo);

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Stopping %s timed out after %d seconds "
				"(%d tasks refusing to freeze):\n",
				freeze_user_space ? "user space processes" :
					"kernel threads",
				TIMEOUT / HZ, todo);
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (is_user_space(p) == !freeze_user_space)
				continue;

			/* Name the offenders, then withdraw the request. */
			if (freezeable(p) && !frozen(p))
				printk(KERN_ERR "  %s\n", p->comm);

			cancel_freezing(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	return todo;
}
|
||||
|
||||
/**
|
||||
* freeze_processes - tell processes to enter the refrigerator
|
||||
*
|
||||
* Returns 0 on success, or the number of processes that didn't freeze,
|
||||
* although they were told to.
|
||||
*/
|
||||
int freeze_processes(void)
|
||||
{
|
||||
unsigned int nr_unfrozen;
|
||||
|
||||
printk("Stopping tasks ... ");
|
||||
nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
|
||||
if (nr_unfrozen)
|
||||
return nr_unfrozen;
|
||||
|
||||
sys_sync();
|
||||
nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
|
||||
if (nr_unfrozen)
|
||||
return nr_unfrozen;
|
||||
|
||||
printk("done.\n");
|
||||
BUG_ON(in_atomic());
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void thaw_tasks(int thaw_user_space)
|
||||
{
|
||||
struct task_struct *g, *p;
|
||||
|
||||
read_lock(&tasklist_lock);
|
||||
do_each_thread(g, p) {
|
||||
if (!freezeable(p))
|
||||
continue;
|
||||
|
||||
if (is_user_space(p) == !thaw_user_space)
|
||||
continue;
|
||||
|
||||
if (!thaw_process(p))
|
||||
printk(KERN_WARNING " Strange, %s not stopped\n",
|
||||
p->comm );
|
||||
} while_each_thread(g, p);
|
||||
read_unlock(&tasklist_lock);
|
||||
}
|
||||
|
||||
void thaw_processes(void)
|
||||
{
|
||||
printk("Restarting tasks ... ");
|
||||
thaw_tasks(FREEZER_KERNEL_THREADS);
|
||||
thaw_tasks(FREEZER_USER_SPACE);
|
||||
schedule();
|
||||
printk("done.\n");
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(refrigerator);
|
||||
1739
kernel/power/snapshot.c
Normal file
1739
kernel/power/snapshot.c
Normal file
File diff suppressed because it is too large
Load Diff
634
kernel/power/swap.c
Normal file
634
kernel/power/swap.c
Normal file
@@ -0,0 +1,634 @@
|
||||
/*
|
||||
* linux/kernel/power/swap.c
|
||||
*
|
||||
* This file provides functions for reading the suspend image from
|
||||
* and writing it to a swap partition.
|
||||
*
|
||||
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
|
||||
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
|
||||
*
|
||||
* This file is released under the GPLv2.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/swapops.h>
|
||||
#include <linux/pm.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
extern char resume_file[];
|
||||
|
||||
#define SWSUSP_SIG "S1SUSPEND"
|
||||
|
||||
static struct swsusp_header {
|
||||
char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
|
||||
sector_t image;
|
||||
char orig_sig[10];
|
||||
char sig[10];
|
||||
} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
|
||||
|
||||
/*
|
||||
* General things
|
||||
*/
|
||||
|
||||
static unsigned short root_swap = 0xffff;
|
||||
static struct block_device *resume_bdev;
|
||||
|
||||
/**
 *	submit - submit BIO request.
 *	@rw:	READ or WRITE.
 *	@off	physical offset of page.
 *	@page:	page we're reading or writing.
 *	@bio_chain: list of pending biod (for async reading)
 *
 *	Straight from the textbook - allocate and initialize the bio.
 *	If we're reading, make sure the page is marked as dirty.
 *	Then submit it and, if @bio_chain == NULL, wait.
 */
static int submit(int rw, pgoff_t page_off, struct page *page,
			struct bio **bio_chain)
{
	struct bio *bio;

	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
	if (!bio)
		return -ENOMEM;
	/* page_off is in PAGE_SIZE units; bi_sector is in 512-byte sectors. */
	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = resume_bdev;
	bio->bi_end_io = end_swap_bio_read;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
		bio_put(bio);
		return -EFAULT;
	}

	lock_page(page);
	bio_get(bio);

	if (bio_chain == NULL) {
		/* Synchronous path: submit and wait for the page to unlock. */
		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
		wait_on_page_locked(page);
		if (rw == READ)
			bio_set_pages_dirty(bio);
		bio_put(bio);
	} else {
		if (rw == READ)
			get_page(page);	/* These pages are freed later */
		/* Asynchronous path: link the bio into the caller's chain,
		 * to be reaped by wait_on_bio_chain(). */
		bio->bi_private = *bio_chain;
		*bio_chain = bio;
		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
	}
	return 0;
}
|
||||
|
||||
/* Read the page backing @addr from swap offset @page_off (async when
 * @bio_chain is non-NULL). */
static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
	struct page *pg = virt_to_page(addr);

	return submit(READ, page_off, pg, bio_chain);
}
|
||||
|
||||
/* Write the page backing @addr to swap offset @page_off (async when
 * @bio_chain is non-NULL). */
static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
	struct page *pg = virt_to_page(addr);

	return submit(WRITE, page_off, pg, bio_chain);
}
|
||||
|
||||
static int wait_on_bio_chain(struct bio **bio_chain)
|
||||
{
|
||||
struct bio *bio;
|
||||
struct bio *next_bio;
|
||||
int ret = 0;
|
||||
|
||||
if (bio_chain == NULL)
|
||||
return 0;
|
||||
|
||||
bio = *bio_chain;
|
||||
if (bio == NULL)
|
||||
return 0;
|
||||
while (bio) {
|
||||
struct page *page;
|
||||
|
||||
next_bio = bio->bi_private;
|
||||
page = bio->bi_io_vec[0].bv_page;
|
||||
wait_on_page_locked(page);
|
||||
if (!PageUptodate(page) || PageError(page))
|
||||
ret = -EIO;
|
||||
put_page(page);
|
||||
bio_put(bio);
|
||||
bio = next_bio;
|
||||
}
|
||||
*bio_chain = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Saving part
 */

/* Stamp the resume swap header: save the original swap signature in
 * orig_sig, write SWSUSP_SIG in its place, and record the sector where
 * the image starts.  Returns -ENODEV when no swap signature is found. */
static int mark_swapfiles(sector_t start)
{
	int error;

	bio_read_page(swsusp_resume_block, &swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
		swsusp_header.image = start;
		error = bio_write_page(swsusp_resume_block,
					&swsusp_header, NULL);
	} else {
		printk(KERN_ERR "swsusp: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}
|
||||
|
||||
/**
|
||||
* swsusp_swap_check - check if the resume device is a swap device
|
||||
* and get its index (if so)
|
||||
*/
|
||||
|
||||
static int swsusp_swap_check(void) /* This is called before saving image */
|
||||
{
|
||||
int res;
|
||||
|
||||
res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
|
||||
&resume_bdev);
|
||||
if (res < 0)
|
||||
return res;
|
||||
|
||||
root_swap = res;
|
||||
res = blkdev_get(resume_bdev, FMODE_WRITE, O_RDWR);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
res = set_blocksize(resume_bdev, PAGE_SIZE);
|
||||
if (res < 0)
|
||||
blkdev_put(resume_bdev);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* write_page - Write one page to given swap location.
|
||||
* @buf: Address we're writing.
|
||||
* @offset: Offset of the swap page we're writing to.
|
||||
* @bio_chain: Link the next write BIO here
|
||||
*/
|
||||
|
||||
static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
|
||||
{
|
||||
void *src;
|
||||
|
||||
if (!offset)
|
||||
return -ENOSPC;
|
||||
|
||||
if (bio_chain) {
|
||||
src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
|
||||
if (src) {
|
||||
memcpy(src, buf, PAGE_SIZE);
|
||||
} else {
|
||||
WARN_ON_ONCE(1);
|
||||
bio_chain = NULL; /* Go synchronous */
|
||||
src = buf;
|
||||
}
|
||||
} else {
|
||||
src = buf;
|
||||
}
|
||||
return bio_write_page(offset, src, bio_chain);
|
||||
}
|
||||
|
||||
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_SIZE swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

/* Number of data-page entries per map page: one sector_t slot of the
 * page is reserved for the on-swap link to the next map page. */
#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/* One on-swap page of the swap map: data-page locations plus the link
 * to the next map page (0 terminates the chain). */
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

/**
 *	The swap_map_handle structure is used for handling swap in
 *	a file-alike way
 */

struct swap_map_handle {
	struct swap_map_page *cur;	/* in-memory copy of the current map page */
	sector_t cur_swap;		/* swap location of the current map page */
	struct bitmap_page *bitmap;	/* tracks allocated swap pages (for cleanup) */
	unsigned int k;			/* next free index in cur->entries */
};
|
||||
|
||||
static void release_swap_writer(struct swap_map_handle *handle)
|
||||
{
|
||||
if (handle->cur)
|
||||
free_page((unsigned long)handle->cur);
|
||||
handle->cur = NULL;
|
||||
if (handle->bitmap)
|
||||
free_bitmap(handle->bitmap);
|
||||
handle->bitmap = NULL;
|
||||
}
|
||||
|
||||
/*
 * Initialize @handle for writing the image to swap: allocate the working
 * map page, the bitmap that tracks every swap page we allocate, and the
 * first on-swap slot for the map page itself.
 * Returns 0, -ENOMEM, or -ENOSPC; on failure nothing is left allocated.
 */
static int get_swap_writer(struct swap_map_handle *handle)
{
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur)
		return -ENOMEM;
	/* Bitmap is sized for every swap page on the resume device. */
	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
	if (!handle->bitmap) {
		release_swap_writer(handle);
		return -ENOMEM;
	}
	/* Reserve the swap slot where the first map page will be stored. */
	handle->cur_swap = alloc_swapdev_block(root_swap, handle->bitmap);
	if (!handle->cur_swap) {
		release_swap_writer(handle);
		return -ENOSPC;
	}
	handle->k = 0;
	return 0;
}
|
||||
|
||||
/*
 * Write one data page through the swap map: allocate a swap slot for
 * @buf, write it (asynchronously if @bio_chain is non-NULL) and record
 * the slot in the current map page.  When the map page fills up, a new
 * slot is allocated for the next map page, the full map page is linked
 * to it and written out synchronously.
 * Returns 0 on success or a negative errno.
 */
static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct bio **bio_chain)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap, handle->bitmap);
	/* offset may be 0 on exhaustion; write_page turns that into -ENOSPC */
	error = write_page(buf, offset, bio_chain);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		/* Map page full: drain in-flight data writes before reusing
		 * the map page buffer for the next chunk. */
		error = wait_on_bio_chain(bio_chain);
		if (error)
			goto out;
		offset = alloc_swapdev_block(root_swap, handle->bitmap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		/* Map pages are written synchronously (NULL chain). */
		error = write_page(handle->cur, handle->cur_swap, NULL);
		if (error)
			goto out;
		memset(handle->cur, 0, PAGE_SIZE);
		handle->cur_swap = offset;
		handle->k = 0;
	}
 out:
	return error;
}
|
||||
|
||||
static int flush_swap_writer(struct swap_map_handle *handle)
|
||||
{
|
||||
if (handle->cur && handle->cur_swap)
|
||||
return write_page(handle->cur, handle->cur_swap, NULL);
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
 *	save_image - save the suspend image data
 *	@handle:	swap map handle set up by get_swap_writer()
 *	@snapshot:	snapshot handle to read image pages from
 *	@nr_to_write:	expected number of data pages (for progress/speed)
 *
 *	Pumps pages from the snapshot into swap_write_page() with chained
 *	(asynchronous) BIOs, printing a percentage progress indicator.
 *	Returns 0 on success or the first error from writing or from
 *	draining the BIO chain.
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int error = 0;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;

	printk("Saving image data pages (%u pages) ...     ", nr_to_write);
	/* One progress tick per percent; avoid division by zero below. */
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	do {
		ret = snapshot_read_next(snapshot, PAGE_SIZE);
		if (ret > 0) {
			error = swap_write_page(handle, data_of(*snapshot),
						&bio);
			if (error)
				break;
			if (!(nr_pages % m))
				printk("\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;
		}
	} while (ret > 0);
	/* Always drain outstanding BIOs, even after an error. */
	err2 = wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!error)
		error = err2;
	if (!error)
		printk("\b\b\b\bdone\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
	return error;
}
|
||||
|
||||
/**
 *	enough_swap - Make sure we have enough swap to save the image.
 *
 *	Returns TRUE or FALSE after checking the total amount of swap
 *	space available from the resume partition.  PAGES_FOR_IO is kept
 *	in reserve for the writing itself.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);

	pr_debug("swsusp: free swap pages: %u\n", free_swap);
	return free_swap > nr_pages + PAGES_FOR_IO;
}
|
||||
|
||||
/**
 *	swsusp_write - Write entire image and metadata.
 *
 *	It is important _NOT_ to umount filesystems at this point. We want
 *	them synced (in case something goes wrong) but we DO not want to mark
 *	filesystem clean: it is not. (And it does not matter, if we resume
 *	correctly, we'll mark system clean, anyway.)
 *
 *	Sequence: open the resume swap device, read the image header from
 *	the snapshot, verify free swap, write header plus data pages through
 *	the swap map, then stamp the swap signature (mark_swapfiles) so the
 *	image is found at resume time.  On any failure every allocated swap
 *	page is released.
 */

int swsusp_write(void)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	int error;

	error = swsusp_swap_check();
	if (error) {
		printk(KERN_ERR "swsusp: Cannot find swap device, try "
				"swapon -a.\n");
		return error;
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	/* The first snapshot page is the image header (swsusp_info). */
	error = snapshot_read_next(&snapshot, PAGE_SIZE);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	if (!enough_swap(header->pages)) {
		printk(KERN_ERR "swsusp: Not enough free swap\n");
		error = -ENOSPC;
		goto out;
	}
	error = get_swap_writer(&handle);
	if (!error) {
		/* Remember where the first map page lives: mark_swapfiles()
		 * stores it in the swap header for resume to find. */
		sector_t start = handle.cur_swap;

		error = swap_write_page(&handle, header, NULL);
		if (!error)
			error = save_image(&handle, &snapshot,
					header->pages - 1);

		if (!error) {
			flush_swap_writer(&handle);
			printk("S");
			error = mark_swapfiles(start);
			printk("|\n");
		}
	}
	if (error)
		free_all_swap_pages(root_swap, handle.bitmap);
	release_swap_writer(&handle);
 out:
	swsusp_close();
	return error;
}
|
||||
|
||||
/**
|
||||
* The following functions allow us to read data using a swap map
|
||||
* in a file-alike way
|
||||
*/
|
||||
|
||||
static void release_swap_reader(struct swap_map_handle *handle)
|
||||
{
|
||||
if (handle->cur)
|
||||
free_page((unsigned long)handle->cur);
|
||||
handle->cur = NULL;
|
||||
}
|
||||
|
||||
/*
 * Initialize @handle for reading the image: allocate one page and read
 * the first swap map page from swap location @start (taken from the
 * swap header).  Returns 0 or a negative errno; on failure the handle
 * is left released.
 */
static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
{
	int error;

	if (!start)
		return -EINVAL;

	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
	if (!handle->cur)
		return -ENOMEM;

	error = bio_read_page(start, handle->cur, NULL);
	if (error) {
		release_swap_reader(handle);
		return error;
	}
	handle->k = 0;
	return 0;
}
|
||||
|
||||
/*
 * Read the next data page of the image into @buf (asynchronously if
 * @bio_chain is non-NULL).  When the current map page is exhausted the
 * in-flight reads are drained and the next map page is fetched
 * synchronously; a zero next_swap link ends the chain and releases the
 * reader.  Returns 0 or a negative errno.
 */
static int swap_read_page(struct swap_map_handle *handle, void *buf,
			  struct bio **bio_chain)
{
	sector_t offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;	/* map entry missing: corrupt/short image */
	error = bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		/* Data reads may still target pages we are about to stop
		 * tracking; wait for them before replacing the map page. */
		error = wait_on_bio_chain(bio_chain);
		handle->k = 0;
		offset = handle->cur->next_swap;
		if (!offset)
			release_swap_reader(handle);
		else if (!error)
			error = bio_read_page(offset, handle->cur, NULL);
	}
	return error;
}
|
||||
|
||||
/**
 *	load_image - load the image using the swap map handle
 *	@handle and the snapshot handle @snapshot
 *	(assume there are @nr_to_read pages to load)
 *
 *	Feeds pages from swap into snapshot_write_next() with chained
 *	asynchronous reads, draining the chain whenever the snapshot layer
 *	requests synchronous access (sync_read).  After the loop the image
 *	is finalized and verified to be complete.
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk("Loading image data pages (%u pages) ...     ", nr_to_read);
	/* One progress tick per percent; avoid division by zero below. */
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		/* <= 0 means the snapshot needs no more data (or failed). */
		error = snapshot_write_next(snapshot, PAGE_SIZE);
		if (error <= 0)
			break;
		error = swap_read_page(handle, data_of(*snapshot), &bio);
		if (error)
			break;
		if (snapshot->sync_read)
			error = wait_on_bio_chain(&bio);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	/* Always drain outstanding BIOs, even after an error. */
	err2 = wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!error)
		error = err2;
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	}
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return error;
}
|
||||
|
||||
/*
 * Read the entire suspend image back from swap: the header page first,
 * then all data pages via load_image().  Expects swsusp_check() to have
 * opened resume_bdev and filled swsusp_header.  The block device is
 * released before returning.  Returns 0 or a negative errno.
 */
int swsusp_read(void)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	if (IS_ERR(resume_bdev)) {
		pr_debug("swsusp: block device not initialised\n");
		return PTR_ERR(resume_bdev);
	}

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	/* First snapshot page receives the image header. */
	error = snapshot_write_next(&snapshot, PAGE_SIZE);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	/* swsusp_header.image holds the first map page's location. */
	error = get_swap_reader(&handle, swsusp_header.image);
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error)
		error = load_image(&handle, &snapshot, header->pages - 1);
	release_swap_reader(&handle);

	blkdev_put(resume_bdev);

	if (!error)
		pr_debug("swsusp: Reading resume file was successful\n");
	else
		pr_debug("swsusp: Error %d resuming\n", error);
	return error;
}
|
||||
|
||||
/**
|
||||
* swsusp_check - Check for swsusp signature in the resume device
|
||||
*/
|
||||
|
||||
int swsusp_check(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
|
||||
if (!IS_ERR(resume_bdev)) {
|
||||
set_blocksize(resume_bdev, PAGE_SIZE);
|
||||
memset(&swsusp_header, 0, sizeof(swsusp_header));
|
||||
error = bio_read_page(swsusp_resume_block,
|
||||
&swsusp_header, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
|
||||
memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
|
||||
/* Reset swap signature now */
|
||||
error = bio_write_page(swsusp_resume_block,
|
||||
&swsusp_header, NULL);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
if (error)
|
||||
blkdev_put(resume_bdev);
|
||||
else
|
||||
pr_debug("swsusp: Signature found, resuming\n");
|
||||
} else {
|
||||
error = PTR_ERR(resume_bdev);
|
||||
}
|
||||
|
||||
if (error)
|
||||
pr_debug("swsusp: Error %d check for resume file\n", error);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* swsusp_close - close swap device.
|
||||
*/
|
||||
|
||||
void swsusp_close(void)
|
||||
{
|
||||
if (IS_ERR(resume_bdev)) {
|
||||
pr_debug("swsusp: block device not initialised\n");
|
||||
return;
|
||||
}
|
||||
|
||||
blkdev_put(resume_bdev);
|
||||
}
|
||||
330
kernel/power/swsusp.c
Normal file
330
kernel/power/swsusp.c
Normal file
@@ -0,0 +1,330 @@
|
||||
/*
|
||||
* linux/kernel/power/swsusp.c
|
||||
*
|
||||
* This file provides code to write suspend image to swap and read it back.
|
||||
*
|
||||
* Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
|
||||
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
|
||||
*
|
||||
* This file is released under the GPLv2.
|
||||
*
|
||||
* I'd like to thank the following people for their work:
|
||||
*
|
||||
* Pavel Machek <pavel@ucw.cz>:
|
||||
* Modifications, defectiveness pointing, being with me at the very beginning,
|
||||
* suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
|
||||
*
|
||||
* Steve Doddi <dirk@loth.demon.co.uk>:
|
||||
* Support the possibility of hardware state restoring.
|
||||
*
|
||||
* Raph <grey.havens@earthling.net>:
|
||||
* Support for preserving states of network devices and virtual console
|
||||
* (including X and svgatextmode)
|
||||
*
|
||||
* Kurt Garloff <garloff@suse.de>:
|
||||
* Straightened the critical function in order to prevent compilers from
|
||||
* playing tricks with local variables.
|
||||
*
|
||||
* Andreas Mohr <a.mohr@mailto.de>
|
||||
*
|
||||
* Alex Badea <vampire@go.ro>:
|
||||
* Fixed runaway init
|
||||
*
|
||||
* Rafael J. Wysocki <rjw@sisk.pl>
|
||||
* Reworked the freeing of memory and the handling of swap
|
||||
*
|
||||
* More state savers are welcome. Especially for the scsi layer...
|
||||
*
|
||||
* For TODOs,FIXMEs also look in Documentation/power/swsusp.txt
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/swapops.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/time.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;

/* 1 between the atomic snapshot and the actual suspend; __nosavedata so
 * the value is not overwritten when the image is restored, letting the
 * restored kernel tell "just snapshotted" from "just resumed". */
int in_suspend __nosavedata = 0;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void);
int restore_highmem(void);
#else
/* No highmem: both operations degenerate to no-ops. */
static inline int restore_highmem(void) { return 0; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif
|
||||
|
||||
/**
|
||||
* The following functions are used for tracing the allocated
|
||||
* swap pages, so that they can be freed in case of an error.
|
||||
*
|
||||
* The functions operate on a linked bitmap structure defined
|
||||
* in power.h
|
||||
*/
|
||||
|
||||
void free_bitmap(struct bitmap_page *bitmap)
|
||||
{
|
||||
struct bitmap_page *bp;
|
||||
|
||||
while (bitmap) {
|
||||
bp = bitmap->next;
|
||||
free_page((unsigned long)bitmap);
|
||||
bitmap = bp;
|
||||
}
|
||||
}
|
||||
|
||||
struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
|
||||
{
|
||||
struct bitmap_page *bitmap, *bp;
|
||||
unsigned int n;
|
||||
|
||||
if (!nr_bits)
|
||||
return NULL;
|
||||
|
||||
bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
|
||||
bp = bitmap;
|
||||
for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
|
||||
bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
|
||||
bp = bp->next;
|
||||
if (!bp) {
|
||||
free_bitmap(bitmap);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return bitmap;
|
||||
}
|
||||
|
||||
static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
|
||||
{
|
||||
unsigned int n;
|
||||
|
||||
n = BITMAP_PAGE_BITS;
|
||||
while (bitmap && n <= bit) {
|
||||
n += BITMAP_PAGE_BITS;
|
||||
bitmap = bitmap->next;
|
||||
}
|
||||
if (!bitmap)
|
||||
return -EINVAL;
|
||||
n -= BITMAP_PAGE_BITS;
|
||||
bit -= n;
|
||||
n = 0;
|
||||
while (bit >= BITS_PER_CHUNK) {
|
||||
bit -= BITS_PER_CHUNK;
|
||||
n++;
|
||||
}
|
||||
bitmap->chunks[n] |= (1UL << bit);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Allocate one swap page on device @swap and record it in @bitmap so it
 * can be freed later by free_all_swap_pages().  Returns the device
 * block number, or 0 on failure (no swap left, or the offset does not
 * fit in the bitmap — in which case the page is given back).
 */
sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
{
	unsigned long offset;

	/* get_swap_page_of_type() returns a zero entry on exhaustion,
	 * whose swp_offset() is 0 — handled by the check below. */
	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (bitmap_set(bitmap, offset))
			/* Can't track it, so don't keep it. */
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
|
||||
|
||||
/*
 * Free every swap page on device @swap whose bit is set in @bitmap,
 * i.e. everything handed out by alloc_swapdev_block().  Walks the whole
 * bitmap chunk by chunk, bit by bit; the running counter 'bit' is the
 * swap offset corresponding to the current bit position.
 */
void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
{
	unsigned int bit, n;
	unsigned long test;

	bit = 0;
	while (bitmap) {
		for (n = 0; n < BITMAP_PAGE_CHUNKS; n++)
			for (test = 1UL; test; test <<= 1) {
				if (bitmap->chunks[n] & test)
					swap_free(swp_entry(swap, bit));
				bit++;
			}
		bitmap = bitmap->next;
	}
}
|
||||
|
||||
/**
 *	swsusp_show_speed - print the time elapsed between two events represented by
 *	@start and @stop
 *
 *	@nr_pages -	number of pages processed between @start and @stop
 *	@msg -		introductory message to print
 */

void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	s64 elapsed_centisecs64;
	int centisecs;
	int k;
	int kps;

	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	/* do_div() divides in place, leaving centiseconds in the s64. */
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);	/* kilobytes processed */
	kps = (k * 100) / centisecs;
	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}
|
||||
|
||||
/**
 *	swsusp_shrink_memory - Try to free as much memory as needed
 *
 *	... but do not OOM-kill anyone
 *
 *	Notice: all userland should be stopped before it is called, or
 *	livelock is possible.
 */

#define SHRINK_BITE	10000

/* Reclaim up to @tmp pages, capped at SHRINK_BITE per call so progress
 * is reported between bites.  Returns the number of pages freed. */
static inline unsigned long __shrink_memory(long tmp)
{
	return shrink_all_memory(tmp > SHRINK_BITE ? SHRINK_BITE : tmp);
}
|
||||
|
||||
/*
 * Repeatedly estimate how much memory the snapshot will need beyond
 * what is currently free and shrink caches until the image fits, both
 * in available RAM and under the image_size preference.  Spins a
 * progress indicator while working.  Returns 0 on success or -ENOMEM
 * if reclaim makes no progress while memory is still short.
 */
int swsusp_shrink_memory(void)
{
	long tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";		/* spinner glyphs */
	struct timeval start, stop;

	printk("Shrinking memory...  ");
	do_gettimeofday(&start);
	do {
		long size, highmem_size;

		highmem_size = count_highmem_pages();
		size = count_data_pages() + PAGES_FOR_IO;
		/* tmp: lowmem shortfall; highmem is tracked separately
		 * because image pages for highmem need no lowmem copy. */
		tmp = size;
		size += highmem_size;
		for_each_zone (zone)
			if (populated_zone(zone)) {
				tmp += snapshot_additional_pages(zone);
				if (is_highmem(zone)) {
					highmem_size -=
					zone_page_state(zone, NR_FREE_PAGES);
				} else {
					tmp -= zone_page_state(zone, NR_FREE_PAGES);
					/* Pages reserved for lowmem-only
					 * allocations are not usable here. */
					tmp += zone->lowmem_reserve[ZONE_NORMAL];
				}
			}

		if (highmem_size < 0)
			highmem_size = 0;

		tmp += highmem_size;
		if (tmp > 0) {
			/* Not enough free memory at all: must reclaim;
			 * zero progress here means we cannot succeed. */
			tmp = __shrink_memory(tmp);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			/* Enough to snapshot, but over the preferred image
			 * size: shrink best-effort (failure tolerated). */
			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);
	do_gettimeofday(&stop);
	printk("\bdone (%lu pages freed)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Freed");

	return 0;
}
|
||||
|
||||
/*
 * Take the atomic snapshot: with IRQs off and devices powered down,
 * save CPU state and call the architecture suspend code, which creates
 * the in-memory image.  Execution resumes here both after snapshotting
 * and after a successful restore.  Returns 0 or a negative errno.
 */
int swsusp_suspend(void)
{
	int error;

	if ((error = arch_prepare_suspend()))
		return error;

	local_irq_disable();
	/* At this point, device_suspend() has been called, but *not*
	 * device_power_down(). We *must* device_power_down() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */
	if ((error = device_power_down(PMSG_FREEZE))) {
		printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
		goto Enable_irqs;
	}

	save_processor_state();
	if ((error = swsusp_arch_suspend()))
		printk(KERN_ERR "Error %d suspending\n", error);
	/* Restore control flow magically appears here */
	restore_processor_state();
	/* NOTE:  device_power_up() is just a resume() for devices
	 * that suspended with irqs off ... no overall powerup.
	 */
	device_power_up();
 Enable_irqs:
	local_irq_enable();
	return error;
}
|
||||
|
||||
/*
 * Jump into a loaded image: with IRQs off and devices quiesced, hand
 * control to the architecture restore code.  On success this function
 * never returns here — execution continues after swsusp_arch_suspend()
 * in the restored kernel.  A return therefore always carries an error.
 */
int swsusp_resume(void)
{
	int error;

	local_irq_disable();
	/* NOTE:  device_power_down() is just a suspend() with irqs off;
	 * it has no special "power things down" semantics
	 */
	if (device_power_down(PMSG_PRETHAW))
		printk(KERN_ERR "Some devices failed to power down, very bad\n");
	/* We'll ignore saved state, but this gets preempt count (etc) right */
	save_processor_state();
	error = restore_highmem();
	if (!error) {
		error = swsusp_arch_resume();
		/* The code below is only ever reached in case of a failure.
		 * Otherwise execution continues at place where
		 * swsusp_arch_suspend() was called
		 */
		BUG_ON(!error);
		/* This call to restore_highmem() undos the previous one */
		restore_highmem();
	}
	/* The only reason why swsusp_arch_resume() can fail is memory being
	 * very tight, so we have to free it as soon as we can to avoid
	 * subsequent failures
	 */
	swsusp_free();
	restore_processor_state();
	touch_softlockup_watchdog();
	device_power_up();
	local_irq_enable();
	return error;
}
|
||||
481
kernel/power/user.c
Normal file
481
kernel/power/user.c
Normal file
@@ -0,0 +1,481 @@
|
||||
/*
|
||||
* linux/kernel/power/user.c
|
||||
*
|
||||
* This file provides the user space interface for software suspend/resume.
|
||||
*
|
||||
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
|
||||
*
|
||||
* This file is released under the GPLv2.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/swapops.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
/* Misc-device minor number for /dev/snapshot. */
#define SNAPSHOT_MINOR	231

/* Per-open state of the snapshot device; a single static instance, as
 * only one opener is allowed at a time (see device_available). */
static struct snapshot_data {
	struct snapshot_handle handle;	/* snapshot read/write cursor */
	int swap;			/* swap type used for the image, or -1 */
	struct bitmap_page *bitmap;	/* tracks swap pages handed to user space */
	int mode;			/* O_RDONLY (suspend) or O_WRONLY (resume) */
	char frozen;			/* processes are frozen */
	char ready;			/* snapshot image has been created */
	char platform_suspend;		/* use platform (ACPI) hooks */
} snapshot_state;

/* 1 while the device is free; open() decrements, release() increments. */
static atomic_t device_available = ATOMIC_INIT(1);
|
||||
static int snapshot_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct snapshot_data *data;
|
||||
|
||||
if (!atomic_add_unless(&device_available, -1, 0))
|
||||
return -EBUSY;
|
||||
|
||||
if ((filp->f_flags & O_ACCMODE) == O_RDWR)
|
||||
return -ENOSYS;
|
||||
|
||||
nonseekable_open(inode, filp);
|
||||
data = &snapshot_state;
|
||||
filp->private_data = data;
|
||||
memset(&data->handle, 0, sizeof(struct snapshot_handle));
|
||||
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
|
||||
data->swap = swsusp_resume_device ?
|
||||
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
|
||||
data->mode = O_RDONLY;
|
||||
} else {
|
||||
data->swap = -1;
|
||||
data->mode = O_WRONLY;
|
||||
}
|
||||
data->bitmap = NULL;
|
||||
data->frozen = 0;
|
||||
data->ready = 0;
|
||||
data->platform_suspend = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Close /dev/snapshot: free the image, give back any swap pages handed
 * out via SNAPSHOT_GET_SWAP_PAGE, thaw processes if the opener left
 * them frozen, and release the single-opener slot.
 */
static int snapshot_release(struct inode *inode, struct file *filp)
{
	struct snapshot_data *data;

	swsusp_free();
	data = filp->private_data;
	free_all_swap_pages(data->swap, data->bitmap);
	free_bitmap(data->bitmap);
	if (data->frozen) {
		/* Opener died or forgot SNAPSHOT_UNFREEZE: clean up. */
		mutex_lock(&pm_mutex);
		thaw_processes();
		enable_nonboot_cpus();
		mutex_unlock(&pm_mutex);
	}
	atomic_inc(&device_available);
	return 0;
}
|
||||
|
||||
/*
 * read() on /dev/snapshot: hand the next chunk of the snapshot image to
 * user space.  Returns bytes copied, 0 at end of image, or a negative
 * errno.  *offp tracks the snapshot's own cursor (the device is not
 * seekable).
 */
static ssize_t snapshot_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *offp)
{
	struct snapshot_data *data;
	ssize_t res;

	data = filp->private_data;
	res = snapshot_read_next(&data->handle, count);
	if (res > 0) {
		if (copy_to_user(buf, data_of(data->handle), res))
			res = -EFAULT;
		else
			*offp = data->handle.offset;
	}
	return res;
}
|
||||
|
||||
/*
 * write() on /dev/snapshot: accept the next chunk of an image being
 * loaded for resume.  Returns bytes consumed, 0 when the image is
 * complete, or a negative errno.  Mirrors snapshot_read().
 */
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
                              size_t count, loff_t *offp)
{
	struct snapshot_data *data;
	ssize_t res;

	data = filp->private_data;
	/* Ask the snapshot layer where the next chunk goes, then fill it. */
	res = snapshot_write_next(&data->handle, count);
	if (res > 0) {
		if (copy_from_user(data_of(data->handle), buf, res))
			res = -EFAULT;
		else
			*offp = data->handle.offset;
	}
	return res;
}
|
||||
|
||||
static inline int platform_prepare(void)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
if (pm_ops && pm_ops->prepare)
|
||||
error = pm_ops->prepare(PM_SUSPEND_DISK);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static inline void platform_finish(void)
|
||||
{
|
||||
if (pm_ops && pm_ops->finish)
|
||||
pm_ops->finish(PM_SUSPEND_DISK);
|
||||
}
|
||||
|
||||
/*
 * Create the atomic snapshot on behalf of SNAPSHOT_ATOMIC_SNAPSHOT:
 * shrink memory, optionally run platform prepare hooks, suspend
 * devices and nonboot CPUs, then call swsusp_suspend().  All steps are
 * unwound on the way out regardless of success.  Returns 0 or errno.
 */
static inline int snapshot_suspend(int platform_suspend)
{
	int error;

	mutex_lock(&pm_mutex);
	/* Free memory before shutting down devices. */
	error = swsusp_shrink_memory();
	if (error)
		goto Finish;

	if (platform_suspend) {
		error = platform_prepare();
		if (error)
			goto Finish;
	}
	suspend_console();
	error = device_suspend(PMSG_FREEZE);
	if (error)
		goto Resume_devices;

	error = disable_nonboot_cpus();
	if (!error) {
		/* in_suspend distinguishes "snapshot taken" (1) from
		 * "resumed from image" (0) when execution returns here. */
		in_suspend = 1;
		error = swsusp_suspend();
	}
	enable_nonboot_cpus();
 Resume_devices:
	if (platform_suspend)
		platform_finish();

	device_resume();
	resume_console();
 Finish:
	mutex_unlock(&pm_mutex);
	return error;
}
|
||||
|
||||
/*
 * Jump into a loaded image on behalf of SNAPSHOT_ATOMIC_RESTORE:
 * optionally run platform prepare hooks, suspend devices and nonboot
 * CPUs, then call swsusp_resume().  On success swsusp_resume() does not
 * return; reaching the unwind path means the restore failed.
 */
static inline int snapshot_restore(int platform_suspend)
{
	int error;

	mutex_lock(&pm_mutex);
	pm_prepare_console();
	if (platform_suspend) {
		error = platform_prepare();
		if (error)
			goto Finish;
	}
	suspend_console();
	error = device_suspend(PMSG_PRETHAW);
	if (error)
		goto Resume_devices;

	error = disable_nonboot_cpus();
	if (!error)
		error = swsusp_resume();

	enable_nonboot_cpus();
 Resume_devices:
	if (platform_suspend)
		platform_finish();

	device_resume();
	resume_console();
 Finish:
	pm_restore_console();
	mutex_unlock(&pm_mutex);
	return error;
}
|
||||
|
||||
/*
 * ioctl dispatcher for /dev/snapshot — the user-space suspend/resume
 * control interface.  Requires CAP_SYS_ADMIN.  Most commands validate
 * the device state recorded in snapshot_data before acting; errors are
 * returned as negative errnos.
 */
static int snapshot_ioctl(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t avail;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		/* Freeze all user-space processes; idempotent. */
		if (data->frozen)
			break;
		mutex_lock(&pm_mutex);
		if (freeze_processes()) {
			thaw_processes();
			error = -EBUSY;
		}
		mutex_unlock(&pm_mutex);
		if (!error)
			data->frozen = 1;
		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen)
			break;
		mutex_lock(&pm_mutex);
		thaw_processes();
		mutex_unlock(&pm_mutex);
		data->frozen = 0;
		break;

	case SNAPSHOT_ATOMIC_SNAPSHOT:
		/* Only valid on a read-only open with frozen processes and
		 * no image created yet.  Reports in_suspend to user space
		 * so it can tell snapshot-return from resume-return. */
		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
			error = -EPERM;
			break;
		}
		error = snapshot_suspend(data->platform_suspend);
		if (!error)
			error = put_user(in_suspend, (unsigned int __user *)arg);
		if (!error)
			data->ready = 1;
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		/* On success this does not return. */
		error = snapshot_restore(data->platform_suspend);
		break;

	case SNAPSHOT_FREE:
		/* Drop the image and reset the read/write cursor. */
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = 0;
		break;

	case SNAPSHOT_SET_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_AVAIL_SWAP:
		/* Report free swap on the selected device, in bytes. */
		avail = count_swap_pages(data->swap, 1);
		avail <<= PAGE_SHIFT;
		error = put_user(avail, (loff_t __user *)arg);
		break;

	case SNAPSHOT_GET_SWAP_PAGE:
		/* Hand one tracked swap page to user space; the tracking
		 * bitmap is created lazily on first use. */
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		if (!data->bitmap) {
			data->bitmap = alloc_bitmap(count_swap_pages(data->swap, 0));
			if (!data->bitmap) {
				error = -ENOMEM;
				break;
			}
		}
		offset = alloc_swapdev_block(data->swap, data->bitmap);
		if (offset) {
			/* Returned to user space as a byte offset. */
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (sector_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap, data->bitmap);
		free_bitmap(data->bitmap);
		data->bitmap = NULL;
		break;

	case SNAPSHOT_SET_SWAP_FILE:
		/* Only allowed before any swap pages were handed out. */
		if (!data->bitmap) {
			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			if (old_decode_dev(arg)) {
				data->swap = swap_type_of(old_decode_dev(arg),
							  0, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		} else {
			error = -EPERM;
		}
		break;

	case SNAPSHOT_S2RAM:
		/* Suspend-to-RAM while processes are frozen (used for
		 * "suspend to both" schemes). */
		if (!pm_ops) {
			error = -ENOSYS;
			break;
		}

		if (!data->frozen) {
			error = -EPERM;
			break;
		}

		if (!mutex_trylock(&pm_mutex)) {
			error = -EBUSY;
			break;
		}

		if (pm_ops->prepare) {
			error = pm_ops->prepare(PM_SUSPEND_MEM);
			if (error)
				goto OutS3;
		}

		/* Put devices to sleep */
		suspend_console();
		error = device_suspend(PMSG_SUSPEND);
		if (error) {
			printk(KERN_ERR "Failed to suspend some devices.\n");
		} else {
			error = disable_nonboot_cpus();
			if (!error) {
				/* Enter S3, system is already frozen */
				suspend_enter(PM_SUSPEND_MEM);
				enable_nonboot_cpus();
			}
			/* Wake up devices */
			device_resume();
		}
		resume_console();
		if (pm_ops->finish)
			pm_ops->finish(PM_SUSPEND_MEM);

 OutS3:
		mutex_unlock(&pm_mutex);
		break;

	case SNAPSHOT_PMOPS:
		/* Drive the platform (e.g. ACPI S4) suspend hooks. */
		error = -EINVAL;

		switch (arg) {

		case PMOPS_PREPARE:
			if (pm_ops && pm_ops->enter) {
				data->platform_suspend = 1;
				error = 0;
			} else {
				error = -ENOSYS;
			}
			break;

		case PMOPS_ENTER:
			if (data->platform_suspend) {
				kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
				error = pm_ops->enter(PM_SUSPEND_DISK);
				/* NOTE(review): the enter() result is
				 * discarded here — presumably because a
				 * successful enter() powers off and never
				 * returns; confirm this is intentional. */
				error = 0;
			}
			break;

		case PMOPS_FINISH:
			if (data->platform_suspend)
				error = 0;

			break;

		default:
			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

		}
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		/* Like SET_SWAP_FILE but also takes an offset, to support
		 * swap files (not just partitions). */
		if (data->bitmap) {
			error = -EPERM;
		} else {
			struct resume_swap_area swap_area;
			dev_t swdev;

			error = copy_from_user(&swap_area, (void __user *)arg,
					sizeof(struct resume_swap_area));
			if (error) {
				error = -EFAULT;
				break;
			}

			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			swdev = old_decode_dev(swap_area.dev);
			if (swdev) {
				offset = swap_area.offset;
				data->swap = swap_type_of(swdev, offset, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		}
		break;

	default:
		error = -ENOTTY;

	}

	return error;
}
|
||||
|
||||
/* File operations for /dev/snapshot; the device is not seekable. */
static const struct file_operations snapshot_fops = {
	.open = snapshot_open,
	.release = snapshot_release,
	.read = snapshot_read,
	.write = snapshot_write,
	.llseek = no_llseek,
	.ioctl = snapshot_ioctl,
};
|
||||
|
||||
/* Misc-device descriptor: registers /dev/snapshot at minor SNAPSHOT_MINOR. */
static struct miscdevice snapshot_device = {
	.minor = SNAPSHOT_MINOR,
	.name = "snapshot",
	.fops = &snapshot_fops,
};
|
||||
|
||||
/*
 * Register the snapshot misc device at boot.
 * Fix: removed the stray semicolon after the function's closing brace
 * (an empty file-scope declaration — invalid before C11, and flagged by
 * -pedantic builds).
 */
static int __init snapshot_device_init(void)
{
	return misc_register(&snapshot_device);
}

device_initcall(snapshot_device_init);
|
||||
Reference in New Issue
Block a user