Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (72 commits)
  Revert "x86/PCI: ACPI based PCI gap calculation"
  PCI: remove unnecessary volatile in PCIe hotplug struct controller
  x86/PCI: ACPI based PCI gap calculation
  PCI: include linux/pm_wakeup.h for device_set_wakeup_capable
  PCI PM: Fix pci_prepare_to_sleep
  x86/PCI: Fix PCI config space for domains > 0
  Fix acpi_pm_device_sleep_wake() by providing a stub for CONFIG_PM_SLEEP=n
  PCI: Simplify PCI device PM code
  PCI PM: Introduce pci_prepare_to_sleep and pci_back_from_sleep
  PCI ACPI: Rework PCI handling of wake-up
  ACPI: Introduce new device wakeup flag 'prepared'
  ACPI: Introduce acpi_device_sleep_wake function
  PCI: rework pci_set_power_state function to call platform first
  PCI: Introduce platform_pci_power_manageable function
  ACPI: Introduce acpi_bus_power_manageable function
  PCI: make pci_name use dev_name
  PCI: handle pci_name() being const
  PCI: add stub for pci_set_consistent_dma_mask()
  PCI: remove unused arch pcibios_update_resource() functions
  PCI: fix pci_setup_device()'s sprinting into a const buffer
  ...

Fixed up conflicts in various files (arch/x86/kernel/setup_64.c, arch/x86/pci/irq.c,
arch/x86/pci/pci.h, drivers/acpi/sleep/main.c, drivers/pci/pci.c, drivers/pci/pci.h,
include/acpi/acpi_bus.h) from x86 and ACPI updates manually.
commit dc7c65db28

84 changed files with 4005 additions and 1758 deletions
include/linux/acpi.h

@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 		const char *name);
 
+#ifdef CONFIG_PM_SLEEP
+void __init acpi_old_suspend_ordering(void);
+#endif /* CONFIG_PM_SLEEP */
 #else	/* CONFIG_ACPI */
 
 static inline int early_acpi_boot_init(void)
include/linux/device.h

@@ -68,6 +68,8 @@ struct bus_type {
 	int (*resume_early)(struct device *dev);
 	int (*resume)(struct device *dev);
 
+	struct pm_ext_ops *pm;
+
 	struct bus_type_private *p;
 };
 
@@ -131,6 +133,8 @@ struct device_driver {
 	int (*resume) (struct device *dev);
 	struct attribute_group **groups;
 
+	struct pm_ops *pm;
+
 	struct driver_private *p;
 };
 
@@ -197,6 +201,8 @@ struct class {
 
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+
+	struct pm_ops *pm;
 };
 
 extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
 	struct attribute_group **groups;
 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
 	void (*release)(struct device *dev);
+
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
 
+	struct pm_ops *pm;
 };
 
 /* interface for exporting device attributes */
include/linux/pci.h

@@ -17,8 +17,7 @@
 #ifndef LINUX_PCI_H
 #define LINUX_PCI_H
 
-/* Include the pci register defines */
-#include <linux/pci_regs.h>
+#include <linux/pci_regs.h>	/* The pci register defines */
 
 /*
  * The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
 #include <linux/list.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
+#include <linux/kobject.h>
 #include <asm/atomic.h>
 #include <linux/device.h>
 
 /* Include the ID list */
 #include <linux/pci_ids.h>
 
+/* pci_slot represents a physical slot */
+struct pci_slot {
+	struct pci_bus *bus;		/* The bus this slot is on */
+	struct list_head list;		/* node in list of slots on this bus */
+	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
+	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
+	struct kobject kobj;
+};
+
 /* File state for mmap()s on /proc/bus/pci/X/Y */
 enum pci_mmap_state {
 	pci_mmap_io,
@@ -142,6 +151,7 @@ struct pci_dev {
 
 	void		*sysdata;	/* hook for sys-specific extension */
 	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
+	struct pci_slot	*slot;		/* Physical slot this device is in */
 
 	unsigned int	devfn;		/* encoded device & function index */
 	unsigned short	vendor;
@@ -167,6 +177,13 @@ struct pci_dev {
 	pci_power_t	current_state;	/* Current operating state. In ACPI-speak,
 					   this is D0-D3, D0 being fully functional,
 					   and D3 being off. */
+	int		pm_cap;		/* PM capability offset in the
+					   configuration space */
+	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
+					   can be generated */
+	unsigned int	d1_support:1;	/* Low power state D1 is supported */
+	unsigned int	d2_support:1;	/* Low power state D2 is supported */
+	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
 
 #ifdef CONFIG_PCIEASPM
 	struct pcie_link_state	*link_state;	/* ASPM link state. */
@@ -191,7 +208,6 @@ struct pci_dev {
 	unsigned int	is_added:1;
 	unsigned int	is_busmaster:1;	/* device is busmaster */
 	unsigned int	no_msi:1;	/* device may not use msi */
-	unsigned int	no_d1d2:1;	/* only allow d0 or d3 */
 	unsigned int	block_ucfg_access:1;	/* userspace config space access is blocked */
 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 	unsigned int	msi_enabled:1;
@@ -267,6 +283,7 @@ struct pci_bus {
 	struct list_head children;	/* list of child buses */
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
+	struct list_head slots;		/* list of slots on this bus */
 	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
 					/* address space routed to this bus */
 
@@ -328,7 +345,7 @@ struct pci_bus_region {
 struct pci_dynids {
 	spinlock_t lock;            /* protects list, index */
 	struct list_head list;      /* for IDs added at runtime */
-	unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+	unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
 };
 
 /* ---------------------------------------------------------------- */
@@ -390,7 +407,7 @@ struct pci_driver {
 	int  (*resume_early) (struct pci_dev *dev);
 	int  (*resume) (struct pci_dev *dev);	/* Device woken up */
 	void (*shutdown) (struct pci_dev *dev);
-
+	struct pm_ext_ops *pm;
 	struct pci_error_handlers *err_handler;
 	struct device_driver	driver;
 	struct pci_dynids dynids;
@@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
 			       struct pci_ops *ops, void *sysdata);
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 				int busnr);
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+				 const char *name);
+void pci_destroy_slot(struct pci_slot *slot);
+void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
 int pci_scan_slot(struct pci_bus *bus, int devfn);
 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
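The pci_create_slot()/pci_destroy_slot() pair above lets hotplug drivers publish and retire physical-slot objects. A minimal sketch of a caller follows; the helper names are hypothetical, and the assumption that pci_create_slot() reports failure through the ERR_PTR() convention is mine rather than something this hunk states.

    #include <linux/err.h>
    #include <linux/pci.h>

    /* hypothetical helper: publish slot number "nr" on a bridge's secondary bus */
    static struct pci_slot *foo_add_slot(struct pci_dev *bridge, int nr,
                                         const char *name)
    {
            struct pci_slot *slot;

            slot = pci_create_slot(bridge->subordinate, nr, name);
            if (IS_ERR(slot))               /* assumed ERR_PTR() error convention */
                    return NULL;
            return slot;
    }

    /* tear-down mirrors creation */
    static void foo_remove_slot(struct pci_slot *slot)
    {
            if (slot)
                    pci_destroy_slot(slot);
    }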
@@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_prepare_to_sleep(struct pci_dev *dev);
+int pci_back_from_sleep(struct pci_dev *dev);
 
 /* Functions for PCI Hotplug drivers to use */
 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
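pci_prepare_to_sleep() and pci_back_from_sleep(), added above, let a driver hand the choice of target low-power state and the arming of wake-up over to the PCI core. Below is a rough sketch of a legacy-style suspend/resume pair built on them; the driver name and surrounding details are illustrative assumptions, not taken from this diff.

    static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            /* quiesce the hardware first (not shown), then save config space
             * and let the PCI core pick the state and arm wake-up */
            pci_save_state(pdev);
            return pci_prepare_to_sleep(pdev);
    }

    static int foo_resume(struct pci_dev *pdev)
    {
            int err = pci_back_from_sleep(pdev);    /* back to D0, wake-up disarmed */

            if (err)
                    return err;
            pci_restore_state(pdev);
            return 0;
    }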
@@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 	return -EIO;
 }
 
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	return -EIO;
+}
+
 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
 					unsigned int size)
 {
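This stub exists so that the usual DMA-mask setup pattern keeps compiling when CONFIG_PCI is disabled. Roughly, that pattern is the 64-bit-first, 32-bit-fallback sequence sketched below; the helper itself is hypothetical and not part of this diff.

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int foo_setup_dma(struct pci_dev *pdev)
    {
            if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
                !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
                    return 0;                       /* full 64-bit DMA */

            if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
                    return -ENODEV;                 /* no usable DMA at all */
            return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
    }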
@@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
 /* If you want to know what to call your pci_dev, ask this function.
  * Again, it's a wrapper around the generic device.
  */
-static inline char *pci_name(struct pci_dev *pdev)
+static inline const char *pci_name(struct pci_dev *pdev)
 {
-	return pdev->dev.bus_id;
+	return dev_name(&pdev->dev);
 }
 
 
@@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
 	pci_fixup_header,	/* After reading configuration header */
 	pci_fixup_final,	/* Final phase of device fixups */
 	pci_fixup_enable,	/* pci_enable_device() time */
-	pci_fixup_resume,	/* pci_enable_device() time */
+	pci_fixup_resume,	/* pci_device_resume() */
+	pci_fixup_suspend,	/* pci_device_suspend */
+	pci_fixup_resume_early, /* pci_device_resume_early() */
 };
 
 /* Anonymous variables would be nice... */
@@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 	resume##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
+	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
+	resume_early##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
+	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
+	suspend##vendor##device##hook, vendor, device, hook)
 
 
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
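The new DECLARE_PCI_FIXUP_RESUME_EARLY and DECLARE_PCI_FIXUP_SUSPEND macros hook quirks into the suspend/resume fixup passes added to enum pci_fixup_pass above. A hedged example of how a quirk might be registered; the device ID and config-register offset below are invented placeholders, not real hardware values.

    /* hypothetical quirk: rewrite a vendor-specific register early in resume,
     * before drivers get to run (0x1234 and offset 0x40 are made up) */
    static void quirk_foo_restore(struct pci_dev *dev)
    {
            pci_write_config_dword(dev, 0x40, 0x1);
    }
    DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo_restore);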
@@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
 extern unsigned long pci_cardbus_io_size;
 extern unsigned long pci_cardbus_mem_size;
 
-extern int pcibios_add_platform_entries(struct pci_dev *dev);
+int pcibios_add_platform_entries(struct pci_dev *dev);
+void pcibios_disable_device(struct pci_dev *dev);
+int pcibios_set_pcie_reset_state(struct pci_dev *dev,
+				 enum pcie_reset_state state);
 
 #ifdef CONFIG_PCI_MMCONFIG
 extern void __init pci_mmcfg_early_init(void);
include/linux/pci_hotplug.h

@@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
  * @get_adapter_status: Called to get see if an adapter is present in the slot or not.
  *	If this field is NULL, the value passed in the struct hotplug_slot_info
  *	will be used when this value is requested by a user.
- * @get_address: Called to get pci address of a slot.
- *	If this field is NULL, the value passed in the struct hotplug_slot_info
- *	will be used when this value is requested by a user.
  * @get_max_bus_speed: Called to get the max bus speed for a slot.
  *	If this field is NULL, the value passed in the struct hotplug_slot_info
  *	will be used when this value is requested by a user.
@@ -120,7 +117,6 @@ struct hotplug_slot_ops {
 	int (*get_attention_status)	(struct hotplug_slot *slot, u8 *value);
 	int (*get_latch_status)		(struct hotplug_slot *slot, u8 *value);
 	int (*get_adapter_status)	(struct hotplug_slot *slot, u8 *value);
-	int (*get_address)		(struct hotplug_slot *slot, u32 *value);
 	int (*get_max_bus_speed)	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 	int (*get_cur_bus_speed)	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 };
@@ -140,7 +136,6 @@ struct hotplug_slot_info {
 	u8	attention_status;
 	u8	latch_status;
 	u8	adapter_status;
-	u32	address;
 	enum pci_bus_speed	max_bus_speed;
 	enum pci_bus_speed	cur_bus_speed;
 };
@@ -166,15 +161,14 @@ struct hotplug_slot {
 
 	/* Variables below this are for use only by the hotplug pci core. */
 	struct list_head	slot_list;
-	struct kobject		kobj;
+	struct pci_slot		*pci_slot;
 };
 #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
 
-extern int pci_hp_register (struct hotplug_slot *slot);
-extern int pci_hp_deregister (struct hotplug_slot *slot);
+extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
+extern int pci_hp_deregister(struct hotplug_slot *slot);
 extern int __must_check pci_hp_change_slot_info	(struct hotplug_slot *slot,
 						 struct hotplug_slot_info *info);
-extern struct kset *pci_hotplug_slots_kset;
 
 /* PCI Setting Record (Type 0) */
 struct hpp_type0 {
@@ -227,9 +221,9 @@ struct hotplug_params {
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/actypes.h>
-extern acpi_status acpi_run_oshp(acpi_handle handle);
 extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 				struct hotplug_params *hpp);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
 int acpi_root_bridge(acpi_handle handle);
 #endif
 #endif
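pci_hp_register() now takes the pci_bus and the slot's device number, so the PCI core can create the matching struct pci_slot itself. A minimal sketch of a caller, with hypothetical names, assuming the hotplug_slot structure has already been filled in by the driver:

    /* hypothetical: register a slot that lives at device number "devnum"
     * on the bus below "bridge" */
    static int foo_register_slot(struct hotplug_slot *hotplug,
                                 struct pci_dev *bridge, unsigned int devnum)
    {
            return pci_hp_register(hotplug, bridge->subordinate, devnum);
    }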
include/linux/pci_regs.h

@@ -231,6 +231,7 @@
 #define  PCI_PM_CAP_PME_D2	0x2000	/* PME# from D2 */
 #define  PCI_PM_CAP_PME_D3	0x4000	/* PME# from D3 (hot) */
 #define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
+#define  PCI_PM_CAP_PME_SHIFT	11	/* Start of the PME Mask in PMC */
 #define PCI_PM_CTRL		4	/* PM control and status register */
 #define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
 #define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
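PCI_PM_CAP_PME_SHIFT pairs with the existing PCI_PM_CAP_PME_MASK to pull the PME# support bits out of the PMC register, which is how the new pme_support bitfield in struct pci_dev (see the pci.h hunk above) would typically be filled. A hedged sketch of that extraction; the helper name is invented.

    /* sketch: read which power states can generate PME# for this device */
    static unsigned int foo_read_pme_support(struct pci_dev *dev)
    {
            int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
            u16 pmc = 0;

            if (!pm)
                    return 0;
            pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
            return (pmc & PCI_PM_CAP_PME_MASK) >> PCI_PM_CAP_PME_SHIFT;
    }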
include/linux/platform_device.h

@@ -53,6 +53,7 @@ struct platform_driver {
 	int (*suspend_late)(struct platform_device *, pm_message_t state);
 	int (*resume_early)(struct platform_device *);
 	int (*resume)(struct platform_device *);
+	struct pm_ext_ops *pm;
 	struct device_driver driver;
 };
 
include/linux/pm.h

@@ -112,7 +112,9 @@ typedef struct pm_message {
 	int event;
 } pm_message_t;
 
-/*
+/**
+ * struct pm_ops - device PM callbacks
+ *
  * Several driver power state transitions are externally visible, affecting
  * the state of pending I/O queues and (for drivers that touch hardware)
  * interrupts, wakeups, DMA, and other hardware state. There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message {
  * to the rest of the driver stack (such as a driver that's ON gating off
  * clocks which are not in active use).
  *
+ * The externally visible transitions are handled with the help of the following
+ * callbacks included in this structure:
+ *
+ * @prepare: Prepare the device for the upcoming transition, but do NOT change
+ *	its hardware state. Prevent new children of the device from being
+ *	registered after @prepare() returns (the driver's subsystem and
+ *	generally the rest of the kernel is supposed to prevent new calls to the
+ *	probe method from being made too once @prepare() has succeeded). If
+ *	@prepare() detects a situation it cannot handle (e.g. registration of a
+ *	child already in progress), it may return -EAGAIN, so that the PM core
+ *	can execute it once again (e.g. after the new child has been registered)
+ *	to recover from the race condition. This method is executed for all
+ *	kinds of suspend transitions and is followed by one of the suspend
+ *	callbacks: @suspend(), @freeze(), or @poweroff().
+ *	The PM core executes @prepare() for all devices before starting to
+ *	execute suspend callbacks for any of them, so drivers may assume all of
+ *	the other devices to be present and functional while @prepare() is being
+ *	executed. In particular, it is safe to make GFP_KERNEL memory
+ *	allocations from within @prepare(). However, drivers may NOT assume
+ *	anything about the availability of the user space at that time and it
+ *	is not correct to request firmware from within @prepare() (it's too
+ *	late to do that). [To work around this limitation, drivers may
+ *	register suspend and hibernation notifiers that are executed before the
+ *	freezing of tasks.]
+ *
+ * @complete: Undo the changes made by @prepare(). This method is executed for
+ *	all kinds of resume transitions, following one of the resume callbacks:
+ *	@resume(), @thaw(), @restore(). Also called if the state transition
+ *	fails before the driver's suspend callback (@suspend(), @freeze(),
+ *	@poweroff()) can be executed (e.g. if the suspend callback fails for one
+ *	of the other devices that the PM core has unsuccessfully attempted to
+ *	suspend earlier).
+ *	The PM core executes @complete() after it has executed the appropriate
+ *	resume callback for all devices.
+ *
+ * @suspend: Executed before putting the system into a sleep state in which the
+ *	contents of main memory are preserved. Quiesce the device, put it into
+ *	a low power state appropriate for the upcoming system state (such as
+ *	PCI_D3hot), and enable wakeup events as appropriate.
+ *
+ * @resume: Executed after waking the system up from a sleep state in which the
+ *	contents of main memory were preserved. Put the device into the
+ *	appropriate state, according to the information saved in memory by the
+ *	preceding @suspend(). The driver starts working again, responding to
+ *	hardware events and software requests. The hardware may have gone
+ *	through a power-off reset, or it may have maintained state from the
+ *	previous suspend() which the driver may rely on while resuming. On most
+ *	platforms, there are no restrictions on availability of resources like
+ *	clocks during @resume().
+ *
+ * @freeze: Hibernation-specific, executed before creating a hibernation image.
+ *	Quiesce operations so that a consistent image can be created, but do NOT
+ *	otherwise put the device into a low power device state and do NOT emit
+ *	system wakeup events. Save in main memory the device settings to be
+ *	used by @restore() during the subsequent resume from hibernation or by
+ *	the subsequent @thaw(), if the creation of the image or the restoration
+ *	of main memory contents from it fails.
+ *
+ * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+ *	if the creation of the image fails. Also executed after a failing
+ *	attempt to restore the contents of main memory from such an image.
+ *	Undo the changes made by the preceding @freeze(), so the device can be
+ *	operated in the same way as immediately before the call to @freeze().
+ *
+ * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+ *	Quiesce the device, put it into a low power state appropriate for the
+ *	upcoming system state (such as PCI_D3hot), and enable wakeup events as
+ *	appropriate.
+ *
+ * @restore: Hibernation-specific, executed after restoring the contents of main
+ *	memory from a hibernation image. Driver starts working again,
+ *	responding to hardware events and software requests. Drivers may NOT
+ *	make ANY assumptions about the hardware state right prior to @restore().
+ *	On most platforms, there are no restrictions on availability of
+ *	resources like clocks during @restore().
+ *
+ * All of the above callbacks, except for @complete(), return error codes.
+ * However, the error codes returned by the resume operations, @resume(),
+ * @thaw(), and @restore(), do not cause the PM core to abort the resume
+ * transition during which they are returned. The error codes returned in
+ * that cases are only printed by the PM core to the system logs for debugging
+ * purposes. Still, it is recommended that drivers only return error codes
+ * from their resume methods in case of an unrecoverable failure (i.e. when the
+ * device being handled refuses to resume and becomes unusable) to allow us to
+ * modify the PM core in the future, so that it can avoid attempting to handle
+ * devices that failed to resume and their children.
+ *
+ * It is allowed to unregister devices while the above callbacks are being
+ * executed. However, it is not allowed to unregister a device from within any
+ * of its own callbacks.
+ */
+
+struct pm_ops {
+	int (*prepare)(struct device *dev);
+	void (*complete)(struct device *dev);
+	int (*suspend)(struct device *dev);
+	int (*resume)(struct device *dev);
+	int (*freeze)(struct device *dev);
+	int (*thaw)(struct device *dev);
+	int (*poweroff)(struct device *dev);
+	int (*restore)(struct device *dev);
+};
+
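To make the callback split concrete, here is a minimal sketch of a driver-level struct pm_ops wired into the new ->pm pointer that this series adds to struct device_driver (and, in pm_ops/pm_ext_ops form, to classes, device types and buses). All names are hypothetical, and mapping the hibernation callbacks onto these helpers is just one choice consistent with the documentation above, not something the diff prescribes.

    static int foo_dev_suspend(struct device *dev)
    {
            /* quiesce I/O and put the hardware into a low-power state */
            return 0;
    }

    static int foo_dev_resume(struct device *dev)
    {
            /* undo foo_dev_suspend() and start handling requests again */
            return 0;
    }

    static int foo_dev_freeze(struct device *dev)
    {
            /* quiesce only: no low-power state, no wakeup events */
            return 0;
    }

    static int foo_dev_thaw(struct device *dev)
    {
            /* undo foo_dev_freeze() */
            return 0;
    }

    static struct pm_ops foo_pm_ops = {
            .suspend        = foo_dev_suspend,
            .resume         = foo_dev_resume,
            .freeze         = foo_dev_freeze,
            .thaw           = foo_dev_thaw,
            .poweroff       = foo_dev_suspend,
            .restore        = foo_dev_resume,
    };

    static struct device_driver foo_driver = {
            .name   = "foo",
            .pm     = &foo_pm_ops,
    };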
+/**
+ * struct pm_ext_ops - extended device PM callbacks
+ *
+ * Some devices require certain operations related to suspend and hibernation
+ * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
+ * is defined, adding callbacks to be executed with interrupts disabled to
+ * 'struct pm_ops'.
+ *
+ * The following callbacks included in 'struct pm_ext_ops' are executed with
+ * the nonboot CPUs switched off and with interrupts disabled on the only
+ * functional CPU. They also are executed with the PM core list of devices
+ * locked, so they must NOT unregister any devices.
+ *
+ * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
+ *	actions required for suspending the device that need interrupts to be
+ *	disabled
+ *
+ * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
+ *	actions required for resuming the device that need interrupts to be
+ *	disabled
+ *
+ * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
+ *	actions required for freezing the device that need interrupts to be
+ *	disabled
+ *
+ * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
+ *	actions required for thawing the device that need interrupts to be
+ *	disabled
+ *
+ * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
+ *	actions required for handling the device that need interrupts to be
+ *	disabled
+ *
+ * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
+ *	actions required for restoring the operations of the device that need
+ *	interrupts to be disabled
+ *
+ * All of the above callbacks return error codes, but the error codes returned
+ * by the resume operations, @resume_noirq(), @thaw_noirq(), and
+ * @restore_noirq(), do not cause the PM core to abort the resume transition
+ * during which they are returned. The error codes returned in that cases are
+ * only printed by the PM core to the system logs for debugging purposes.
+ * Still, as stated above, it is recommended that drivers only return error
+ * codes from their resume methods if the device being handled fails to resume
+ * and is not usable any more.
+ */
+
+struct pm_ext_ops {
+	struct pm_ops base;
+	int (*suspend_noirq)(struct device *dev);
+	int (*resume_noirq)(struct device *dev);
+	int (*freeze_noirq)(struct device *dev);
+	int (*thaw_noirq)(struct device *dev);
+	int (*poweroff_noirq)(struct device *dev);
+	int (*restore_noirq)(struct device *dev);
+};
+
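Because struct pm_ext_ops embeds a struct pm_ops as its .base, one object can carry both the ordinary callbacks and the _noirq variants. A hedged sketch of a platform driver using the new ->pm member shown in the platform_device.h hunk above; all names are made up.

    static int foo_suspend(struct device *dev)
    {
            /* ordinary suspend work, interrupts still enabled */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            return 0;
    }

    static int foo_suspend_noirq(struct device *dev)
    {
            /* last-minute work that must run with interrupts disabled */
            return 0;
    }

    static int foo_resume_noirq(struct device *dev)
    {
            return 0;
    }

    static struct pm_ext_ops foo_pm_ext_ops = {
            .base = {
                    .suspend        = foo_suspend,
                    .resume         = foo_resume,
            },
            .suspend_noirq  = foo_suspend_noirq,
            .resume_noirq   = foo_resume_noirq,
    };

    static struct platform_driver foo_platform_driver = {
            .driver = {
                    .name   = "foo",
            },
            .pm     = &foo_pm_ext_ops,
    };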
+/**
+ * PM_EVENT_ messages
+ *
+ * The following PM_EVENT_ messages are defined for the internal use of the PM
+ * core, in order to provide a mechanism allowing the high level suspend and
+ * hibernation code to convey the necessary information to the device PM core
+ * code:
+ *
+ * ON		No transition.
+ *
+ * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
+ *		for all devices.
+ *
+ * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
+ *		for all devices.
+ *
+ * HIBERNATE	Hibernation image has been saved, call ->prepare() and
+ *		->poweroff() for all devices.
+ *
+ * QUIESCE	Contents of main memory are going to be restored from a (loaded)
+ *		hibernation image, call ->prepare() and ->freeze() for all
+ *		devices.
+ *
+ * RESUME	System is resuming, call ->resume() and ->complete() for all
+ *		devices.
+ *
+ * THAW		Hibernation image has been created, call ->thaw() and
+ *		->complete() for all devices.
+ *
+ * RESTORE	Contents of main memory have been restored from a hibernation
+ *		image, call ->restore() and ->complete() for all devices.
+ *
+ * RECOVER	Creation of a hibernation image or restoration of the main
+ *		memory contents from a hibernation image has failed, call
+ *		->thaw() and ->complete() for all devices.
+ */
+
+#define PM_EVENT_ON		0x0000
+#define PM_EVENT_FREEZE		0x0001
+#define PM_EVENT_SUSPEND	0x0002
+#define PM_EVENT_HIBERNATE	0x0004
+#define PM_EVENT_QUIESCE	0x0008
+#define PM_EVENT_RESUME		0x0010
+#define PM_EVENT_THAW		0x0020
+#define PM_EVENT_RESTORE	0x0040
+#define PM_EVENT_RECOVER	0x0080
+
+#define PM_EVENT_SLEEP	(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
+
+#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
+#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
+#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
+#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
+#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
+#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
+#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
+#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
+
+/**
+ * Device power management states
+ *
+ * These state labels are used internally by the PM core to indicate the current
+ * status of a device with respect to the PM core operations.
+ *
+ * DPM_ON		Device is regarded as operational. Set this way
+ *			initially and when ->complete() is about to be called.
+ *			Also set when ->prepare() fails.
+ *
+ * DPM_PREPARING	Device is going to be prepared for a PM transition. Set
+ *			when ->prepare() is about to be called.
+ *
+ * DPM_RESUMING		Device is going to be resumed. Set when ->resume(),
+ *			->thaw(), or ->restore() is about to be called.
+ *
+ * DPM_SUSPENDING	Device has been prepared for a power transition. Set
+ *			when ->prepare() has just succeeded.
+ *
+ * DPM_OFF		Device is regarded as inactive. Set immediately after
+ *			->suspend(), ->freeze(), or ->poweroff() has succeeded.
+ *			Also set when ->resume()_noirq, ->thaw_noirq(), or
+ *			->restore_noirq() is about to be called.
+ *
+ * DPM_OFF_IRQ		Device is in a "deep sleep". Set immediately after
+ *			->suspend_noirq(), ->freeze_noirq(), or
+ *			->poweroff_noirq() has just succeeded.
+ */
+
+enum dpm_state {
+	DPM_INVALID,
+	DPM_ON,
+	DPM_PREPARING,
+	DPM_RESUMING,
+	DPM_SUSPENDING,
+	DPM_OFF,
+	DPM_OFF_IRQ,
+};
+
+struct dev_pm_info {
+	pm_message_t		power_state;
+	unsigned		can_wakeup:1;
+	unsigned		should_wakeup:1;
+	enum dpm_state		status;		/* Owned by the PM core */
+#ifdef CONFIG_PM_SLEEP
+	struct list_head	entry;
+#endif
+};
+
+/*
+ * The PM_EVENT_ messages are also used by drivers implementing the legacy
+ * suspend framework, based on the ->suspend() and ->resume() callbacks common
+ * for suspend and hibernation transitions, according to the rules below.
+ */
+
+/* Necessary, because several drivers use PM_EVENT_PRETHAW */
+#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
+
+/*
  * One transition is triggered by resume(), after a suspend() call; the
  * message is implicit:
  *
@@ -164,35 +444,13 @@ typedef struct pm_message {
  * or from system low-power states such as standby or suspend-to-RAM.
  */
 
-#define PM_EVENT_ON 0
-#define PM_EVENT_FREEZE 1
-#define PM_EVENT_SUSPEND 2
-#define PM_EVENT_HIBERNATE 4
-#define PM_EVENT_PRETHAW 8
-
-#define PM_EVENT_SLEEP	(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
-
-#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
-#define PMSG_PRETHAW	((struct pm_message){ .event = PM_EVENT_PRETHAW, })
-#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
-#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
-#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
-
-struct dev_pm_info {
-	pm_message_t		power_state;
-	unsigned		can_wakeup:1;
-	unsigned		should_wakeup:1;
-	bool			sleeping:1;	/* Owned by the PM core */
-#ifdef CONFIG_PM_SLEEP
-	struct list_head	entry;
-#endif
-};
-
-extern int device_power_down(pm_message_t state);
-extern void device_power_up(void);
-extern void device_resume(void);
 
 #ifdef CONFIG_PM_SLEEP
+extern void device_pm_lock(void);
 extern void device_power_up(pm_message_t state);
 extern void device_resume(pm_message_t state);
+
+extern void device_pm_unlock(void);
+extern int device_power_down(pm_message_t state);
 extern int device_suspend(pm_message_t state);
 extern int device_prepare_suspend(pm_message_t state);
 
include/linux/pm_wakeup.h

@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
 	dev->power.can_wakeup = dev->power.should_wakeup = !!val;
 }
 
+static inline void device_set_wakeup_capable(struct device *dev, int val)
+{
+	dev->power.can_wakeup = !!val;
+}
+
 static inline int device_can_wakeup(struct device *dev)
 {
 	return dev->power.can_wakeup;
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)
 
 static inline int device_may_wakeup(struct device *dev)
 {
-	return dev->power.can_wakeup & dev->power.should_wakeup;
-}
-
-/*
- * Platform hook to activate device wakeup capability, if that's not already
- * handled by enable_irq_wake() etc.
- * Returns zero on success, else negative errno
- */
-extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
-
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
-	if (platform_enable_wakeup)
-		return (*platform_enable_wakeup)(dev, is_on);
-	return 0;
+	return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
 #else /* !CONFIG_PM */
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
 	dev->power.can_wakeup = !!val;
 }
 
+static inline void device_set_wakeup_capable(struct device *dev, int val) { }
+
 static inline int device_can_wakeup(struct device *dev)
 {
 	return dev->power.can_wakeup;
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)
 #define device_set_wakeup_enable(dev, val)	do {} while (0)
 #define device_may_wakeup(dev)			0
 
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
-	return 0;
-}
-
 #endif /* !CONFIG_PM */
 
 #endif /* _LINUX_PM_WAKEUP_H */
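device_set_wakeup_capable() and the corrected device_may_wakeup() (a logical && instead of the old bitwise &) are typically consulted in a driver's suspend path to decide whether wake-up should be armed. A rough sketch for a PCI device, using the power helpers declared earlier in this diff; the driver name is hypothetical.

    static int bar_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            pci_power_t target = pci_choose_state(pdev, state);

            pci_save_state(pdev);
            /* arm PME# only if wake-up is still enabled for this device */
            pci_enable_wake(pdev, target, device_may_wakeup(&pdev->dev));
            pci_set_power_state(pdev, target);
            return 0;
    }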
include/linux/suspend.h

@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
  *	that implement @begin(), but platforms implementing @begin() should
  *	also provide a @end() which cleans up transitions aborted before
  *	@enter().
+ *
+ * @recover: Recover the platform from a suspend failure.
+ *	Called by the PM core if the suspending of devices fails.
+ *	This callback is optional and should only be implemented by platforms
+ *	which require special recovery actions in that situation.
  */
 struct platform_suspend_ops {
 	int (*valid)(suspend_state_t state);
@@ -94,6 +99,7 @@ struct platform_suspend_ops {
 	int (*enter)(suspend_state_t state);
 	void (*finish)(void);
 	void (*end)(void);
+	void (*recover)(void);
 };
 
 #ifdef CONFIG_SUSPEND
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
  * The methods in this structure allow a platform to carry out special
  * operations required by it during a hibernation transition.
  *
- * All the methods below must be implemented.
+ * All the methods below, except for @recover(), must be implemented.
  *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
+ *
+ * @recover: Recover the platform from a failure to suspend devices.
+ *	Called by the PM core if the suspending of devices during hibernation
+ *	fails. This callback is optional and should only be implemented by
+ *	platforms which require special recovery actions in that situation.
 */
 struct platform_hibernation_ops {
 	int (*begin)(void);
@@ -200,6 +211,7 @@ struct platform_hibernation_ops {
 	void (*leave)(void);
 	int (*pre_restore)(void);
 	void (*restore_cleanup)(void);
+	void (*recover)(void);
 };
 
 #ifdef CONFIG_HIBERNATION
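The new optional ->recover() hook gives the platform a chance to undo its own preparation when suspending devices fails. A minimal sketch of a platform_suspend_ops using it follows; the names are hypothetical, and suspend_valid_only_mem() is the stock validity helper already exported by this header.

    static int baz_pm_enter(suspend_state_t state)
    {
            /* platform-specific entry into the target sleep state */
            return 0;
    }

    static void baz_pm_recover(void)
    {
            /* runs only when suspending devices failed: roll back whatever
             * the platform set up earlier in the transition */
    }

    static struct platform_suspend_ops baz_suspend_ops = {
            .valid          = suspend_valid_only_mem,
            .enter          = baz_pm_enter,
            .recover        = baz_pm_recover,
    };

A platform would hand this to suspend_set_ops() as before; when ->recover() is left NULL it is simply skipped.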