Merge branch 'master' into pm-sleep

* master: (848 commits)
  SELinux: Fix RCU deref check warning in sel_netport_insert()
  binary_sysctl(): fix memory leak
  mm/vmalloc.c: remove static declaration of va from __get_vm_area_node
  ipmi_watchdog: restore settings when BMC reset
  oom: fix integer overflow of points in oom_badness
  memcg: keep root group unchanged if creation fails
  nilfs2: potential integer overflow in nilfs_ioctl_clean_segments()
  nilfs2: unbreak compat ioctl
  cpusets: stall when updating mems_allowed for mempolicy or disjoint nodemask
  evm: prevent racing during tfm allocation
  evm: key must be set once during initialization
  mmc: vub300: fix type of firmware_rom_wait_states module parameter
  Revert "mmc: enable runtime PM by default"
  mmc: sdhci: remove "state" argument from sdhci_suspend_host
  x86, dumpstack: Fix code bytes breakage due to missing KERN_CONT
  IB/qib: Correct sense on freectxts increment and decrement
  RDMA/cma: Verify private data length
  cgroups: fix a css_set not found bug in cgroup_attach_proc
  oprofile: Fix uninitialized memory access when writing to writing to oprofilefs
  Revert "xen/pv-on-hvm kexec: add xs_reset_watches to shutdown watches from old kernel"
  ...

Conflicts:
	kernel/cgroup_freezer.c
Rafael J. Wysocki 2011-12-21 21:59:45 +01:00
commit b00f4dc5ff
858 changed files with 11340 additions and 6666 deletions

View file

@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
request_fn_proc *,
spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);

View file

@@ -71,7 +71,7 @@ struct timecounter {
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
* @tc: Pointer to cycle counter.
* @cc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
* @cycle: a value returned by tc->cc->read()
* @cycle_tstamp: a value returned by tc->cc->read()
*
* Cycle counts that are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,10 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
* @maxadj: maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
* @cycle_last: most recent cycle counter value seen by ::read()
*/
struct clocksource {
/*
@@ -172,7 +174,7 @@ struct clocksource {
u32 mult;
u32 shift;
u64 max_idle_ns;
u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
@@ -186,6 +188,7 @@ struct clocksource {
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
@@ -260,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
/**
* clocksource_cyc2ns - converts clocksource cycles to nanoseconds
* @cycles: cycles
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*
* Converts cycles to nanoseconds, using the given mult and shift.
*
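
For reference, the conversion this kernel-doc describes is a plain multiply-and-shift: mult and shift are chosen (e.g. via clocksource_hz2mult(), visible in the hunk context above) so that (cycles * mult) >> shift approximates the elapsed time in nanoseconds. Below is a minimal userspace sketch of that arithmetic; the 24 MHz example rate and the cyc2ns() name are illustrative assumptions, not taken from this patch.

/* Sketch of the cycles -> nanoseconds conversion described above,
 * using plain stdint types instead of the kernel's own typedefs. */
#include <stdint.h>
#include <stdio.h>

static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (int64_t)((cycles * (uint64_t)mult) >> shift);
}

int main(void)
{
	/* Example: a 24 MHz counter with shift = 24.
	 * mult = (10^9 << 24) / (24 * 10^6) ~= 699050667. */
	uint32_t mult = 699050667, shift = 24;

	/* 24,000,000 cycles of a 24 MHz clock ~= 1,000,000,000 ns. */
	printf("%lld ns\n", (long long)cyc2ns(24000000, mult, shift));
	return 0;
}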

View file

@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
extern void __user *compat_alloc_user_space(unsigned long len);
asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
const struct compat_iovec __user *lvec,
unsigned long liovcnt, const struct compat_iovec __user *rvec,
unsigned long riovcnt, unsigned long flags);
asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
const struct compat_iovec __user *lvec,
unsigned long liovcnt, const struct compat_iovec __user *rvec,
unsigned long riovcnt, unsigned long flags);
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */

View file

@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *__d_path(const struct path *path, struct path *root, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);

View file

@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif

View file

@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
#include <asm/byteorder.h>
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
extern int current_umask(void);

View file

@@ -172,6 +172,7 @@ enum {
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
};
enum {
@@ -179,6 +180,7 @@ enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
};
struct ftrace_event_call {

View file

@@ -126,6 +126,8 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
#define INIT_TASK_COMM "swapper"
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
.comm = "swapper", \
.comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \

View file

@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(n == 1) ? 0 : \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
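
Assuming the line being dropped here is the "(n == 1) ? 0 :" special case (the hunk counts, 7 old lines versus 6 new, indicate exactly one removal), the constant-folded branch reduces to 1UL << ilog2(n) for every constant n, which also gives the correct value 1 for n == 1. A small userspace check of that arithmetic follows, with ilog2() re-implemented through a GCC builtin as a simplified stand-in for the kernel macro.

#include <assert.h>
#include <limits.h>

/* Simplified stand-in for the kernel's ilog2(): index of the top set bit. */
static unsigned long ilog2(unsigned long n)
{
	return (sizeof(n) * CHAR_BIT - 1) - __builtin_clzl(n);
}

/* Constant-folded branch of the macro above, without the n == 1 case. */
#define rounddown_pow_of_two(n)	(1UL << ilog2(n))

int main(void)
{
	assert(rounddown_pow_of_two(1) == 1);	/* was 0 with the special case */
	assert(rounddown_pow_of_two(5) == 4);
	assert(rounddown_pow_of_two(64) == 64);
	return 0;
}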

View file

@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>

View file

@@ -218,6 +218,7 @@ struct mmc_card {
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
}
static inline int mmc_card_long_read_time(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_LONG_READ_TIME;
}
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))

View file

@@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
extern int dev_seq_open_ops(struct inode *inode, struct file *file,
const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);

View file

@@ -12,7 +12,7 @@ struct pci_ats {
unsigned int is_enabled:1; /* Enable bit is set */
};
#ifdef CONFIG_PCI_IOV
#ifdef CONFIG_PCI_ATS
extern int pci_enable_ats(struct pci_dev *dev, int ps);
extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return dev->ats && dev->ats->is_enabled;
}
#else /* CONFIG_PCI_IOV */
#else /* CONFIG_PCI_ATS */
static inline int pci_enable_ats(struct pci_dev *dev, int ps)
{
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return 0;
}
#endif /* CONFIG_PCI_IOV */
#endif /* CONFIG_PCI_ATS */
#ifdef CONFIG_PCI_PRI

View file

@@ -338,7 +338,7 @@ struct pci_dev {
struct list_head msi_list;
#endif
struct pci_vpd *vpd;
#ifdef CONFIG_PCI_IOV
#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */

View file

@@ -517,8 +517,12 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001

View file

@@ -822,6 +822,7 @@ struct perf_event {
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
struct list_head rb_entry;
/* poll related */
wait_queue_head_t waitq;

View file

@@ -30,7 +30,7 @@
*/
struct tc_stats {
__u64 bytes; /* NUmber of enqueues bytes */
__u64 bytes; /* Number of enqueued bytes */
__u32 packets; /* Number of enqueued packets */
__u32 drops; /* Packets dropped because of lack of resources */
__u32 overlimits; /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
__u32 debug; /* debug flags */
/* stats */
__u32 direct_pkts; /* count of non shapped packets */
__u32 direct_pkts; /* count of non shaped packets */
};
enum {
TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
/* State transition probablities for 4 state model */
/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
__u32 p13;
__u32 p31;

View file

@@ -54,118 +54,145 @@ typedef struct pm_message {
/**
* struct dev_pm_ops - device PM callbacks
*
* Several driver power state transitions are externally visible, affecting
* Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
* internal transitions to various low power modes, which are transparent
* internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
* The externally visible transitions are handled with the help of the following
* callbacks included in this structure:
* The externally visible transitions are handled with the help of callbacks
* included in this structure in such a way that two levels of callbacks are
* involved. First, the PM core executes callbacks provided by PM domains,
* device types, classes and bus types. They are the subsystem-level callbacks
* supposed to execute callbacks provided by device drivers, although they may
* choose not to do that. If the driver callbacks are executed, they have to
* collaborate with the subsystem-level callbacks to achieve the goals
* appropriate for the given system transition, given transition phase and the
* subsystem the device belongs to.
*
* @prepare: Prepare the device for the upcoming transition, but do NOT change
* its hardware state. Prevent new children of the device from being
* registered after @prepare() returns (the driver's subsystem and
* generally the rest of the kernel is supposed to prevent new calls to the
* probe method from being made too once @prepare() has succeeded). If
* @prepare() detects a situation it cannot handle (e.g. registration of a
* child already in progress), it may return -EAGAIN, so that the PM core
* can execute it once again (e.g. after the new child has been registered)
* to recover from the race condition. This method is executed for all
* kinds of suspend transitions and is followed by one of the suspend
* callbacks: @suspend(), @freeze(), or @poweroff().
* The PM core executes @prepare() for all devices before starting to
* execute suspend callbacks for any of them, so drivers may assume all of
* the other devices to be present and functional while @prepare() is being
* executed. In particular, it is safe to make GFP_KERNEL memory
* allocations from within @prepare(). However, drivers may NOT assume
* anything about the availability of the user space at that time and it
* is not correct to request firmware from within @prepare() (it's too
* late to do that). [To work around this limitation, drivers may
* register suspend and hibernation notifiers that are executed before the
* freezing of tasks.]
* @prepare: The principal role of this callback is to prevent new children of
* the device from being registered after it has returned (the driver's
* subsystem and generally the rest of the kernel is supposed to prevent
* new calls to the probe method from being made too once @prepare() has
* succeeded). If @prepare() detects a situation it cannot handle (e.g.
* registration of a child already in progress), it may return -EAGAIN, so
* that the PM core can execute it once again (e.g. after a new child has
* been registered) to recover from the race condition.
* This method is executed for all kinds of suspend transitions and is
* followed by one of the suspend callbacks: @suspend(), @freeze(), or
* @poweroff(). The PM core executes subsystem-level @prepare() for all
* devices before starting to invoke suspend callbacks for any of them, so
* generally devices may be assumed to be functional or to respond to
* runtime resume requests while @prepare() is being executed. However,
* device drivers may NOT assume anything about the availability of user
* space at that time and it is NOT valid to request firmware from within
* @prepare() (it's too late to do that). It also is NOT valid to allocate
* substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
* [To work around these limitations, drivers may register suspend and
* hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
* fails before the driver's suspend callback (@suspend(), @freeze(),
* @poweroff()) can be executed (e.g. if the suspend callback fails for one
* fails before the driver's suspend callback: @suspend(), @freeze() or
* @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
* The PM core executes @complete() after it has executed the appropriate
* resume callback for all devices.
* The PM core executes subsystem-level @complete() after it has executed
* the appropriate resume callbacks for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
* contents of main memory are preserved. Quiesce the device, put it into
* a low power state appropriate for the upcoming system state (such as
* PCI_D3hot), and enable wakeup events as appropriate.
* contents of main memory are preserved. The exact action to perform
* depends on the device's subsystem (PM domain, device type, class or bus
* type), but generally the device must be quiescent after subsystem-level
* @suspend() has returned, so that it doesn't do any I/O or DMA.
* Subsystem-level @suspend() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @resume: Executed after waking the system up from a sleep state in which the
* contents of main memory were preserved. Put the device into the
* appropriate state, according to the information saved in memory by the
* preceding @suspend(). The driver starts working again, responding to
* hardware events and software requests. The hardware may have gone
* through a power-off reset, or it may have maintained state from the
* previous suspend() which the driver may rely on while resuming. On most
* platforms, there are no restrictions on availability of resources like
* clocks during @resume().
* contents of main memory were preserved. The exact action to perform
* depends on the device's subsystem, but generally the driver is expected
* to start working again, responding to hardware events and software
* requests (the device itself may be left in a low-power state, waiting
* for a runtime resume to occur). The state of the device at the time its
* driver's @resume() callback is run depends on the platform and subsystem
* the device belongs to. On most platforms, there are no restrictions on
* availability of resources like clocks during @resume().
* Subsystem-level @resume() is executed for all devices after invoking
* subsystem-level @resume_noirq() for all of them.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
* Quiesce operations so that a consistent image can be created, but do NOT
* otherwise put the device into a low power device state and do NOT emit
* system wakeup events. Save in main memory the device settings to be
* used by @restore() during the subsequent resume from hibernation or by
* the subsequent @thaw(), if the creation of the image or the restoration
* of main memory contents from it fails.
* Analogous to @suspend(), but it should not enable the device to signal
* wakeup events or change its power state. The majority of subsystems
* (with the notable exception of the PCI bus type) expect the driver-level
* @freeze() to save the device settings in memory to be used by @restore()
* during the subsequent resume from hibernation.
* Subsystem-level @freeze() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
* if the creation of the image fails. Also executed after a failing
* if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
* Subsystem-level @thaw() is executed for all devices after invoking
* subsystem-level @thaw_noirq() for all of them. It also may be executed
* directly after @freeze() in case of a transition error.
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
* Quiesce the device, put it into a low power state appropriate for the
* upcoming system state (such as PCI_D3hot), and enable wakeup events as
* appropriate.
* Analogous to @suspend(), but it need not save the device's settings in
* memory.
* Subsystem-level @poweroff() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
* memory from a hibernation image. Driver starts working again,
* responding to hardware events and software requests. Drivers may NOT
* make ANY assumptions about the hardware state right prior to @restore().
* On most platforms, there are no restrictions on availability of
* resources like clocks during @restore().
* memory from a hibernation image, analogous to @resume().
*
* @suspend_noirq: Complete the operations of ->suspend() by carrying out any
* actions required for suspending the device that need interrupts to be
* disabled
* @suspend_noirq: Complete the actions started by @suspend(). Carry out any
* additional operations required for suspending the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @suspend_noirq() is being executed.
* It generally is expected that the device will be in a low-power state
* (appropriate for the target system sleep state) after subsystem-level
* @suspend_noirq() has returned successfully. If the device can generate
* system wakeup signals and is enabled to wake up the system, it should be
* configured to do so at that time. However, depending on the platform
* and device's subsystem, @suspend() may be allowed to put the device into
* the low-power state and configure it to generate wakeup signals, in
* which case it generally is not necessary to define @suspend_noirq().
*
* @resume_noirq: Prepare for the execution of ->resume() by carrying out any
* actions required for resuming the device that need interrupts to be
* disabled
* @resume_noirq: Prepare for the execution of @resume() by carrying out any
* operations required for resuming the device that might be racing with
* its driver's interrupt handler, which is guaranteed not to run while
* @resume_noirq() is being executed.
*
* @freeze_noirq: Complete the operations of ->freeze() by carrying out any
* actions required for freezing the device that need interrupts to be
* disabled
* @freeze_noirq: Complete the actions started by @freeze(). Carry out any
* additional operations required for freezing the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @freeze_noirq() is being executed.
* The power state of the device should not be changed by either @freeze()
* or @freeze_noirq() and it should not be configured to signal system
* wakeup by any of these callbacks.
*
* @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
* actions required for thawing the device that need interrupts to be
* disabled
* @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @thaw_noirq() is being executed.
*
* @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
* actions required for handling the device that need interrupts to be
* disabled
* @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
* @suspend_noirq(), but it need not save the device's settings in memory.
*
* @restore_noirq: Prepare for the execution of ->restore() by carrying out any
* actions required for restoring the operations of the device that need
* interrupts to be disabled
* @restore_noirq: Prepare for the execution of @restore() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
* @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
* @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
* returned. The error codes returned in that cases are only printed by the PM
* returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
* executed. However, it is not allowed to unregister a device from within any
* of its own callbacks.
* executed. However, a callback routine must NOT try to unregister the device
* it was called for, although it may unregister children of that device (for
* example, if it detects that a child was unplugged while the system was
* asleep).
*
* There also are the following callbacks related to run-time power management
* of devices:
* Refer to Documentation/power/devices.txt for more information about the role
* of the above callbacks in the system suspend process.
*
* There also are callbacks related to runtime power management of devices.
* Again, these callbacks are executed by the PM core only for subsystems
* (PM domains, device types, classes and bus types) and the subsystem-level
* callbacks are supposed to invoke the driver callbacks. Moreover, the exact
* actions to be performed by a device driver's callbacks generally depend on
* the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
* This need not mean that the device should be put into a low power state.
* This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
* power and is capable of generating run-time wake-up events, remote
* wake-up (i.e., a hardware mechanism allowing the device to request a
* change of its power state via a wake-up event, such as PCI PME) should
* be enabled for it.
* power and is capable of generating runtime wakeup events, remote wakeup
* (i.e., a hardware mechanism allowing the device to request a change of
* its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
* wake-up event generated by hardware or at the request of software. If
* necessary, put the device into the full power state and restore its
* wakeup event generated by hardware or at the request of software. If
* necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
* @runtime_idle: Device appears to be inactive and it might be put into a low
* power state if all of the necessary conditions are satisfied. Check
* @runtime_idle: Device appears to be inactive and it might be put into a
* low-power state if all of the necessary conditions are satisfied. Check
* these conditions and handle the device as appropriate, possibly queueing
* a suspend request for it. The return value is ignored by the PM core.
*
* Refer to Documentation/power/runtime_pm.txt for more information about the
* role of the above callbacks in device runtime power management.
*
*/
struct dev_pm_ops {
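
As a rough illustration of how a device driver typically plugs into the callback scheme documented above, here is a minimal sketch. The foo_* names, the choice of a platform driver and the empty callback bodies are assumptions for illustration, not part of this patch; SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() are the existing helpers that fill in the corresponding dev_pm_ops fields.

#include <linux/pm.h>
#include <linux/platform_device.h>

static int foo_suspend(struct device *dev)
{
	/* System sleep entry: quiesce the device.  SET_SYSTEM_SLEEP_PM_OPS()
	 * also reuses this callback for freeze() and poweroff(). */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Undo foo_suspend(); also reused for thaw() and restore(). */
	return 0;
}

static int foo_runtime_suspend(struct device *dev)
{
	/* Device looks idle: gate clocks / enter a low-power state. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the device back to full power before it is used again. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};

As the text above stresses, these driver methods are not invoked directly by the PM core: the subsystem-level callbacks (PM domain, device type, class or bus type) decide whether and when to run them.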

View file

@@ -35,10 +35,12 @@ struct pstore_info {
spinlock_t buf_lock; /* serialize access to 'buf' */
char *buf;
size_t bufsize;
struct mutex read_mutex; /* serialize open/read/close */
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
struct timespec *time, struct pstore_info *psi);
struct timespec *time, char **buf,
struct pstore_info *psi);
int (*write)(enum pstore_type_id type, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,

View file

@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
long nr; /* objs pending delete */
atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);

View file

@@ -24,7 +24,7 @@ struct sigma_firmware {
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
u32 crc;
__le32 crc;
};
enum {
@@ -40,19 +40,14 @@ enum {
struct sigma_action {
u8 instr;
u8 len_hi;
u16 len;
u16 addr;
__le16 len;
__be16 addr;
unsigned char payload[];
};
static inline u32 sigma_action_len(struct sigma_action *sa)
{
return (sa->len_hi << 16) | sa->len;
}
static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
{
return sizeof(*sa) + payload_len + (payload_len % 2);
return (sa->len_hi << 16) | le16_to_cpu(sa->len);
}
extern int process_sigma_firmware(struct i2c_client *client, const char *name);

View file

@@ -85,6 +85,8 @@
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
* Device must not be reset from its vq/config callbacks, or in
* parallel with being added/removed.
* @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
* nvqs: the number of virtqueues to find
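
A minimal sketch of a driver respecting the constraint added above: the reset is issued from the driver's remove path (ordinary process context), never from a virtqueue or config-change callback. The foo_remove() name is illustrative; reset() and del_vqs() are existing virtio_config_ops operations, del_vqs() being the companion of the find_vqs() op documented here.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void foo_remove(struct virtio_device *vdev)
{
	/* Stop the device first, so no vq/config callbacks race with the
	 * teardown below, then release the virtqueues. */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}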

View file

@@ -63,7 +63,7 @@
#define VIRTIO_MMIO_GUEST_FEATURES 0x020
/* Activated features set selector - Write Only */
#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024
#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
/* Guest's memory page size in bytes - Write Only */
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028