Merge branches 'at91', 'dcache', 'ftrace', 'hwbpt', 'misc', 'mmci', 's3c', 'st-ux' and 'unwind' into devel
parents: 8ed9059533 5fb31a96e1 80be7a7f64 19852e5900 29e29f2748 725343fa74 9e978f0962 f3af03de0b 5333a3de3c
commit:  23beab76b4
921 changed files with 14129 additions and 5741 deletions
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 				OSC_PCI_EXPRESS_PME_CONTROL |		\
 				OSC_PCI_EXPRESS_AER_CONTROL |		\
 				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)

-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+					     u32 *mask, u32 req);
 extern void acpi_early_init(void);

 #else	/* !CONFIG_ACPI */
@@ -20,6 +20,7 @@
 #include <linux/resource.h>

+#define AMBA_NR_IRQS	2
 #define AMBA_CID	0xb105f00d

 struct clk;

@@ -70,9 +71,15 @@ void amba_release_regions(struct amba_device *);
 #define amba_pclk_disable(d)	\
 	do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)

-#define amba_config(d)	(((d)->periphid >> 24) & 0xff)
-#define amba_rev(d)	(((d)->periphid >> 20) & 0x0f)
-#define amba_manf(d)	(((d)->periphid >> 12) & 0xff)
-#define amba_part(d)	((d)->periphid & 0xfff)
+/* Some drivers don't use the struct amba_device */
+#define AMBA_CONFIG_BITS(a)	(((a) >> 24) & 0xff)
+#define AMBA_REV_BITS(a)	(((a) >> 20) & 0x0f)
+#define AMBA_MANF_BITS(a)	(((a) >> 12) & 0xff)
+#define AMBA_PART_BITS(a)	((a) & 0xfff)
+
+#define amba_config(d)	AMBA_CONFIG_BITS((d)->periphid)
+#define amba_rev(d)	AMBA_REV_BITS((d)->periphid)
+#define amba_manf(d)	AMBA_MANF_BITS((d)->periphid)
+#define amba_part(d)	AMBA_PART_BITS((d)->periphid)

 #endif
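The AMBA_*_BITS macros above decode a raw periphid without needing a struct amba_device. A minimal user-space sketch of that decoding (the macros are copied from the hunk; the example periphid value is made up for illustration):

#include <stdio.h>

#define AMBA_CONFIG_BITS(a)	(((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a)	(((a) >> 20) & 0x0f)
#define AMBA_MANF_BITS(a)	(((a) >> 12) & 0xff)
#define AMBA_PART_BITS(a)	((a) & 0xfff)

int main(void)
{
	unsigned int periphid = 0x00241011;	/* hypothetical PL011-style ID */

	/* part 0x011, manufacturer 0x41, revision 2, config 0x00 */
	printf("part 0x%03x manf 0x%02x rev %u cfg 0x%02x\n",
	       AMBA_PART_BITS(periphid), AMBA_MANF_BITS(periphid),
	       AMBA_REV_BITS(periphid), AMBA_CONFIG_BITS(periphid));
	return 0;
}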
@@ -24,6 +24,7 @@
  * whether a card is present in the MMC slot or not
  * @gpio_wp: read this GPIO pin to see if the card is write protected
  * @gpio_cd: read this GPIO pin to detect card insertion
+ * @cd_invert: true if the gpio_cd pin value is active low
  * @capabilities: the capabilities of the block as implemented in
  *		  this platform, signify anything MMC_CAP_* from mmc/host.h
  */

@@ -35,6 +36,7 @@ struct mmci_platform_data {
 	unsigned int (*status)(struct device *);
 	int	gpio_wp;
 	int	gpio_cd;
+	bool	cd_invert;
 	unsigned long capabilities;
 };

@@ -32,7 +32,9 @@
 #define UART01x_RSR		0x04	/* Receive status register (Read). */
 #define UART01x_ECR		0x04	/* Error clear register (Write). */
 #define UART010_LCRH		0x08	/* Line control register, high byte. */
+#define ST_UART011_DMAWM	0x08	/* DMA watermark configure register. */
 #define UART010_LCRM		0x0C	/* Line control register, middle byte. */
+#define ST_UART011_TIMEOUT	0x0C	/* Timeout period register. */
 #define UART010_LCRL		0x10	/* Line control register, low byte. */
 #define UART010_CR		0x14	/* Control register. */
 #define UART01x_FR		0x18	/* Flag register (Read only). */

@@ -51,6 +53,15 @@
 #define UART011_MIS		0x40	/* Masked interrupt status. */
 #define UART011_ICR		0x44	/* Interrupt clear register. */
 #define UART011_DMACR		0x48	/* DMA control register. */
+#define ST_UART011_XFCR		0x50	/* XON/XOFF control register. */
+#define ST_UART011_XON1		0x54	/* XON1 register. */
+#define ST_UART011_XON2		0x58	/* XON2 register. */
+#define ST_UART011_XOFF1	0x5C	/* XON1 register. */
+#define ST_UART011_XOFF2	0x60	/* XON2 register. */
+#define ST_UART011_ITCR		0x80	/* Integration test control register. */
+#define ST_UART011_ITIP		0x84	/* Integration test input register. */
+#define ST_UART011_ABCR		0x100	/* Autobaud control register. */
+#define ST_UART011_ABIMSC	0x15C	/* Autobaud interrupt mask/clear register. */

 #define UART011_DR_OE		(1 << 11)
 #define UART011_DR_BE		(1 << 10)
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	return cgroup_attach_task_all(current, tsk);
+}

 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works

@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }

+/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+					 struct task_struct *t)
+{
+	return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
 	return 0;
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 		const struct compat_iovec __user *uvector, unsigned long nr_segs,
 		unsigned long fast_segs, struct iovec *fast_pointer,
 		struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
 	return DMA_BIT_MASK(32);
 }

+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (!dma_supported(dev, mask))

@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 	dev->coherent_dma_mask = mask;
 	return 0;
 }
+#endif

 extern u64 dma_get_required_mask(struct device *dev);

@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);

 /*
@@ -1093,6 +1093,10 @@ struct file_lock {

 #include <linux/fcntl.h>

+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);

 #ifdef CONFIG_FILE_LOCKING
@@ -13,6 +13,7 @@
 #include <linux/errno.h>

+struct device;
 struct gpio_chip;

 /*
  * Some platforms don't support the GPIO programming interface.
@@ -63,6 +63,9 @@
  *             IRQ lines will appear.  Similarly to gpio_base, the expander
  *             will create a block of irqs beginning at this number.
  *             This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
 	unsigned gpio_base;

@@ -73,6 +76,7 @@ struct sx150x_platform_data {
 	u16 io_polarity;
 	int irq_summary;
 	unsigned irq_base;
+	bool reset_during_probe;
 };

 #endif /* __LINUX_I2C_SX150X_H */
include/linux/intel-gtt.h (new file, 20 lines)

@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }

 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset,
 			 int slot)

@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }

 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 	iounmap_atomic(vaddr, slot);
 }

-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
 	resource_size_t phys_addr;

@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }

 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 	iounmap(vaddr);
 }

@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-	return (struct io_mapping *) ioremap_wc(base, size);
+	return (struct io_mapping __force *) ioremap_wc(base, size);
 }

 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-	iounmap(mapping);
+	iounmap((void __force __iomem *) mapping);
 }

 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset,
 			 int slot)
 {
-	return ((char *) mapping) + offset;
+	return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }

 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-	return ((char *) mapping) + offset;
+	return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }

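These io_mapping hunks change only address-space annotations, not behaviour. A sketch of what __iomem and __force mean to the sparse checker (assumptions: the __CHECKER__ definitions below mirror linux/compiler.h; readl_example is a made-up name, not a kernel API):

#ifdef __CHECKER__
# define __iomem	__attribute__((noderef, address_space(2)))
# define __force	__attribute__((force))
#else
# define __iomem
# define __force
#endif

/* sparse warns when an __iomem pointer mixes with plain pointers;
 * a __force cast, as used in the hunks above, silences it on purpose. */
static inline unsigned int readl_example(const volatile void __iomem *addr)
{
	return *(const volatile unsigned int __force *)addr;
}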
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	__tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })

@@ -228,7 +228,7 @@
  */
 #define kfifo_reset_out(fifo) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	__tmp->kfifo.out = __tmp->kfifo.in; \
 })

@@ -238,7 +238,7 @@
  */
 #define kfifo_len(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpl = (fifo); \
+	typeof((fifo) + 1) __tmpl = (fifo); \
 	__tmpl->kfifo.in - __tmpl->kfifo.out; \
 })

@@ -248,7 +248,7 @@
  */
 #define kfifo_is_empty(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	__tmpq->kfifo.in == __tmpq->kfifo.out; \
 })

@@ -258,7 +258,7 @@
  */
 #define kfifo_is_full(fifo) \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })

@@ -269,7 +269,7 @@
 #define kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmpq = (fifo); \
+	typeof((fifo) + 1) __tmpq = (fifo); \
 	const size_t __recsize = sizeof(*__tmpq->rectype); \
 	unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
 	(__recsize) ? ((__avail <= __recsize) ? 0 : \

@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_skip(fifo) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	if (__recsize) \

@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \

@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	__is_kfifo_ptr(__tmp) ? \
 	__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \

@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	if (__is_kfifo_ptr(__tmp)) \
 		__kfifo_free(__kfifo); \

@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	__is_kfifo_ptr(__tmp) ? \
 	__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \

@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_put(fifo, val) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(val + 1) __val = (val); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((val) + 1) __val = (val); \
 	unsigned int __ret; \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_in(fifo, buf, n) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	const void __user *__from = (from); \
 	unsigned int __len = (len); \
 	unsigned int *__copied = (copied); \

@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	void __user *__to = (to); \
 	unsigned int __len = (len); \
 	unsigned int *__copied = (copied); \

@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct scatterlist *__sgl = (sgl); \
 	int __nents = (nents); \
 	unsigned int __len = (len); \

@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	unsigned int __len = (len); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	struct scatterlist *__sgl = (sgl); \
 	int __nents = (nents); \
 	unsigned int __len = (len); \

@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-	typeof(fifo + 1) __tmp = (fifo); \
+	typeof((fifo) + 1) __tmp = (fifo); \
 	unsigned int __len = (len); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \

@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-	typeof(fifo + 1) __tmp = (fifo); \
-	typeof(buf + 1) __buf = (buf); \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	typeof((buf) + 1) __buf = (buf); \
 	unsigned long __n = (n); \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
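Every kfifo hunk above applies the same mechanical fix: the macro parameter is parenthesized before "+ 1" is appended. A user-space sketch of the operator-precedence hazard this guards against (not from kfifo; the NEXT_* macro names are made up):

#include <stdio.h>

#define NEXT_BAD(p)	(p + 1)		/* expands the argument bare */
#define NEXT_GOOD(p)	((p) + 1)	/* parenthesized, as in the hunks */

int main(void)
{
	int a[4] = { 0, 1, 2, 3 };
	int b[4] = { 10, 11, 12, 13 };
	int flag = 1;

	/* "flag ? a : b + 1" parses as "flag ? a : (b + 1)" */
	printf("bad:  %d\n", *NEXT_BAD(flag ? a : b));	/* prints 0 */
	printf("good: %d\n", *NEXT_GOOD(flag ? a : b));	/* prints 1 */
	return 0;
}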
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;

+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);

@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
 	struct anon_vma *anon_vma = page_anon_vma(page);

-	if (!anon_vma ||
-	    (anon_vma->root == vma->anon_vma->root &&
-	     page->index == linear_page_index(vma, address)))
-		return page;
-
-	return ksm_does_need_to_copy(page, vma, address);
+	return anon_vma &&
+		(anon_vma->root != vma->anon_vma->root ||
+		 page->index != linear_page_index(vma, address));
 }

 int page_referenced_ksm(struct page *page,

@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }

-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
-	return page;
+	return 0;
 }

 static inline int page_referenced_ksm(struct page *page,
@@ -150,7 +150,7 @@
 	int i; \
 	preempt_disable(); \
 	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
-	for_each_online_cpu(i) { \
+	for_each_possible_cpu(i) { \
 		arch_spinlock_t *lock; \
 		lock = &per_cpu(name##_lock, i); \
 		arch_spin_lock(lock); \

@@ -161,7 +161,7 @@
 void name##_global_unlock(void) { \
 	int i; \
 	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
-	for_each_online_cpu(i) { \
+	for_each_possible_cpu(i) { \
 		arch_spinlock_t *lock; \
 		lock = &per_cpu(name##_lock, i); \
 		arch_spin_unlock(lock); \
@@ -335,6 +335,7 @@ enum {
 	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
 	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
+	ATA_EHI_NO_RECOVERY	= (1 << 4),  /* no recovery */

 	ATA_EHI_DID_SOFTRESET	= (1 << 16), /* already soft-reset this port */
 	ATA_EHI_DID_HARDRESET	= (1 << 17), /* already soft-reset this port */

@@ -723,6 +724,7 @@ struct ata_port {
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
 	u8			ctl;	/* cache of ATA control register */
 	u8			last_ctl;	/* Cache last written value */
+	struct ata_link*	sff_pio_task_link; /* link currently used */
 	struct delayed_work	sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
 	struct ata_bmdma_prd	*bmdma_prd;	/* BMDMA SG list */

@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 			u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
@@ -111,9 +111,13 @@ extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
  * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
  * @gpio_base: first gpio number assigned to TC35892.  A maximum of
  *	       %TC35892_NR_GPIOS GPIOs will be allocated.
+ * @setup: callback for board-specific initialization
+ * @remove: callback for board-specific teardown
  */
 struct tc35892_gpio_platform_data {
 	int gpio_base;
+	void (*setup)(struct tc35892 *tc35892, unsigned gpio_base);
+	void (*remove)(struct tc35892 *tc35892, unsigned gpio_base);
 };

 /**
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);

+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */

+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
@@ -283,6 +283,13 @@ struct zone {
 	/* zone watermarks, access with *_wmark_pages(zone) macros */
 	unsigned long watermark[NR_WMARK];

+	/*
+	 * When free pages are below this point, additional steps are taken
+	 * when reading the number of free pages to avoid per-cpu counter
+	 * drift allowing watermarks to be breached
+	 */
+	unsigned long percpu_drift_mark;
+
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several

@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }

+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do { \
 	static struct lock_class_key __key; \
@@ -27,8 +27,6 @@

 #define MAX_LINKS 32

-struct net;
-
 struct sockaddr_nl {
 	sa_family_t	nl_family;	/* AF_NETLINK */
 	unsigned short	nl_pad;		/* zero */

@@ -151,6 +149,8 @@ struct nlattr {
 #include <linux/capability.h>
 #include <linux/skbuff.h>

+struct net;
+
 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 {
 	return (struct nlmsghdr *)skb->data;
@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	bool ret = false;

-	rcu_read_lock_bh();
+	local_irq_save(flags);
 	npinfo = rcu_dereference_bh(skb->dev->npinfo);

 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
 		goto out;

-	spin_lock_irqsave(&npinfo->rx_lock, flags);
+	spin_lock(&npinfo->rx_lock);
 	/* check rx_flags again with the lock held */
 	if (npinfo->rx_flags && __netpoll_rx(skb))
 		ret = true;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	spin_unlock(&npinfo->rx_lock);

 out:
-	rcu_read_unlock_bh();
+	local_irq_restore(flags);
 	return ret;
 }

@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 						    unsigned int devfn)
 { return NULL; }

+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
 #define dev_num_vf(d) (0)
@@ -393,6 +393,9 @@
 #define PCI_DEVICE_ID_VLSI_82C147	0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011	0x0702

+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU	0x5a23
+
 #define PCI_VENDOR_ID_ADL		0x1005
 #define PCI_DEVICE_ID_ADL_2301		0x2301

@@ -2300,6 +2303,8 @@
 #define PCI_DEVICE_ID_P2010		0x0079
 #define PCI_DEVICE_ID_P1020E		0x0100
 #define PCI_DEVICE_ID_P1020		0x0101
+#define PCI_DEVICE_ID_P1021E		0x0102
+#define PCI_DEVICE_ID_P1021		0x0103
 #define PCI_DEVICE_ID_P1011E		0x0108
 #define PCI_DEVICE_ID_P1011		0x0109
 #define PCI_DEVICE_ID_P1022E		0x0110
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);

 #else /* CONFIG_SMP */

-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })

 /* can't distinguish from other static vars, always false */
 static inline bool is_kernel_percpu_address(unsigned long addr)
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
 	int ret;

 	ret = dquot_alloc_space_nodirty(inode, nr);
-	if (!ret)
-		mark_inode_dirty_sync(inode);
+	if (!ret) {
+		/*
+		 * Mark inode fully dirty. Since we are allocating blocks, inode
+		 * would become fully dirty soon anyway and it reportedly
+		 * reduces inode_lock contention.
+		 */
+		mark_inode_dirty(inode);
+	}
 	return ret;
 }

@@ -26,6 +26,9 @@ struct semaphore {
 	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
 }

+#define DEFINE_SEMAPHORE(name)	\
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)	\
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)

@@ -77,8 +77,7 @@ struct serial_struct {
 #define PORT_16654	11
 #define PORT_16850	12
 #define PORT_RSA	13	/* RSA-DV II/S card */
-#define PORT_U6_16550A	14
-#define PORT_MAX	14
+#define PORT_MAX	13

 #define SERIAL_IO_PORT	0
 #define SERIAL_IO_HUB6	1
@@ -44,7 +44,8 @@
 #define PORT_RM9000	16	/* PMC-Sierra RM9xxx internal UART */
 #define PORT_OCTEON	17	/* Cavium OCTEON internal UART */
 #define PORT_AR7	18	/* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250	18	/* max port ID */
+#define PORT_U6_16550A	19	/* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250	19	/* max port ID */

 /*
  * ARM specific type numbers.  These are not currently guaranteed
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 						 int offset,
 						 unsigned int len, __wsum *csump);

-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);
@@ -14,7 +14,9 @@
 #define SPI_MODE_OFFSET			6
 #define SPI_SCPH_OFFSET			6
 #define SPI_SCOL_OFFSET			7
+
 #define SPI_TMOD_OFFSET			8
+#define SPI_TMOD_MASK			(0x3 << SPI_TMOD_OFFSET)
 #define SPI_TMOD_TR			0x0	/* xmit & recv */
 #define SPI_TMOD_TO			0x1	/* xmit only */
 #define SPI_TMOD_RO			0x2	/* recv only */
@@ -30,7 +30,7 @@ struct rpc_inode;
  * The high-level client handle
  */
 struct rpc_clnt {
-	struct kref		cl_kref;	/* Number of references */
+	atomic_t		cl_count;	/* Number of references */
 	struct list_head	cl_clients;	/* Global list of clients */
 	struct list_head	cl_tasks;	/* List of tasks */
 	spinlock_t		cl_lock;	/* spinlock */
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK	0x7fff
 #define SWAP_FLAG_PRIO_SHIFT	0
+#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

 static inline int current_is_kswapd(void)
 {

@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
 	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
 	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
-	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
+	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
 	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
 	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
 	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */

@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);

@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;

-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
 	return x;
 }

+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+					enum zone_stat_item item)
+{
+	long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+	int cpu;
+	for_each_online_cpu(cpu)
+		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);

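A user-space analogue (illustrative only; the names deliberately shadow the kernel ones) of the snapshot logic above: a folded-back global counter plus per-cpu deltas that have not yet been merged. The snapshot sums the pending deltas and clamps at zero, accepting that the result is still racy, exactly as the new comment says:

#include <stdio.h>

#define NR_CPUS 4

static long vm_stat = 100;				/* folded-back global count */
static long vm_stat_diff[NR_CPUS] = { 3, -2, 7, -1 };	/* pending per-cpu deltas */

static unsigned long zone_page_state_snapshot_demo(void)
{
	long x = vm_stat;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		x += vm_stat_diff[cpu];
	if (x < 0)
		x = 0;
	return (unsigned long)x;
}

int main(void)
{
	/* prints "plain 100, snapshot 107" */
	printf("plain %ld, snapshot %lu\n", vm_stat,
	       zone_page_state_snapshot_demo());
	return 0;
}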
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);

 enum {
 	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
-	WORK_STRUCT_CWQ_BIT	= 1,	/* data points to cwq */
-	WORK_STRUCT_LINKED_BIT	= 2,	/* next work is linked to this one */
+	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
+	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
+	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-	WORK_STRUCT_STATIC_BIT	= 3,	/* static initializer (debugobjects) */
-	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
+	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
+	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
 #else
-	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
+	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
 #endif

 	WORK_STRUCT_COLOR_BITS	= 4,

 	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
+	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
 	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
 	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK

@@ -59,8 +61,8 @@ enum {

 	/*
 	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-	 * off.  This makes cwqs aligned to 128 bytes which isn't too
-	 * excessive while allowing 15 workqueue flush colors.
+	 * off.  This makes cwqs aligned to 256 bytes and allows 15
+	 * workqueue flush colors.
	 */
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,

@@ -233,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define work_clear_pending(work) \
 	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

+/*
+ * Workqueue flags and constants.  For details, please refer to
+ * Documentation/workqueue.txt.
+ */
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */

@@ -241,6 +247,8 @@ enum {
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */

+	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
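The comment change from 128 to 256 bytes follows from the new DELAYED bit: in the non-debugobjects case WORK_STRUCT_COLOR_SHIFT moves from 3 to 4, so there are now 4 + 4 = 8 flag bits at the bottom of the work data word. A quick arithmetic check (illustrative; non-debug configuration only):

#include <stdio.h>

enum {
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* was 3 before this merge */
	WORK_STRUCT_COLOR_BITS	= 4,
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,
};

int main(void)
{
	/* prints "flag bits 8, cwq alignment 256 bytes" */
	printf("flag bits %d, cwq alignment %lu bytes\n",
	       WORK_STRUCT_FLAG_BITS, 1UL << WORK_STRUCT_FLAG_BITS);
	return 0;
}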