Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

commit 51c24aaaca
Author: David S. Miller
Date:   2010-01-23 00:31:06 -08:00

1380 changed files with 31816 additions and 11802 deletions


@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
 void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
-int acpi_boot_table_init (void);
+void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
@@ -251,6 +251,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
 void __init acpi_s4_no_nvs(void);
+void __init acpi_set_sci_en_on_resume(void);
 #endif /* CONFIG_PM_SLEEP */
 struct acpi_osc_context {
@@ -320,9 +321,9 @@ static inline int acpi_boot_init(void)
        return 0;
 }

-static inline int acpi_boot_table_init(void)
+static inline void acpi_boot_table_init(void)
 {
-       return 0;
+       return;
 }

 static inline int acpi_mps_check(void)


@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()   : bytes left till the next error boundary
  * blk_rq_sectors()     : sectors left in the entire request
  * blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_err_sectors() : sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
        return blk_rq_cur_bytes(rq) >> 9;
 }

-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-       return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
@@ -944,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+                            sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1116,11 +1112,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
        return q->limits.alignment_offset;
 }

+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+{
+       unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+
+       offset &= granularity - 1;
+       return (granularity + lim->alignment_offset - offset) & (granularity - 1);
+}
+
 static inline int queue_sector_alignment_offset(struct request_queue *q,
                                                sector_t sector)
 {
-       return ((sector << 9) - q->limits.alignment_offset)
-               & (q->limits.io_min - 1);
+       return queue_limit_alignment_offset(&q->limits, sector << 9);
 }

 static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1147,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
 static inline int queue_sector_discard_alignment(struct request_queue *q,
                                                 sector_t sector)
 {
-       return ((sector << 9) - q->limits.discard_alignment)
-               & (q->limits.discard_granularity - 1);
+       struct queue_limits *lim = &q->limits;
+       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+       return (lim->discard_granularity + lim->discard_alignment - alignment)
+               & (lim->discard_granularity - 1);
 }

 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
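Editorial note (not part of the diff): a worked example of the new alignment helper. With a 4096-byte granularity and an alignment_offset of 3584 bytes (the common 7-sector offset on 4 KiB-physical-sector drives):

        /* queue_limit_alignment_offset(lim, 0):
         *   offset &= 4095                  -> 0
         *   (4096 + 3584 - 0) & 4095        -> 3584 bytes misaligned
         *
         * queue_limit_alignment_offset(lim, 3584):
         *   (4096 + 3584 - 3584) & 4095     -> 0, i.e. properly aligned
         */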


@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, int len,
+       int(*fill)(void*, unsigned int),
+       int(*flush)(void*, unsigned int),
+       unsigned char *output,
+       int *pos,
+       void(*error)(char *x));
+#endif
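Editorial note (not part of the diff): a minimal sketch of calling the new decompressor, assuming the usual kernel decompress_fn convention that NULL fill and flush callbacks mean "operate directly on the supplied buffers"; decomp_error() is a hypothetical error callback:

        static void decomp_error(char *msg)
        {
                /* report the failure, e.g. via printk */
        }

        static int example(unsigned char *in, int in_len, unsigned char *out)
        {
                int in_used = 0;

                /* returns 0 on success; in_used reports consumed input */
                return unlzo(in, in_len, NULL, NULL, out, &in_used, decomp_error);
        }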


@@ -53,7 +53,7 @@
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91


@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
 NL_PACKET(resize, 7,
        NL_INT64(               29,     T_MAY_IGNORE,   resize_size)
+       NL_BIT(                 68,     T_MAY_IGNORE,   resize_force)
 )

 NL_PACKET(syncer_conf, 8,


@@ -62,5 +62,7 @@ struct fiemap {
 #define FIEMAP_EXTENT_MERGED           0x00001000 /* File does not natively
                                                    * support extents. Result
                                                    * merged for efficiency. */
+#define FIEMAP_EXTENT_SHARED           0x00002000 /* Space shared with other
+                                                   * files. */

 #endif /* _LINUX_FIEMAP_H */


@@ -340,6 +340,9 @@ struct fw_cdev_send_response {
  * The @closure field is passed back to userspace in the response event.
  * The @handle field is an out parameter, returning a handle to the allocated
  * range to be used for later deallocation of the range.
+ *
+ * The address range is allocated on all local nodes.  The address allocation
+ * is exclusive except for the FCP command and response registers.
  */
 struct fw_cdev_allocate {
        __u64 offset;


@@ -248,8 +248,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
                                          void *data, size_t length,
                                          void *callback_data);
 /*
- * Important note:  The callback must guarantee that either fw_send_response()
- * or kfree() is called on the @request.
+ * Important note:  Except for the FCP registers, the callback must guarantee
+ * that either fw_send_response() or kfree() is called on the @request.
  */
 typedef void (*fw_address_callback_t)(struct fw_card *card,
                                      struct fw_request *request,


@@ -2463,7 +2463,7 @@ int proc_nr_files(struct ctl_table *table, int write,
 int __init get_filesystem_list(char *buf);

-#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
+#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))

 #endif /* __KERNEL__ */
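Editorial note (not part of the diff): with the new table, ACC_MODE() indexes directly by the O_ACCMODE bits of the open flags (O_RDONLY == 0, O_WRONLY == 1, O_RDWR == 2):

        /* ACC_MODE(O_RDONLY) -> 04 (MAY_READ)
         * ACC_MODE(O_WRONLY) -> 02 (MAY_WRITE)
         * ACC_MODE(O_RDWR)   -> 06 (MAY_READ | MAY_WRITE)
         *
         * OPEN_FMODE() maps the same bits to fmode_t:
         *   OPEN_FMODE(O_RDONLY) == FMODE_READ    ((0 + 1) & 3 == 1)
         *   OPEN_FMODE(O_WRONLY) == FMODE_WRITE   ((1 + 1) & 3 == 2)
         *   OPEN_FMODE(O_RDWR)   == FMODE_READ | FMODE_WRITE
         */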


@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
 #define part_stat_read(part, field) \
 ({ \
        typeof((part)->dkstats->field) res = 0; \
-       int i; \
-       for_each_possible_cpu(i) \
-               res += per_cpu_ptr((part)->dkstats, i)->field; \
+       unsigned int _cpu; \
+       for_each_possible_cpu(_cpu) \
+               res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
        res; \
 })


@@ -46,7 +46,7 @@ void kmap_flush_unused(void);

 static inline unsigned int nr_free_highpages(void) { return 0; }

-#define totalhigh_pages 0
+#define totalhigh_pages 0UL

 #ifndef ARCH_HAS_KMAP
 static inline void *kmap(struct page *page)


@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
        unsigned short unlock_key2;     /* Unlock Key 2 */
 };

+struct adp5588_gpio_platform_data {
+       unsigned gpio_start;            /* GPIO Chip base # */
+       unsigned pullup_dis_mask;       /* Pull-Up Disable Mask */
+       int     (*setup)(struct i2c_client *client,
+                               int gpio, unsigned ngpio,
+                               void *context);
+       int     (*teardown)(struct i2c_client *client,
+                               int gpio, unsigned ngpio,
+                               void *context);
+       void    *context;
+};
+
 #endif
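Editorial note (not part of the diff): a sketch of how a board file might supply the new GPIO platform data; all names and numbers here are hypothetical:

        static struct adp5588_gpio_platform_data board_adp5588_gpio = {
                .gpio_start      = 200, /* expander pins become GPIOs 200.. */
                .pullup_dis_mask = 0,   /* keep all pull-ups enabled */
                /* .setup / .teardown hooks are optional and may be NULL */
        };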


@@ -4,32 +4,6 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>

-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-       spinlock_t lock;
-
-       void (*dtor)(struct as_io_context *aic); /* destructor */
-       void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-       unsigned long state;
-       atomic_t nr_queued; /* queued reads & sync writes */
-       atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-       /* IO History tracking */
-       /* Thinktime */
-       unsigned long last_end_request;
-       unsigned long ttime_total;
-       unsigned long ttime_samples;
-       unsigned long ttime_mean;
-       /* Layout pattern */
-       unsigned int seek_samples;
-       sector_t last_request_pos;
-       u64 seek_total;
-       sector_t seek_mean;
-};
-
 struct cfq_queue;
 struct cfq_io_context {
        void *key;
@@ -78,7 +52,6 @@ struct io_context {
        unsigned long last_waited; /* Time last woken after wait for request */
        int nr_batch_requests;     /* Number of requests left in the batch */

-       struct as_io_context *aic;
        struct radix_tree_root radix_root;
        struct hlist_head cic_list;
        void *ioc_data;


@@ -653,6 +653,7 @@ struct transaction_s
         * waiting for it to finish.
         */
        unsigned int t_synchronous_commit:1;
+       unsigned int t_flushed_data_blocks:1;

        /*
         * For use by the filesystem to store fs-specific data


@@ -734,6 +734,10 @@ struct sysinfo {
 /* Force a compilation error if condition is constant and true */
 #define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+       BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
 /* Force a compilation error if condition is true, but also produce a
    result (of value 0 and type size_t), so the expression can be used
    e.g. in a structure initializer (or where-ever else comma expressions
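Editorial note (not part of the diff): the (n & (n - 1)) test clears the lowest set bit, so it yields zero exactly for powers of two; for instance:

        BUILD_BUG_ON_NOT_POWER_OF_2(64);           /* fine: 64 & 63 == 0 */
        BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(u64));  /* fine: 8 is a power of 2 */
        BUILD_BUG_ON_NOT_POWER_OF_2(24);           /* build break: 24 & 23 == 16 */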


@@ -67,7 +67,7 @@ struct kfifo {
 /**
  * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
  * @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
  *
  * Note1: the macro can be used inside struct or union declaration
  * Note2: the macro creates two objects:
@@ -81,7 +81,7 @@ union { \
 }

 /**
- * INIT_KFIFO - Initialize a kfifo declared by DECLARED_KFIFO
+ * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
  * @name: name of the declared kfifo datatype
  */
 #define INIT_KFIFO(name) \
@@ -91,7 +91,7 @@ union { \
 /**
  * DEFINE_KFIFO - macro to define and initialize a kfifo
  * @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
  *
  * Note1: the macro can be used for global and local kfifo data type variables
  * Note2: the macro creates two objects:
@@ -104,15 +104,28 @@ union { \
 #undef __kfifo_initializer

-extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+extern void kfifo_init(struct kfifo *fifo, void *buffer,
                unsigned int size);
 extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
                gfp_t gfp_mask);
 extern void kfifo_free(struct kfifo *fifo);
 extern unsigned int kfifo_in(struct kfifo *fifo,
-               const unsigned char *from, unsigned int len);
+               const void *from, unsigned int len);
 extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
-               unsigned char *to, unsigned int len);
+               void *to, unsigned int len);
+extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
+               void *to, unsigned int len, unsigned offset);
+
+/**
+ * kfifo_initialized - Check if kfifo is initialized.
+ * @fifo: fifo to check
+ * Return %true if FIFO is initialized, otherwise %false.
+ * Assumes the fifo was 0 before.
+ */
+static inline bool kfifo_initialized(struct kfifo *fifo)
+{
+       return fifo->buffer != 0;
+}

 /**
  * kfifo_reset - removes the entire FIFO contents
@@ -194,7 +207,7 @@ static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
  * bytes copied.
  */
 static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
-               const unsigned char *from, unsigned int n, spinlock_t *lock)
+               const void *from, unsigned int n, spinlock_t *lock)
 {
        unsigned long flags;
        unsigned int ret;
@@ -219,7 +232,7 @@ static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
  * @to buffer and returns the number of copied bytes.
  */
 static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
-               unsigned char *to, unsigned int n, spinlock_t *lock)
+               void *to, unsigned int n, spinlock_t *lock)
 {
        unsigned long flags;
        unsigned int ret;
@@ -228,13 +241,6 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
        ret = kfifo_out(fifo, to, n);

-       /*
-        * optimization: if the FIFO is empty, set the indices to 0
-        * so we don't wrap the next time
-        */
-       if (kfifo_is_empty(fifo))
-               kfifo_reset(fifo);
-
        spin_unlock_irqrestore(lock, flags);

        return ret;
@@ -242,11 +248,11 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,

 extern void kfifo_skip(struct kfifo *fifo, unsigned int len);

-extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
-       const void __user *from, unsigned int n);
+extern __must_check int kfifo_from_user(struct kfifo *fifo,
+       const void __user *from, unsigned int n, unsigned *lenout);

-extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
-       void __user *to, unsigned int n);
+extern __must_check int kfifo_to_user(struct kfifo *fifo,
+       void __user *to, unsigned int n, unsigned *lenout);

 /*
  * __kfifo_add_out internal helper function for updating the out offset
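Editorial note (not part of the diff): a minimal sketch of the reworked API in a hypothetical driver init path. The buffer size must be a power of two, and the void * parameters remove the old casts to unsigned char *:

        static struct kfifo fifo;

        static int example_init(void)
        {
                struct event { int id; } ev = { .id = 1 }, out;
                unsigned int copied;
                int ret;

                ret = kfifo_alloc(&fifo, 256, GFP_KERNEL); /* 256 == 2^8 */
                if (ret)
                        return ret;

                kfifo_in(&fifo, &ev, sizeof(ev));          /* no cast needed */
                copied = kfifo_out(&fifo, &out, sizeof(out));

                kfifo_free(&fifo);
                return copied == sizeof(out) ? 0 : -EIO;
        }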


@@ -29,8 +29,7 @@ struct pt_regs;
  *
  *     On some architectures it is required to skip a breakpoint
  *     exception when it occurs after a breakpoint has been removed.
- *     This can be implemented in the architecture specific portion of
- *     for kgdb.
+ *     This can be implemented in the architecture specific portion of kgdb.
  */
 extern int kgdb_skipexception(int exception, struct pt_regs *regs);
@@ -65,7 +64,7 @@ struct uart_port;
 /**
  *     kgdb_breakpoint - compiled in breakpoint
  *
- *     This will be impelmented a static inline per architecture. This
+ *     This will be implemented as a static inline per architecture. This
  *     function is called by the kgdb core to execute an architecture
  *     specific trap to cause kgdb to enter the exception processing.
  *
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
  *     @flags: Current IRQ state
  *
  *     On SMP systems, we need to get the attention of the other CPUs
- *     and get them be in a known state. This should do what is needed
+ *     and get them into a known state. This should do what is needed
  *     to get the other CPUs to call kgdb_wait(). Note that on some arches,
  *     the NMI approach is not used for rounding up all the CPUs. For example,
  *     in case of MIPS, smp_call_function() is used to roundup CPUs. In


@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);

 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ *     struct a {
+ *             int x:8, y:8;
+ *     };
+ *
+ * then this should be rewritten as
+ *
+ *     struct a {
+ *             kmemcheck_bitfield_begin(flags);
+ *             int x:8, y:8;
+ *             kmemcheck_bitfield_end(flags);
+ *     };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ *     kmemcheck_annotate_bitfield(a, flags);
+ */
+#define kmemcheck_bitfield_begin(name) \
+       int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+       int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name)                         \
+       do {                                                            \
+               int _n;                                                 \
+                                                                       \
+               if (!ptr)                                               \
+                       break;                                          \
+                                                                       \
+               _n = (long) &((ptr)->name##_end)                        \
+                       - (long) &((ptr)->name##_begin);                \
+               MAYBE_BUILD_BUG_ON(_n < 0);                             \
+                                                                       \
+               kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+       } while (0)
+
+#define kmemcheck_annotate_variable(var)                               \
+       do {                                                            \
+               kmemcheck_mark_initialized(&(var), sizeof(var));        \
+       } while (0)                                                     \
+
 #else
 #define kmemcheck_enabled 0
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
        return true;
 }

-#endif /* CONFIG_KMEMCHECK */
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- *     struct a {
- *             int x:8, y:8;
- *     };
- *
- * then this should be rewritten as
- *
- *     struct a {
- *             kmemcheck_bitfield_begin(flags);
- *             int x:8, y:8;
- *             kmemcheck_bitfield_end(flags);
- *     };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- *     kmemcheck_annotate_bitfield(a, flags);
- *
- * Note: We provide the same definitions for both kmemcheck and non-
- * kmemcheck kernels. This makes it harder to introduce accidental errors. It
- * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
- */
-#define kmemcheck_bitfield_begin(name) \
-       int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
-       int name##_end[0];
-
-#define kmemcheck_annotate_bitfield(ptr, name)                         \
-       do {                                                            \
-               int _n;                                                 \
-                                                                       \
-               if (!ptr)                                               \
-                       break;                                          \
-                                                                       \
-               _n = (long) &((ptr)->name##_end)                        \
-                       - (long) &((ptr)->name##_begin);                \
-               MAYBE_BUILD_BUG_ON(_n < 0);                             \
-                                                                       \
-               kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
-       } while (0)
-
-#define kmemcheck_annotate_variable(var)                               \
-       do {                                                            \
-               kmemcheck_mark_initialized(&(var), sizeof(var));        \
-       } while (0)                                                     \
+#define kmemcheck_bitfield_begin(name)
+#define kmemcheck_bitfield_end(name)
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+       do {                                    \
+       } while (0)
+
+#define kmemcheck_annotate_variable(var)       \
+       do {                                    \
+       } while (0)
+
+#endif /* CONFIG_KMEMCHECK */

 #endif /* LINUX_KMEMCHECK_H */


@@ -354,6 +354,9 @@ enum {
        /* max tries if error condition is still set after ->error_handler */
        ATA_EH_MAX_TRIES        = 5,

+       /* sometimes resuming a link requires several retries */
+       ATA_LINK_RESUME_TRIES   = 5,
+
        /* how hard are we gonna try to probe/recover devices */
        ATA_PROBE_MAX_TRIES     = 3,
        ATA_EH_DEV_TRIES        = 3,

include/linux/list_sort.h (new file)

@@ -0,0 +1,11 @@
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+void list_sort(void *priv, struct list_head *head,
+              int (*cmp)(void *priv, struct list_head *a,
+                         struct list_head *b));
+#endif
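Editorial note (not part of the diff): a minimal sketch of sorting an intrusive list with the new helper; struct item and my_cmp() are hypothetical:

        struct item {
                struct list_head node;
                int key;
        };

        static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
        {
                struct item *ia = list_entry(a, struct item, node);
                struct item *ib = list_entry(b, struct item, node);

                return ia->key - ib->key;  /* <0, 0, >0: ascending by key */
        }

        /* given LIST_HEAD(items) populated with struct item entries: */
        /*      list_sort(NULL, &items, my_cmp); */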


@@ -2,6 +2,8 @@
 #define MFD_TMIO_H

 #include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>

 #define tmio_ioread8(addr) readb(addr)
 #define tmio_ioread16(addr) readw(addr)
@@ -18,11 +20,48 @@
        writew((val) >> 16, (addr) + 2); \
 } while (0)

+#define CNF_CMD                 0x04
+#define CNF_CTL_BASE            0x10
+#define CNF_INT_PIN             0x3d
+#define CNF_STOP_CLK_CTL        0x40
+#define CNF_GCLK_CTL            0x41
+#define CNF_SD_CLK_MODE         0x42
+#define CNF_PIN_STATUS          0x44
+#define CNF_PWR_CTL_1           0x48
+#define CNF_PWR_CTL_2           0x49
+#define CNF_PWR_CTL_3           0x4a
+#define CNF_CARD_DETECT_MODE    0x4c
+#define CNF_SD_SLOT             0x50
+#define CNF_EXT_GCLK_CTL_1      0xf0
+#define CNF_EXT_GCLK_CTL_2      0xf1
+#define CNF_EXT_GCLK_CTL_3      0xf9
+#define CNF_SD_LED_EN_1         0xfa
+#define CNF_SD_LED_EN_2         0xfe
+
+#define SDCREN 0x2   /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
+
+#define sd_config_write8(base, shift, reg, val) \
+       tmio_iowrite8((val), (base) + ((reg) << (shift)))
+#define sd_config_write16(base, shift, reg, val) \
+       tmio_iowrite16((val), (base) + ((reg) << (shift)))
+#define sd_config_write32(base, shift, reg, val) \
+       do { \
+               tmio_iowrite16((val), (base) + ((reg) << (shift))); \
+               tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
+       } while (0)
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
+
 /*
  * data for the MMC controller
  */
 struct tmio_mmc_data {
        const unsigned int hclk;
+       void (*set_pwr)(struct platform_device *host, int state);
+       void (*set_clk_div)(struct platform_device *host, int state);
 };

 /*


@@ -666,20 +666,20 @@
 #define WM8350_ISINK_FLASH_DUR_64MS            (1 << 8)
 #define WM8350_ISINK_FLASH_DUR_96MS            (2 << 8)
 #define WM8350_ISINK_FLASH_DUR_1024MS          (3 << 8)
-#define WM8350_ISINK_FLASH_ON_INSTANT          (0 << 4)
-#define WM8350_ISINK_FLASH_ON_0_25S            (1 << 4)
-#define WM8350_ISINK_FLASH_ON_0_50S            (2 << 4)
-#define WM8350_ISINK_FLASH_ON_1_00S            (3 << 4)
-#define WM8350_ISINK_FLASH_ON_1_95S            (1 << 4)
-#define WM8350_ISINK_FLASH_ON_3_91S            (2 << 4)
-#define WM8350_ISINK_FLASH_ON_7_80S            (3 << 4)
-#define WM8350_ISINK_FLASH_OFF_INSTANT         (0 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_25S           (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_50S           (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_00S           (3 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_95S           (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_3_91S           (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_7_80S           (3 << 0)
+#define WM8350_ISINK_FLASH_ON_INSTANT          (0 << 0)
+#define WM8350_ISINK_FLASH_ON_0_25S            (1 << 0)
+#define WM8350_ISINK_FLASH_ON_0_50S            (2 << 0)
+#define WM8350_ISINK_FLASH_ON_1_00S            (3 << 0)
+#define WM8350_ISINK_FLASH_ON_1_95S            (1 << 0)
+#define WM8350_ISINK_FLASH_ON_3_91S            (2 << 0)
+#define WM8350_ISINK_FLASH_ON_7_80S            (3 << 0)
+#define WM8350_ISINK_FLASH_OFF_INSTANT         (0 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_25S           (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_50S           (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_00S           (3 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_95S           (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_3_91S           (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_7_80S           (3 << 4)

 /*
  * Regulator Interrupts.


@@ -1089,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);


@@ -122,7 +122,9 @@ struct vm_region {
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

-       atomic_t        vm_usage;       /* region usage count */
+       int             vm_usage;       /* region usage count (access under nommu_region_sem) */
+       bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
+                                               * this region */
 };

 /*
@@ -203,10 +205,12 @@ struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        struct rb_root mm_rb;
        struct vm_area_struct * mmap_cache;     /* last find_vma result */
+#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long task_size;                /* size of task vm space */
        unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */


@@ -243,6 +243,7 @@ struct pci_dev {
        unsigned int    d2_support:1;   /* Low power state D2 is supported */
        unsigned int    no_d1d2:1;      /* Only allow D0 and D3 */
        unsigned int    wakeup_prepared:1;
+       unsigned int    d3_delay;       /* D3->D0 transition time in ms */

 #ifdef CONFIG_PCIEASPM
        struct pcie_link_state  *link_state;    /* ASPM link state. */
@@ -566,6 +567,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
                                resource_size_t);
 void pcibios_update_irq(struct pci_dev *, int irq);

+/* Weak but can be overriden by arch */
+void pci_fixup_cardbus(struct pci_bus *);
+
 /* Generic PCI functions used internally */
 extern struct pci_bus *pci_find_bus(int domain, int busnr);


@@ -814,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-       return (event->attr.type != PERF_TYPE_RAW) &&
-               (event->attr.type != PERF_TYPE_HARDWARE) &&
-               (event->attr.type != PERF_TYPE_HW_CACHE);
+       switch (event->attr.type) {
+       case PERF_TYPE_SOFTWARE:
+       case PERF_TYPE_TRACEPOINT:
+       /* for now the breakpoint stuff also works as software event */
+       case PERF_TYPE_BREAKPOINT:
+               return 1;
+       }
+       return 0;
 }

 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];


@@ -485,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv);
 int phy_driver_register(struct phy_driver *new_driver);
 void phy_prepare_link(struct phy_device *phydev,
                void (*adjust_link)(struct net_device *));
+void phy_state_machine(struct work_struct *work);
 void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *));
 void phy_stop_machine(struct phy_device *phydev);


@@ -2,13 +2,25 @@
 #define _LINUX_POISON_H

 /********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
 /*
  * These are non-NULL pointers that will result in page faults
  * under normal circumstances, used to verify that nobody uses
  * non-initialized list entries.
  */
-#define LIST_POISON1  ((void *) 0x00100100)
-#define LIST_POISON2  ((void *) 0x00200200)
+#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)

 /********** include/linux/timer.h **********/
 /*
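Editorial note (not part of the diff): a worked example of the new offset. On an arch that sets CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000:

        /* LIST_POISON1 = (void *) 0x00100100 + 0xdead000000000000UL
         *              = 0xdead000000100100
         * a kernel-space address user space cannot map, so dereferencing
         * a poisoned list_head faults even if an exploit can map the old
         * low addresses. */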


@@ -62,6 +62,12 @@ void reiserfs_write_unlock(struct super_block *s);
 int reiserfs_write_lock_once(struct super_block *s);
 void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);

+#ifdef CONFIG_REISERFS_CHECK
+void reiserfs_lock_check_recursive(struct super_block *s);
+#else
+static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
+#endif
+
 /*
  * Several mutexes depend on the write lock.
  * However sometimes we want to relax the write lock while we hold
@@ -92,11 +98,31 @@ void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
 static inline void reiserfs_mutex_lock_safe(struct mutex *m,
                               struct super_block *s)
 {
+       reiserfs_lock_check_recursive(s);
        reiserfs_write_unlock(s);
        mutex_lock(m);
        reiserfs_write_lock(s);
 }

+static inline void
+reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
+                              struct super_block *s)
+{
+       reiserfs_lock_check_recursive(s);
+       reiserfs_write_unlock(s);
+       mutex_lock_nested(m, subclass);
+       reiserfs_write_lock(s);
+}
+
+static inline void
+reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
+{
+       reiserfs_lock_check_recursive(s);
+       reiserfs_write_unlock(s);
+       down_read(sem);
+       reiserfs_write_lock(s);
+}
+
 /*
  * When we schedule, we usually want to also release the write lock,
  * according to the previous bkl based locking scheme of reiserfs.


@@ -3,8 +3,6 @@

 #include <linux/time.h>

-struct task_struct;
-
 /*
  * Resource control/accounting header file for linux
  */
@@ -70,6 +68,12 @@ struct rlimit {
  */
 #include <asm/resource.h>

+#ifdef __KERNEL__
+
+struct task_struct;
+
 int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+
+#endif /* __KERNEL__ */

 #endif


@@ -377,6 +377,8 @@ extern int sysctl_max_map_count;

 #include <linux/aio.h>

+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
@@ -386,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long flags);
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif

 #if USE_SPLIT_PTLOCKS
 /*
@@ -2491,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif /* CONFIG_SMP */

-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
 #ifdef CONFIG_TRACING
 extern void
 __trace_special(void *__tr, void *__data,
@@ -2601,6 +2604,28 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */

+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+               unsigned int limit)
+{
+       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+               unsigned int limit)
+{
+       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+       return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+       return task_rlimit_max(current, limit);
+}
+
 #endif /* __KERNEL__ */

 #endif
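Editorial note (not part of the diff): the new helpers replace open-coded reads of current->signal->rlim[], e.g.:

        unsigned long max_stack = rlimit(RLIMIT_STACK);       /* soft limit */
        unsigned long max_files = rlimit_max(RLIMIT_NOFILE);  /* hard limit */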


@@ -136,25 +136,6 @@ static inline void serio_continue_rx(struct serio *serio)
        spin_unlock_irq(&serio->lock);
 }

-/*
- * Use the following functions to pin serio's driver in process context
- */
-static inline int serio_pin_driver(struct serio *serio)
-{
-       return mutex_lock_interruptible(&serio->drv_mutex);
-}
-
-static inline void serio_pin_driver_uninterruptible(struct serio *serio)
-{
-       mutex_lock(&serio->drv_mutex);
-}
-
-static inline void serio_unpin_driver(struct serio *serio)
-{
-       mutex_unlock(&serio->drv_mutex);
-}
-
 #endif

 /*


@@ -111,6 +111,7 @@
 #define SONYPI_EVENT_VOLUME_INC_PRESSED                69
 #define SONYPI_EVENT_VOLUME_DEC_PRESSED                70
 #define SONYPI_EVENT_BRIGHTNESS_PRESSED                71
+#define SONYPI_EVENT_MEDIA_PRESSED                     72

 /* get/set brightness */
 #define SONYPI_IOCGBRT _IOR('v', 0, __u8)


@@ -72,7 +72,10 @@ static inline __must_check char *strstrip(char *str)
 }

 #ifndef __HAVE_ARCH_STRSTR
-extern char * strstr(const char *,const char *);
+extern char * strstr(const char *, const char *);
 #endif
+#ifndef __HAVE_ARCH_STRNSTR
+extern char * strnstr(const char *, const char *, size_t);
+#endif
 #ifndef __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);
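Editorial note (not part of the diff): strnstr() is a length-limited strstr(); only the first len bytes of the haystack are searched:

        const char *buf = "key=value";
        strnstr(buf, "value", 9);   /* found: "value" lies within 9 bytes */
        strnstr(buf, "value", 4);   /* NULL: only "key=" was examined */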


@@ -195,7 +195,7 @@ struct perf_event_attr;
        static const struct syscall_metadata __used             \
          __attribute__((__aligned__(4)))                       \
          __attribute__((section("__syscalls_metadata")))       \
-         __syscall_meta_##sname = {                            \
+         __syscall_meta__##sname = {                           \
          .name         = "sys_"#sname,                         \
          .nb_args      = 0,                                    \
          .enter_event  = &event_enter__##sname,                \


@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
                                | 1*SD_WAKE_AFFINE                      \
                                | 1*SD_SHARE_CPUPOWER                   \
                                | 0*SD_POWERSAVINGS_BALANCE             \
-                               | 0*SD_SHARE_PKG_RESOURCES              \
+                               | 1*SD_SHARE_PKG_RESOURCES              \
                                | 0*SD_SERIALIZE                        \
                                | 0*SD_PREFER_SIBLING                   \
                                ,                                       \


@@ -464,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
 extern void tty_port_free_xmit_buf(struct tty_port *port);
 extern void tty_port_put(struct tty_port *port);

-extern inline struct tty_port *tty_port_get(struct tty_port *port)
+static inline struct tty_port *tty_port_get(struct tty_port *port)
 {
        if (port)
                kref_get(&port->kref);
@@ -486,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
                                struct tty_struct *tty, struct file *filp);
 extern int tty_port_open(struct tty_port *port,
                                struct tty_struct *tty, struct file *filp);
-extern inline int tty_port_users(struct tty_port *port)
+static inline int tty_port_users(struct tty_port *port)
 {
        return port->count + port->blocked_open;
 }


@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * happens, handle that and return -EFAULT.
  */
 extern long probe_kernel_read(void *dst, void *src, size_t size);
+extern long __probe_kernel_read(void *dst, void *src, size_t size);

 /*
  * probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-extern long probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);

 #endif /* __LINUX_UACCESS_H__ */
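Editorial note (not part of the diff): typical use of probe_kernel_read(), which returns 0 on success and -EFAULT instead of faulting on a bad address (addr here is a hypothetical untrusted kernel address):

        unsigned long val;

        if (probe_kernel_read(&val, (void *)addr, sizeof(val)))
                return -EFAULT;  /* addr was not a readable kernel address */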


@@ -192,6 +192,7 @@ struct usb_interface {
        unsigned needs_altsetting0:1;   /* switch to altsetting 0 is pending */
        unsigned needs_binding:1;       /* needs delayed unbind/rebind */
        unsigned reset_running:1;
+       unsigned resetting_device:1;    /* true: bandwidth alloc after reset */

        struct device dev;              /* interface specific device info */
        struct device *usb_dev;


@@ -70,6 +70,7 @@ struct writeback_control {
 struct bdi_writeback;
 int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *);
+int writeback_inodes_sb_if_idle(struct super_block *);
 void sync_inodes_sb(struct super_block *);
 void writeback_inodes_wbc(struct writeback_control *wbc);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);