Merge branch 'upstream'

Conflicts:
	drivers/scsi/libata-core.c

commit 4bbf7bc4c7
1282 changed files with 38304 additions and 22085 deletions
|
@ -46,6 +46,9 @@
|
|||
* bitmap_parse(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
|
||||
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
|
||||
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list
|
||||
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
|
||||
* bitmap_release_region(bitmap, pos, order) Free specified bit region
|
||||
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
|
||||
*/
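As a quick illustration of the region helpers listed above (an editorial sketch, not part of the diff), a caller typically pairs find and release like this; the pool name and sizes are hypothetical:

#include <linux/bitmap.h>

#define POOL_BITS 64

static DECLARE_BITMAP(pool, POOL_BITS);		/* hypothetical 64-slot pool */

static int use_four_slots(void)
{
	/* Find and claim a naturally aligned run of 1 << 2 = 4 free bits. */
	int pos = bitmap_find_free_region(pool, POOL_BITS, 2);

	if (pos < 0)
		return pos;			/* no free region of that size */

	/* ... slots pos .. pos + 3 are now reserved for this caller ... */

	bitmap_release_region(pool, pos, 2);	/* hand them back */
	return 0;
}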
|
||||
|
||||
/*
@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t;
|
|||
struct elevator_queue;
|
||||
typedef struct elevator_queue elevator_t;
|
||||
struct request_pm_state;
|
||||
struct blk_trace;
|
||||
|
||||
#define BLKDEV_MIN_RQ 4
|
||||
#define BLKDEV_MAX_RQ 128 /* Default maximum */
|
||||
|
@ -416,6 +417,8 @@ struct request_queue
|
|||
unsigned int sg_reserved_size;
|
||||
int node;
|
||||
|
||||
struct blk_trace *blk_trace;
|
||||
|
||||
/*
|
||||
* reserved for flush operations
|
||||
*/

include/linux/blktrace_api.h (new file, 277 lines)
@@ -0,0 +1,277 @@
#ifndef BLKTRACE_H
|
||||
#define BLKTRACE_H
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/relay.h>
|
||||
|
||||
/*
|
||||
* Trace categories
|
||||
*/
|
||||
enum blktrace_cat {
|
||||
BLK_TC_READ = 1 << 0, /* reads */
|
||||
BLK_TC_WRITE = 1 << 1, /* writes */
|
||||
BLK_TC_BARRIER = 1 << 2, /* barrier */
|
||||
BLK_TC_SYNC = 1 << 3, /* sync */
|
||||
BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
|
||||
BLK_TC_REQUEUE = 1 << 5, /* requeueing */
|
||||
BLK_TC_ISSUE = 1 << 6, /* issue */
|
||||
BLK_TC_COMPLETE = 1 << 7, /* completions */
|
||||
BLK_TC_FS = 1 << 8, /* fs requests */
|
||||
BLK_TC_PC = 1 << 9, /* pc requests */
|
||||
BLK_TC_NOTIFY = 1 << 10, /* special message */
|
||||
|
||||
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
|
||||
};
|
||||
|
||||
#define BLK_TC_SHIFT (16)
|
||||
#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
|
||||
|
||||
/*
|
||||
* Basic trace actions
|
||||
*/
|
||||
enum blktrace_act {
|
||||
__BLK_TA_QUEUE = 1, /* queued */
|
||||
__BLK_TA_BACKMERGE, /* back merged to existing rq */
|
||||
__BLK_TA_FRONTMERGE, /* front merge to existing rq */
|
||||
__BLK_TA_GETRQ, /* allocated new request */
|
||||
__BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
|
||||
__BLK_TA_REQUEUE, /* request requeued */
|
||||
__BLK_TA_ISSUE, /* sent to driver */
|
||||
__BLK_TA_COMPLETE, /* completed by driver */
|
||||
__BLK_TA_PLUG, /* queue was plugged */
|
||||
__BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
|
||||
__BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
|
||||
__BLK_TA_INSERT, /* insert request */
|
||||
__BLK_TA_SPLIT, /* bio was split */
|
||||
__BLK_TA_BOUNCE, /* bio was bounced */
|
||||
__BLK_TA_REMAP, /* bio was remapped */
|
||||
};
|
||||
|
||||
/*
|
||||
* Trace actions in full. Additionally, read or write is masked
|
||||
*/
|
||||
#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
|
||||
#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
|
||||
#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
|
||||
#define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
|
||||
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
|
||||
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
|
||||
|
||||
#define BLK_IO_TRACE_MAGIC 0x65617400
|
||||
#define BLK_IO_TRACE_VERSION 0x07
|
||||
|
||||
/*
|
||||
* The trace itself
|
||||
*/
|
||||
struct blk_io_trace {
|
||||
u32 magic; /* MAGIC << 8 | version */
|
||||
u32 sequence; /* event number */
|
||||
u64 time; /* in microseconds */
|
||||
u64 sector; /* disk offset */
|
||||
u32 bytes; /* transfer length */
|
||||
u32 action; /* what happened */
|
||||
u32 pid; /* who did it */
|
||||
u32 device; /* device number */
|
||||
u32 cpu; /* on what cpu did it happen */
|
||||
u16 error; /* completion error */
|
||||
u16 pdu_len; /* length of data after this trace */
|
||||
};
|
||||
|
||||
/*
|
||||
* The remap event
|
||||
*/
|
||||
struct blk_io_trace_remap {
|
||||
u32 device;
|
||||
u32 __pad;
|
||||
u64 sector;
|
||||
};
|
||||
|
||||
enum {
|
||||
Blktrace_setup = 1,
|
||||
Blktrace_running,
|
||||
Blktrace_stopped,
|
||||
};
|
||||
|
||||
struct blk_trace {
|
||||
int trace_state;
|
||||
struct rchan *rchan;
|
||||
unsigned long *sequence;
|
||||
u16 act_mask;
|
||||
u64 start_lba;
|
||||
u64 end_lba;
|
||||
u32 pid;
|
||||
u32 dev;
|
||||
struct dentry *dir;
|
||||
struct dentry *dropped_file;
|
||||
atomic_t dropped;
|
||||
};
|
||||
|
||||
/*
|
||||
* User setup structure passed with BLKTRACESTART
|
||||
*/
|
||||
struct blk_user_trace_setup {
|
||||
char name[BDEVNAME_SIZE]; /* output */
|
||||
u16 act_mask; /* input */
|
||||
u32 buf_size; /* input */
|
||||
u32 buf_nr; /* input */
|
||||
u64 start_lba;
|
||||
u64 end_lba;
|
||||
u32 pid;
|
||||
};
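For orientation only (an editorial sketch, not part of this header), user space drives this structure through the BLKTRACE* ioctls added elsewhere in this merge; sizes below are made up and error handling is minimal:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* BLKTRACESETUP and friends */
#include <linux/blktrace_api.h>

int start_blktrace(const char *dev)
{
	struct blk_user_trace_setup buts;
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size, per CPU */
	buts.buf_nr   = 4;		/* number of sub-buffers */
	buts.act_mask = 0;		/* 0 lets the kernel trace all categories */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
	    ioctl(fd, BLKTRACESTART) < 0) {
		close(fd);
		return -1;
	}

	/* buts.name is filled in by the kernel (note the "output" field above);
	 * stop later with BLKTRACESTOP, release with BLKTRACETEARDOWN. */
	return fd;
}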
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_IO_TRACE)
|
||||
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
|
||||
extern void blk_trace_shutdown(request_queue_t *);
|
||||
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
|
||||
|
||||
/**
|
||||
* blk_add_trace_rq - Add a trace for a request oriented action
|
||||
* @q: queue the io is for
|
||||
* @rq: the source request
|
||||
* @what: the action
|
||||
*
|
||||
* Description:
|
||||
* Records an action against a request. Will log the bio offset + size.
|
||||
*
|
||||
**/
|
||||
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
|
||||
u32 what)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
int rw = rq->flags & 0x07;
|
||||
|
||||
if (likely(!bt))
|
||||
return;
|
||||
|
||||
if (blk_pc_request(rq)) {
|
||||
what |= BLK_TC_ACT(BLK_TC_PC);
|
||||
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
|
||||
} else {
|
||||
what |= BLK_TC_ACT(BLK_TC_FS);
|
||||
__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_add_trace_bio - Add a trace for a bio oriented action
|
||||
* @q: queue the io is for
|
||||
* @bio: the source bio
|
||||
* @what: the action
|
||||
*
|
||||
* Description:
|
||||
* Records an action against a bio. Will log the bio offset + size.
|
||||
*
|
||||
**/
|
||||
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
|
||||
u32 what)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
|
||||
if (likely(!bt))
|
||||
return;
|
||||
|
||||
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_add_trace_generic - Add a trace for a generic action
|
||||
* @q: queue the io is for
|
||||
* @bio: the source bio
|
||||
* @rw: the data direction
|
||||
* @what: the action
|
||||
*
|
||||
* Description:
|
||||
* Records a simple trace
|
||||
*
|
||||
**/
|
||||
static inline void blk_add_trace_generic(struct request_queue *q,
|
||||
struct bio *bio, int rw, u32 what)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
|
||||
if (likely(!bt))
|
||||
return;
|
||||
|
||||
if (bio)
|
||||
blk_add_trace_bio(q, bio, what);
|
||||
else
|
||||
__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
|
||||
* @q: queue the io is for
|
||||
* @what: the action
|
||||
* @bio: the source bio
|
||||
* @pdu: the integer payload
|
||||
*
|
||||
* Description:
|
||||
* Adds a trace with some integer payload. This might be an unplug
|
||||
* option given as the action, with the depth at unplug time given
|
||||
* as the payload
|
||||
*
|
||||
**/
|
||||
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
|
||||
struct bio *bio, unsigned int pdu)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
u64 rpdu = cpu_to_be64(pdu);
|
||||
|
||||
if (likely(!bt))
|
||||
return;
|
||||
|
||||
if (bio)
|
||||
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
|
||||
else
|
||||
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_add_trace_remap - Add a trace for a remap operation
|
||||
* @q: queue the io is for
|
||||
* @bio: the source bio
|
||||
* @dev: target device
|
||||
* @from: source sector
|
||||
* @to: target sector
|
||||
*
|
||||
* Description:
|
||||
* A device mapper or raid target sometimes needs to split a bio because
* it spans a stripe (or similar). Add a trace for that action.
|
||||
*
|
||||
**/
|
||||
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
|
||||
dev_t dev, sector_t from, sector_t to)
|
||||
{
|
||||
struct blk_trace *bt = q->blk_trace;
|
||||
struct blk_io_trace_remap r;
|
||||
|
||||
if (likely(!bt))
|
||||
return;
|
||||
|
||||
r.device = cpu_to_be32(dev);
|
||||
r.sector = cpu_to_be64(to);
|
||||
|
||||
__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_BLK_DEV_IO_TRACE */
|
||||
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
|
||||
#define blk_trace_shutdown(q) do { } while (0)
|
||||
#define blk_add_trace_rq(q, rq, what) do { } while (0)
|
||||
#define blk_add_trace_bio(q, rq, what) do { } while (0)
|
||||
#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
|
||||
#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
|
||||
#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
|
||||
#endif /* CONFIG_BLK_DEV_IO_TRACE */
|
||||
|
||||
#endif
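To make the intended call pattern concrete (an editorial sketch, not from the diff), block-layer code would invoke the helpers above roughly like this; the wrapper function is hypothetical:

/* Hypothetical trace points, mirroring how the block layer uses the helpers. */
static void example_trace_points(struct request_queue *q,
				 struct request *rq, struct bio *bio)
{
	/* bio entered the queue */
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

	/* request handed to the low-level driver */
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);

	/* queue unplugged by I/O, logging the current request count as payload */
	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
			      q->rq.count[READ] + q->rq.count[WRITE]);
}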
|
|
@ -13,9 +13,7 @@
|
|||
#define SMP_CACHE_BYTES L1_CACHE_BYTES
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
|
||||
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
|
||||
#else
|
||||
#ifndef __read_mostly
|
||||
#define __read_mostly
|
||||
#endif
|
||||
|
||||
|
|
|
@ -378,7 +378,6 @@ struct cdrom_generic_command
|
|||
#define CDC_MEDIA_CHANGED 0x80 /* media changed */
|
||||
#define CDC_PLAY_AUDIO 0x100 /* audio functions */
|
||||
#define CDC_RESET 0x200 /* hard reset device */
|
||||
#define CDC_IOCTLS 0x400 /* driver has non-standard ioctls */
|
||||
#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */
|
||||
#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */
|
||||
#define CDC_CD_R 0x2000 /* drive is a CD-R */
|
||||
|
@ -974,9 +973,7 @@ struct cdrom_device_ops {
|
|||
int (*reset) (struct cdrom_device_info *);
|
||||
/* play stuff */
|
||||
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
|
||||
/* dev-specific */
|
||||
int (*dev_ioctl) (struct cdrom_device_info *,
|
||||
unsigned int, unsigned long);
|
||||
|
||||
/* driver specifications */
|
||||
const int capability; /* capability flags */
|
||||
int n_minors; /* number of active minor devices */
|
||||
|
|
|
@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART)
|
|||
COMPATIBLE_IOCTL(BLKFLSBUF)
|
||||
COMPATIBLE_IOCTL(BLKSECTSET)
|
||||
COMPATIBLE_IOCTL(BLKSSZGET)
|
||||
COMPATIBLE_IOCTL(BLKTRACESTART)
|
||||
COMPATIBLE_IOCTL(BLKTRACESTOP)
|
||||
COMPATIBLE_IOCTL(BLKTRACESETUP)
|
||||
COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
|
||||
ULONG_IOCTL(BLKRASET)
|
||||
ULONG_IOCTL(BLKFRASET)
|
||||
/* RAID */
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* cpuset interface
|
||||
*
|
||||
* Copyright (C) 2003 BULL SA
|
||||
* Copyright (C) 2004 Silicon Graphics, Inc.
|
||||
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -51,6 +51,18 @@ extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
|
|||
extern void cpuset_lock(void);
|
||||
extern void cpuset_unlock(void);
|
||||
|
||||
extern int cpuset_mem_spread_node(void);
|
||||
|
||||
static inline int cpuset_do_page_mem_spread(void)
|
||||
{
|
||||
return current->flags & PF_SPREAD_PAGE;
|
||||
}
|
||||
|
||||
static inline int cpuset_do_slab_mem_spread(void)
|
||||
{
|
||||
return current->flags & PF_SPREAD_SLAB;
|
||||
}
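A short, hedged sketch (not from the patch) of how an allocator is expected to consult these spread helpers:

/* Sketch: spread page-cache pages over the cpuset when asked to. */
static struct page *spread_aware_alloc(struct address_space *mapping)
{
	gfp_t gfp = mapping_gfp_mask(mapping);

	if (cpuset_do_page_mem_spread()) {
		int node = cpuset_mem_spread_node();	/* rotates across mems_allowed */

		return alloc_pages_node(node, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}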
|
||||
|
||||
#else /* !CONFIG_CPUSETS */
|
||||
|
||||
static inline int cpuset_init_early(void) { return 0; }
|
||||
|
@ -99,6 +111,21 @@ static inline char *cpuset_task_status_allowed(struct task_struct *task,
|
|||
static inline void cpuset_lock(void) {}
|
||||
static inline void cpuset_unlock(void) {}
|
||||
|
||||
static inline int cpuset_mem_spread_node(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cpuset_do_page_mem_spread(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cpuset_do_slab_mem_spread(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* !CONFIG_CPUSETS */
|
||||
|
||||
#endif /* _LINUX_CPUSET_H */
|
||||
|
|
|
@ -52,7 +52,12 @@ struct file;
|
|||
#ifdef CONFIG_EPOLL
|
||||
|
||||
/* Used to initialize the epoll bits inside the "struct file" */
|
||||
void eventpoll_init_file(struct file *file);
|
||||
static inline void eventpoll_init_file(struct file *file)
|
||||
{
|
||||
INIT_LIST_HEAD(&file->f_ep_links);
|
||||
spin_lock_init(&file->f_ep_lock);
|
||||
}
|
||||
|
||||
|
||||
/* Used to release the epoll bits inside the "struct file" */
|
||||
void eventpoll_release_file(struct file *file);
|
||||
|
@ -85,7 +90,6 @@ static inline void eventpoll_release(struct file *file)
|
|||
eventpoll_release_file(file);
|
||||
}
|
||||
|
||||
|
||||
#else
|
||||
|
||||
static inline void eventpoll_init_file(struct file *file) {}
|
||||
|
|
|
@ -772,9 +772,12 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
|
|||
|
||||
|
||||
/* inode.c */
|
||||
extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
|
||||
extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
|
||||
extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
|
||||
int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
|
||||
struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
|
||||
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
|
||||
int ext3_get_block_handle(handle_t *handle, struct inode *inode,
|
||||
sector_t iblock, struct buffer_head *bh_result, int create,
|
||||
int extend_disksize);
|
||||
|
||||
extern void ext3_read_inode (struct inode *);
|
||||
extern int ext3_write_inode (struct inode *, int);
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/rwsem.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/seqlock.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
struct ext3_reserve_window {
|
||||
__u32 _rsv_start; /* First byte reserved */
|
||||
|
@ -122,16 +123,16 @@ struct ext3_inode_info {
|
|||
__u16 i_extra_isize;
|
||||
|
||||
/*
|
||||
* truncate_sem is for serialising ext3_truncate() against
|
||||
* truncate_mutex is for serialising ext3_truncate() against
|
||||
* ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
|
||||
* data tree are chopped off during truncate. We can't do that in
|
||||
* ext3 because whenever we perform intermediate commits during
|
||||
* truncate, the inode and all the metadata blocks *must* be in a
|
||||
* consistent state which allows truncation of the orphans to restart
|
||||
* during recovery. Hence we must fix the get_block-vs-truncate race
|
||||
* by other means, so we have truncate_sem.
|
||||
* by other means, so we have truncate_mutex.
|
||||
*/
|
||||
struct semaphore truncate_sem;
|
||||
struct mutex truncate_mutex;
|
||||
struct inode vfs_inode;
|
||||
};
|
||||
|
||||
|
|
|
@ -18,4 +18,10 @@
|
|||
#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Linux-specific fadvise() extensions:
|
||||
*/
|
||||
#define LINUX_FADV_ASYNC_WRITE 32 /* Start writeout on range */
|
||||
#define LINUX_FADV_WRITE_WAIT 33 /* Wait upon writeout to range */
|
||||
|
||||
#endif /* FADVISE_H_INCLUDED */
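As an aside (editorial, not part of the header), user space would reach these new hints through posix_fadvise(); the constants may need to be defined locally if the C library's headers do not carry them yet. A hedged sketch:

#include <fcntl.h>

#ifndef LINUX_FADV_ASYNC_WRITE
#define LINUX_FADV_ASYNC_WRITE	32	/* mirrors the kernel values above */
#define LINUX_FADV_WRITE_WAIT	33
#endif

static void flush_range(int fd, off_t off, off_t len)
{
	posix_fadvise(fd, off, len, LINUX_FADV_ASYNC_WRITE);	/* kick off writeout */
	/* ... overlap other work here ... */
	posix_fadvise(fd, off, len, LINUX_FADV_WRITE_WAIT);	/* wait for it to finish */
}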
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/compiler.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* The default fd array needs to be at least BITS_PER_LONG,
|
||||
|
@ -17,10 +18,22 @@
|
|||
*/
|
||||
#define NR_OPEN_DEFAULT BITS_PER_LONG
|
||||
|
||||
/*
|
||||
* The embedded_fd_set is a small fd_set,
|
||||
* suitable for most tasks (which open <= BITS_PER_LONG files)
|
||||
*/
|
||||
struct embedded_fd_set {
|
||||
unsigned long fds_bits[1];
|
||||
};
|
||||
|
||||
/*
|
||||
* More than this number of fds: we use a separately allocated fd_set
|
||||
*/
|
||||
#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
|
||||
|
||||
struct fdtable {
|
||||
unsigned int max_fds;
|
||||
int max_fdset;
|
||||
int next_fd;
|
||||
struct file ** fd; /* current fd array */
|
||||
fd_set *close_on_exec;
|
||||
fd_set *open_fds;
|
||||
|
@ -33,13 +46,20 @@ struct fdtable {
|
|||
* Open file table structure
|
||||
*/
|
||||
struct files_struct {
|
||||
/*
|
||||
* read mostly part
|
||||
*/
|
||||
atomic_t count;
|
||||
struct fdtable *fdt;
|
||||
struct fdtable fdtab;
|
||||
fd_set close_on_exec_init;
|
||||
fd_set open_fds_init;
|
||||
/*
|
||||
* written part on a separate cache line in SMP
|
||||
*/
|
||||
spinlock_t file_lock ____cacheline_aligned_in_smp;
|
||||
int next_fd;
|
||||
struct embedded_fd_set close_on_exec_init;
|
||||
struct embedded_fd_set open_fds_init;
|
||||
struct file * fd_array[NR_OPEN_DEFAULT];
|
||||
spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
|
||||
};
|
||||
|
||||
#define files_fdtable(files) (rcu_dereference((files)->fdt))
|
||||
|
|
|
@ -103,7 +103,9 @@ extern int dir_notify_enable;
|
|||
#define MS_BIND 4096
|
||||
#define MS_MOVE 8192
|
||||
#define MS_REC 16384
|
||||
#define MS_VERBOSE 32768
|
||||
#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
|
||||
MS_VERBOSE is deprecated. */
|
||||
#define MS_SILENT 32768
|
||||
#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
|
||||
#define MS_UNBINDABLE (1<<17) /* change to unbindable */
|
||||
#define MS_PRIVATE (1<<18) /* change to private */
|
||||
|
@ -197,6 +199,10 @@ extern int dir_notify_enable;
|
|||
#define BLKBSZGET _IOR(0x12,112,size_t)
|
||||
#define BLKBSZSET _IOW(0x12,113,size_t)
|
||||
#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
|
||||
#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
|
||||
#define BLKTRACESTART _IO(0x12,116)
|
||||
#define BLKTRACESTOP _IO(0x12,117)
|
||||
#define BLKTRACETEARDOWN _IO(0x12,118)
|
||||
|
||||
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
|
||||
#define FIBMAP _IO(0x00,1) /* bmap access */
|
||||
|
@ -344,7 +350,7 @@ struct address_space_operations {
|
|||
/* Write back some dirty pages from this mapping. */
|
||||
int (*writepages)(struct address_space *, struct writeback_control *);
|
||||
|
||||
/* Set a page dirty */
|
||||
/* Set a page dirty. Return true if this dirtied it */
|
||||
int (*set_page_dirty)(struct page *page);
|
||||
|
||||
int (*readpages)(struct file *filp, struct address_space *mapping,
|
||||
|
@ -397,8 +403,8 @@ struct block_device {
|
|||
dev_t bd_dev; /* not a kdev_t - it's a search key */
|
||||
struct inode * bd_inode; /* will die */
|
||||
int bd_openers;
|
||||
struct semaphore bd_sem; /* open/close mutex */
|
||||
struct semaphore bd_mount_sem; /* mount mutex */
|
||||
struct mutex bd_mutex; /* open/close mutex */
|
||||
struct mutex bd_mount_mutex; /* mount mutex */
|
||||
struct list_head bd_inodes;
|
||||
void * bd_holder;
|
||||
int bd_holders;
|
||||
|
@ -509,7 +515,7 @@ struct inode {
|
|||
|
||||
#ifdef CONFIG_INOTIFY
|
||||
struct list_head inotify_watches; /* watches on this inode */
|
||||
struct semaphore inotify_sem; /* protects the watches list */
|
||||
struct mutex inotify_mutex; /* protects the watches list */
|
||||
#endif
|
||||
|
||||
unsigned long i_state;
|
||||
|
@ -847,7 +853,7 @@ struct super_block {
|
|||
* The next field is for VFS *only*. No filesystems have any business
|
||||
* even looking at it. You had been warned.
|
||||
*/
|
||||
struct semaphore s_vfs_rename_sem; /* Kludge */
|
||||
struct mutex s_vfs_rename_mutex; /* Kludge */
|
||||
|
||||
/* Granularity of c/m/atime in ns.
|
||||
Cannot be worse than a second */
|
||||
|
@ -1115,6 +1121,18 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
|
|||
__mark_inode_dirty(inode, I_DIRTY_SYNC);
|
||||
}
|
||||
|
||||
static inline void inode_inc_link_count(struct inode *inode)
|
||||
{
|
||||
inode->i_nlink++;
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
|
||||
static inline void inode_dec_link_count(struct inode *inode)
|
||||
{
|
||||
inode->i_nlink--;
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
|
||||
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
|
||||
static inline void file_accessed(struct file *file)
|
||||
{
|
||||
|
@ -1455,6 +1473,12 @@ extern int filemap_fdatawait(struct address_space *);
|
|||
extern int filemap_write_and_wait(struct address_space *mapping);
|
||||
extern int filemap_write_and_wait_range(struct address_space *mapping,
|
||||
loff_t lstart, loff_t lend);
|
||||
extern int wait_on_page_writeback_range(struct address_space *mapping,
|
||||
pgoff_t start, pgoff_t end);
|
||||
extern int __filemap_fdatawrite_range(struct address_space *mapping,
|
||||
loff_t start, loff_t end, int sync_mode);
|
||||
|
||||
extern long do_fsync(struct file *file, int datasync);
|
||||
extern void sync_supers(void);
|
||||
extern void sync_filesystems(int wait);
|
||||
extern void emergency_sync(void);
|
||||
|
@ -1534,7 +1558,7 @@ extern void destroy_inode(struct inode *);
|
|||
extern struct inode *new_inode(struct super_block *);
|
||||
extern int remove_suid(struct dentry *);
|
||||
extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
|
||||
extern struct semaphore iprune_sem;
|
||||
extern struct mutex iprune_mutex;
|
||||
|
||||
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
|
||||
extern void remove_inode_hash(struct inode *);
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
#ifndef GENERIC_SERIAL_H
|
||||
#define GENERIC_SERIAL_H
|
||||
|
||||
#include <linux/mutex.h>
|
||||
|
||||
struct real_driver {
|
||||
void (*disable_tx_interrupts) (void *);
|
||||
void (*enable_tx_interrupts) (void *);
|
||||
|
@ -34,7 +36,7 @@ struct gs_port {
|
|||
int xmit_head;
|
||||
int xmit_tail;
|
||||
int xmit_cnt;
|
||||
struct semaphore port_write_sem;
|
||||
struct mutex port_write_mutex;
|
||||
int flags;
|
||||
wait_queue_head_t open_wait;
|
||||
wait_queue_head_t close_wait;
|
||||
|
|
|
@ -149,22 +149,16 @@ struct disk_attribute {
|
|||
({ \
|
||||
typeof(gendiskp->dkstats->field) res = 0; \
|
||||
int i; \
|
||||
for (i=0; i < NR_CPUS; i++) { \
|
||||
if (!cpu_possible(i)) \
|
||||
continue; \
|
||||
for_each_cpu(i) \
|
||||
res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
|
||||
} \
|
||||
res; \
|
||||
})
|
||||
|
||||
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
|
||||
int i;
|
||||
for (i=0; i < NR_CPUS; i++) {
|
||||
if (cpu_possible(i)) {
|
||||
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
|
||||
sizeof (struct disk_stats));
|
||||
}
|
||||
}
|
||||
for_each_cpu(i)
|
||||
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
|
||||
sizeof (struct disk_stats));
|
||||
}
|
||||
|
||||
#else
|
||||
|
|
|
@ -27,11 +27,13 @@ struct sensor_device_attribute{
|
|||
#define to_sensor_dev_attr(_dev_attr) \
|
||||
container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
|
||||
|
||||
#define SENSOR_DEVICE_ATTR(_name,_mode,_show,_store,_index) \
|
||||
struct sensor_device_attribute sensor_dev_attr_##_name = { \
|
||||
.dev_attr = __ATTR(_name,_mode,_show,_store), \
|
||||
.index = _index, \
|
||||
}
|
||||
#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \
|
||||
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
|
||||
.index = _index }
|
||||
|
||||
#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
|
||||
struct sensor_device_attribute sensor_dev_attr_##_name \
|
||||
= SENSOR_ATTR(_name, _mode, _show, _store, _index)
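For context (editorial sketch, not in the diff), a hwmon driver typically declares indexed attributes with these macros along the following lines; read_temp_mc() is a made-up helper:

static ssize_t show_temp(struct device *dev,
			 struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

	/* attr->index selects which sensor channel to report */
	return sprintf(buf, "%d\n", read_temp_mc(attr->index));
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);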
|
||||
|
||||
struct sensor_device_attribute_2 {
|
||||
struct device_attribute dev_attr;
|
||||
|
@ -41,11 +43,13 @@ struct sensor_device_attribute_2 {
|
|||
#define to_sensor_dev_attr_2(_dev_attr) \
|
||||
container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
|
||||
|
||||
#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \
|
||||
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
|
||||
.index = _index, \
|
||||
.nr = _nr }
|
||||
|
||||
#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
|
||||
struct sensor_device_attribute_2 sensor_dev_attr_##_name = { \
|
||||
.dev_attr = __ATTR(_name,_mode,_show,_store), \
|
||||
.index = _index, \
|
||||
.nr = _nr, \
|
||||
}
|
||||
struct sensor_device_attribute_2 sensor_dev_attr_##_name \
|
||||
= SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
|
||||
|
||||
#endif /* _LINUX_HWMON_SYSFS_H */
|
||||
|
|
|
@ -172,7 +172,6 @@
|
|||
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
|
||||
#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */
|
||||
#define I2C_HW_B_TSUNA 0x010012 /* DEC Tsunami chipset */
|
||||
#define I2C_HW_B_FRODO 0x010013 /* 2d3D SA-1110 Development Board */
|
||||
#define I2C_HW_B_OMAHA 0x010014 /* Omaha I2C interface (ARM) */
|
||||
#define I2C_HW_B_GUIDE 0x010015 /* Guide bit-basher */
|
||||
#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/device.h> /* for struct device */
|
||||
#include <linux/sched.h> /* for completion */
|
||||
#include <asm/semaphore.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
/* --- For i2c-isa ---------------------------------------------------- */
|
||||
|
||||
|
@ -225,8 +225,8 @@ struct i2c_adapter {
|
|||
int (*client_unregister)(struct i2c_client *);
|
||||
|
||||
/* data fields that are valid for all devices */
|
||||
struct semaphore bus_lock;
|
||||
struct semaphore clist_lock;
|
||||
struct mutex bus_lock;
|
||||
struct mutex clist_lock;
|
||||
|
||||
int timeout;
|
||||
int retries;
|
||||
|
|
|
@ -792,6 +792,7 @@ typedef struct hwif_s {
|
|||
unsigned no_dsc : 1; /* 0 default, 1 dsc_overlap disabled */
|
||||
unsigned auto_poll : 1; /* supports nop auto-poll */
|
||||
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
|
||||
unsigned no_io_32bit : 1; /* 1 = can not do 32-bit IO ops */
|
||||
|
||||
struct device gendev;
|
||||
struct completion gendev_rel_comp; /* To deal with device release() */
|
||||
|
|
|
@ -7,11 +7,10 @@
|
|||
#define INIT_FDTABLE \
|
||||
{ \
|
||||
.max_fds = NR_OPEN_DEFAULT, \
|
||||
.max_fdset = __FD_SETSIZE, \
|
||||
.next_fd = 0, \
|
||||
.max_fdset = EMBEDDED_FD_SET_SIZE, \
|
||||
.fd = &init_files.fd_array[0], \
|
||||
.close_on_exec = &init_files.close_on_exec_init, \
|
||||
.open_fds = &init_files.open_fds_init, \
|
||||
.close_on_exec = (fd_set *)&init_files.close_on_exec_init, \
|
||||
.open_fds = (fd_set *)&init_files.open_fds_init, \
|
||||
.rcu = RCU_HEAD_INIT, \
|
||||
.free_files = NULL, \
|
||||
.next = NULL, \
|
||||
|
@ -20,9 +19,10 @@
|
|||
#define INIT_FILES \
|
||||
{ \
|
||||
.count = ATOMIC_INIT(1), \
|
||||
.file_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.fdt = &init_files.fdtab, \
|
||||
.fdtab = INIT_FDTABLE, \
|
||||
.file_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.next_fd = 0, \
|
||||
.close_on_exec_init = { { 0, } }, \
|
||||
.open_fds_init = { { 0, } }, \
|
||||
.fd_array = { NULL, } \
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/journal-head.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/bit_spinlock.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/semaphore.h>
|
||||
#endif
|
||||
|
||||
|
@ -575,7 +576,7 @@ struct transaction_s
|
|||
* @j_wait_checkpoint: Wait queue to trigger checkpointing
|
||||
* @j_wait_commit: Wait queue to trigger commit
|
||||
* @j_wait_updates: Wait queue to wait for updates to complete
|
||||
* @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints
|
||||
* @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
|
||||
* @j_head: Journal head - identifies the first unused block in the journal
|
||||
* @j_tail: Journal tail - identifies the oldest still-used block in the
|
||||
* journal.
|
||||
|
@ -645,7 +646,7 @@ struct journal_s
|
|||
int j_barrier_count;
|
||||
|
||||
/* The barrier lock itself */
|
||||
struct semaphore j_barrier;
|
||||
struct mutex j_barrier;
|
||||
|
||||
/*
|
||||
* Transactions: The current running transaction...
|
||||
|
@ -687,7 +688,7 @@ struct journal_s
|
|||
wait_queue_head_t j_wait_updates;
|
||||
|
||||
/* Semaphore for locking against concurrent checkpoints */
|
||||
struct semaphore j_checkpoint_sem;
|
||||
struct mutex j_checkpoint_mutex;
|
||||
|
||||
/*
|
||||
* Journal head: identifies the first unused block in the journal.
|
||||
|
|
|
@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list;
|
|||
extern long (*panic_blink)(long time);
|
||||
NORET_TYPE void panic(const char * fmt, ...)
|
||||
__attribute__ ((NORET_AND format (printf, 1, 2)));
|
||||
extern void oops_enter(void);
|
||||
extern void oops_exit(void);
|
||||
extern int oops_may_print(void);
|
||||
fastcall NORET_TYPE void do_exit(long error_code)
|
||||
ATTRIB_NORET;
|
||||
NORET_TYPE void complete_and_exit(struct completion *, long)
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include <linux/percpu.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#ifdef CONFIG_KPROBES
|
||||
#include <asm/kprobes.h>
|
||||
|
@ -152,7 +153,7 @@ struct kretprobe_instance {
|
|||
};
|
||||
|
||||
extern spinlock_t kretprobe_lock;
|
||||
extern struct semaphore kprobe_mutex;
|
||||
extern struct mutex kprobe_mutex;
|
||||
extern int arch_prepare_kprobe(struct kprobe *p);
|
||||
extern void arch_arm_kprobe(struct kprobe *p);
|
||||
extern void arch_disarm_kprobe(struct kprobe *p);
|
||||
|
|
|
@ -162,7 +162,6 @@ enum {
|
|||
ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
|
||||
|
||||
/* various lengths of time */
|
||||
ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
|
||||
ATA_TMOUT_PIO = 30 * HZ,
|
||||
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
|
||||
ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
|
||||
|
@ -363,6 +362,11 @@ struct ata_device {
|
|||
unsigned int max_sectors; /* per-device max sectors */
|
||||
unsigned int cdb_len;
|
||||
|
||||
/* per-dev xfer mask */
|
||||
unsigned int pio_mask;
|
||||
unsigned int mwdma_mask;
|
||||
unsigned int udma_mask;
|
||||
|
||||
/* for CHS addressing */
|
||||
u16 cylinders; /* Number of cylinders */
|
||||
u16 heads; /* Number of heads */
|
||||
|
@ -400,6 +404,7 @@ struct ata_port {
|
|||
|
||||
struct ata_host_stats stats;
|
||||
struct ata_host_set *host_set;
|
||||
struct device *dev;
|
||||
|
||||
struct work_struct port_task;
|
||||
|
||||
|
@ -520,9 +525,9 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
|
|||
extern int ata_scsi_release(struct Scsi_Host *host);
|
||||
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
|
||||
extern int ata_scsi_device_resume(struct scsi_device *);
|
||||
extern int ata_scsi_device_suspend(struct scsi_device *);
|
||||
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
|
||||
extern int ata_device_resume(struct ata_port *, struct ata_device *);
|
||||
extern int ata_device_suspend(struct ata_port *, struct ata_device *);
|
||||
extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state);
|
||||
extern int ata_ratelimit(void);
|
||||
extern unsigned int ata_busy_sleep(struct ata_port *ap,
|
||||
unsigned long timeout_pat,
|
||||
|
@ -573,6 +578,8 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
|
|||
struct block_device *bdev,
|
||||
sector_t capacity, int geom[]);
|
||||
extern int ata_scsi_slave_config(struct scsi_device *sdev);
|
||||
extern struct ata_device *ata_dev_pair(struct ata_port *ap,
|
||||
struct ata_device *adev);
|
||||
|
||||
/*
|
||||
* Timing helpers
|
||||
|
|
|
@ -28,17 +28,27 @@
|
|||
#define ALIGN __ALIGN
|
||||
#define ALIGN_STR __ALIGN_STR
|
||||
|
||||
#ifndef ENTRY
|
||||
#define ENTRY(name) \
|
||||
.globl name; \
|
||||
ALIGN; \
|
||||
name:
|
||||
#endif
|
||||
|
||||
#define KPROBE_ENTRY(name) \
|
||||
.section .kprobes.text, "ax"; \
|
||||
.globl name; \
|
||||
ALIGN; \
|
||||
name:
|
||||
ENTRY(name)
|
||||
|
||||
#ifndef END
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
#endif
|
||||
|
||||
#ifndef ENDPROC
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function; \
|
||||
END(name)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
/* Possible states of device */
|
||||
enum {
|
||||
|
@ -60,7 +61,7 @@ struct loop_device {
|
|||
int lo_state;
|
||||
struct completion lo_done;
|
||||
struct completion lo_bh_done;
|
||||
struct semaphore lo_ctl_mutex;
|
||||
struct mutex lo_ctl_mutex;
|
||||
int lo_pending;
|
||||
|
||||
request_queue_t *lo_queue;
|
||||
|
|
|
@ -147,6 +147,7 @@ extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
|
|||
extern void mpol_rebind_task(struct task_struct *tsk,
|
||||
const nodemask_t *new);
|
||||
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
|
||||
extern void mpol_fix_fork_child_flag(struct task_struct *p);
|
||||
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
|
||||
|
||||
#ifdef CONFIG_CPUSET
|
||||
|
@ -248,6 +249,10 @@ static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
|
|||
{
|
||||
}
|
||||
|
||||
static inline void mpol_fix_fork_child_flag(struct task_struct *p)
|
||||
{
|
||||
}
|
||||
|
||||
#define set_cpuset_being_rebound(x) do {} while (0)
|
||||
|
||||
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
|
||||
|
|
|
@ -184,6 +184,7 @@ struct fat_slot_info {
|
|||
#include <linux/string.h>
|
||||
#include <linux/nls.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
struct fat_mount_options {
|
||||
uid_t fs_uid;
|
||||
|
@ -226,7 +227,7 @@ struct msdos_sb_info {
|
|||
unsigned long max_cluster; /* maximum cluster number */
|
||||
unsigned long root_cluster; /* first cluster of the root directory */
|
||||
unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
|
||||
struct semaphore fat_lock;
|
||||
struct mutex fat_lock;
|
||||
unsigned int prev_free; /* previously allocated cluster number */
|
||||
unsigned int free_clusters; /* -1 if undefined */
|
||||
struct fat_mount_options options;
|
||||
|
|
|
@ -38,6 +38,7 @@ enum {
|
|||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/wait.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
/* values for flags field */
|
||||
#define NBD_READ_ONLY 0x0001
|
||||
|
@ -57,7 +58,7 @@ struct nbd_device {
|
|||
struct request *active_req;
|
||||
wait_queue_head_t active_wq;
|
||||
|
||||
struct semaphore tx_lock;
|
||||
struct mutex tx_lock;
|
||||
struct gendisk *disk;
|
||||
int blksize;
|
||||
u64 bytesize;
|
||||
|
|
|
@ -19,7 +19,7 @@ struct ncp_inode_info {
|
|||
__le32 DosDirNum;
|
||||
__u8 volNumber;
|
||||
__le32 nwattr;
|
||||
struct semaphore open_sem;
|
||||
struct mutex open_mutex;
|
||||
atomic_t opened;
|
||||
int access;
|
||||
int flags;
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/ncp_mount.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
|
@ -51,7 +52,7 @@ struct ncp_server {
|
|||
receive replies */
|
||||
|
||||
int lock; /* To prevent mismatch in protocols. */
|
||||
struct semaphore sem;
|
||||
struct mutex mutex;
|
||||
|
||||
int current_size; /* for packet preparation */
|
||||
int has_subfunction;
|
||||
|
@ -96,7 +97,7 @@ struct ncp_server {
|
|||
struct {
|
||||
struct work_struct tq; /* STREAM/DGRAM: data/error ready */
|
||||
struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
|
||||
struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */
|
||||
struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */
|
||||
|
||||
unsigned int state; /* STREAM only: receiver state */
|
||||
struct {
|
||||
|
|
|
@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
|
|||
#define page_cache_release(page) put_page(page)
|
||||
void release_pages(struct page **pages, int nr, int cold);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
extern struct page *page_cache_alloc(struct address_space *x);
|
||||
extern struct page *page_cache_alloc_cold(struct address_space *x);
|
||||
#else
|
||||
static inline struct page *page_cache_alloc(struct address_space *x)
|
||||
{
|
||||
return alloc_pages(mapping_gfp_mask(x), 0);
|
||||
|
@ -60,6 +64,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
|
|||
{
|
||||
return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef int filler_t(void *, struct page *);
|
||||
|
||||
|
|
|
@ -95,6 +95,11 @@ enum pci_channel_state {
|
|||
pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
|
||||
};
|
||||
|
||||
typedef unsigned short __bitwise pci_bus_flags_t;
|
||||
enum pci_bus_flags {
|
||||
PCI_BUS_FLAGS_NO_MSI = (pci_bus_flags_t) 1,
|
||||
};
|
||||
|
||||
/*
|
||||
* The pci_dev structure is used to describe PCI devices.
|
||||
*/
|
||||
|
@ -203,7 +208,7 @@ struct pci_bus {
|
|||
char name[48];
|
||||
|
||||
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
|
||||
unsigned short pad2;
|
||||
pci_bus_flags_t bus_flags; /* Inherited by child busses */
|
||||
struct device *bridge;
|
||||
struct class_device class_dev;
|
||||
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
|
||||
|
@ -485,9 +490,9 @@ void pdev_sort_resources(struct pci_dev *, struct resource_list *);
|
|||
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
|
||||
int (*)(struct pci_dev *, u8, u8));
|
||||
#define HAVE_PCI_REQ_REGIONS 2
|
||||
int pci_request_regions(struct pci_dev *, char *);
|
||||
int pci_request_regions(struct pci_dev *, const char *);
|
||||
void pci_release_regions(struct pci_dev *);
|
||||
int pci_request_region(struct pci_dev *, int, char *);
|
||||
int pci_request_region(struct pci_dev *, int, const char *);
|
||||
void pci_release_region(struct pci_dev *, int);
|
||||
|
||||
/* drivers/pci/bus.c */
|
||||
|
@ -516,6 +521,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
|
|||
void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
|
||||
void *userdata);
|
||||
int pci_cfg_space_size(struct pci_dev *dev);
|
||||
unsigned char pci_bus_max_busnr(struct pci_bus* bus);
|
||||
|
||||
/* kmem_cache style wrapper around pci_alloc_consistent() */
|
||||
|
||||
|
|
|
@ -69,6 +69,7 @@
|
|||
#define PCI_CLASS_SYSTEM_TIMER 0x0802
|
||||
#define PCI_CLASS_SYSTEM_RTC 0x0803
|
||||
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
|
||||
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
|
||||
#define PCI_CLASS_SYSTEM_OTHER 0x0880
|
||||
|
||||
#define PCI_BASE_CLASS_INPUT 0x09
|
||||
|
@ -1371,6 +1372,7 @@
|
|||
#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
|
||||
|
@ -1864,11 +1866,13 @@
|
|||
#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
|
||||
#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
|
||||
#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
|
||||
#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
|
||||
#define PCI_DEVICE_ID_TIGON3_5750 0x1676
|
||||
#define PCI_DEVICE_ID_TIGON3_5751 0x1677
|
||||
#define PCI_DEVICE_ID_TIGON3_5715 0x1678
|
||||
#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
|
||||
#define PCI_DEVICE_ID_TIGON3_5754 0x167a
|
||||
#define PCI_DEVICE_ID_TIGON3_5755 0x167b
|
||||
#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
|
||||
#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
|
||||
#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
/*
|
||||
* include/linux/platform.h - platform driver definitions
|
||||
*
|
||||
* Because of the prolific consumerism of the average American,
|
||||
* and the dominant marketing budgets of PC OEMs, we have been
|
||||
* blessed with frequent updates of the PC architecture.
|
||||
*
|
||||
* While most of these calls are singular per architecture, they
|
||||
* require an extra layer of abstraction on the x86 so the right
|
||||
* subsystem gets the right call.
|
||||
*
|
||||
* Basically, this consolidates the power off and reboot callbacks
|
||||
* into one structure, as well as adding power management hooks.
|
||||
*
|
||||
* When adding a platform driver, please make sure all callbacks are
|
||||
* filled. There are defaults defined below that do nothing; use those
|
||||
* if you do not support that callback.
|
||||
*/
|
||||
|
||||
#ifndef _PLATFORM_H_
|
||||
#define _PLATFORM_H_
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct platform_t {
|
||||
char * name;
|
||||
u32 suspend_states;
|
||||
void (*reboot)(char * cmd);
|
||||
void (*halt)(void);
|
||||
void (*power_off)(void);
|
||||
int (*suspend)(int state, int flags);
|
||||
void (*idle)(void);
|
||||
};
|
||||
|
||||
extern struct platform_t * platform;
|
||||
extern void default_reboot(char * cmd);
|
||||
extern void default_halt(void);
|
||||
extern int default_suspend(int state, int flags);
|
||||
extern void default_idle(void);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _PLATFORM_H */
|
|
@ -188,6 +188,8 @@ extern void device_power_up(void);
|
|||
extern void device_resume(void);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
extern suspend_disk_method_t pm_disk_mode;
|
||||
|
||||
extern int device_suspend(pm_message_t state);
|
||||
|
||||
#define device_set_wakeup_enable(dev,val) \
|
||||
|
@ -215,7 +217,6 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
|
|||
|
||||
static inline void dpm_runtime_resume(struct device * dev)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
|
||||
struct proc_dir_entry;
|
||||
struct pt_regs;
|
||||
struct notifier_block;
|
||||
|
||||
/* init basic kernel profiler */
|
||||
void __init profile_init(void);
|
||||
|
@ -32,7 +33,6 @@ enum profile_type {
|
|||
|
||||
#ifdef CONFIG_PROFILING
|
||||
|
||||
struct notifier_block;
|
||||
struct task_struct;
|
||||
struct mm_struct;
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#define __DQUOT_VERSION__ "dquot_6.5.1"
|
||||
#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
|
||||
|
@ -215,7 +216,7 @@ struct dquot {
|
|||
struct list_head dq_inuse; /* List of all quotas */
|
||||
struct list_head dq_free; /* Free list element */
|
||||
struct list_head dq_dirty; /* List of dirty dquots */
|
||||
struct semaphore dq_lock; /* dquot IO lock */
|
||||
struct mutex dq_lock; /* dquot IO lock */
|
||||
atomic_t dq_count; /* Use count */
|
||||
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
|
||||
struct super_block *dq_sb; /* superblock this applies to */
|
||||
|
@ -285,8 +286,8 @@ struct quota_format_type {
|
|||
|
||||
struct quota_info {
|
||||
unsigned int flags; /* Flags for diskquotas on this device */
|
||||
struct semaphore dqio_sem; /* lock device while I/O in progress */
|
||||
struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */
|
||||
struct mutex dqio_mutex; /* lock device while I/O in progress */
|
||||
struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
|
||||
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
|
||||
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
|
||||
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
|
||||
|
|
|
@ -130,6 +130,6 @@ struct r1bio_s {
|
|||
* with failure when last write completes (and all failed).
|
||||
* Record that bi_end_io was called with this flag...
|
||||
*/
|
||||
#define R1BIO_Returned 4
|
||||
#define R1BIO_Returned 6
|
||||
|
||||
#endif
|
||||
|
|
|
@ -113,8 +113,6 @@ struct rcu_data {
|
|||
|
||||
DECLARE_PER_CPU(struct rcu_data, rcu_data);
|
||||
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
|
||||
extern struct rcu_ctrlblk rcu_ctrlblk;
|
||||
extern struct rcu_ctrlblk rcu_bh_ctrlblk;
|
||||
|
||||
/*
|
||||
* Increment the quiescent state counter.

include/linux/relay.h (new file, 281 lines)
@@ -0,0 +1,281 @@
/*
|
||||
* linux/include/linux/relay.h
|
||||
*
|
||||
* Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
|
||||
* Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
|
||||
*
|
||||
* CONFIG_RELAY definitions and declarations
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_RELAY_H
|
||||
#define _LINUX_RELAY_H
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
/* Needs a _much_ better name... */
|
||||
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
|
||||
|
||||
/*
|
||||
* Tracks changes to rchan/rchan_buf structs
|
||||
*/
|
||||
#define RELAYFS_CHANNEL_VERSION 6
|
||||
|
||||
/*
|
||||
* Per-cpu relay channel buffer
|
||||
*/
|
||||
struct rchan_buf
|
||||
{
|
||||
void *start; /* start of channel buffer */
|
||||
void *data; /* start of current sub-buffer */
|
||||
size_t offset; /* current offset into sub-buffer */
|
||||
size_t subbufs_produced; /* count of sub-buffers produced */
|
||||
size_t subbufs_consumed; /* count of sub-buffers consumed */
|
||||
struct rchan *chan; /* associated channel */
|
||||
wait_queue_head_t read_wait; /* reader wait queue */
|
||||
struct work_struct wake_readers; /* reader wake-up work struct */
|
||||
struct dentry *dentry; /* channel file dentry */
|
||||
struct kref kref; /* channel buffer refcount */
|
||||
struct page **page_array; /* array of current buffer pages */
|
||||
unsigned int page_count; /* number of current buffer pages */
|
||||
unsigned int finalized; /* buffer has been finalized */
|
||||
size_t *padding; /* padding counts per sub-buffer */
|
||||
size_t prev_padding; /* temporary variable */
|
||||
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
|
||||
unsigned int cpu; /* this buf's cpu */
|
||||
} ____cacheline_aligned;
|
||||
|
||||
/*
|
||||
* Relay channel data structure
|
||||
*/
|
||||
struct rchan
|
||||
{
|
||||
u32 version; /* the version of this struct */
|
||||
size_t subbuf_size; /* sub-buffer size */
|
||||
size_t n_subbufs; /* number of sub-buffers per buffer */
|
||||
size_t alloc_size; /* total buffer size allocated */
|
||||
struct rchan_callbacks *cb; /* client callbacks */
|
||||
struct kref kref; /* channel refcount */
|
||||
void *private_data; /* for user-defined data */
|
||||
size_t last_toobig; /* tried to log event > subbuf size */
|
||||
struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
|
||||
};
|
||||
|
||||
/*
|
||||
* Relay channel client callbacks
|
||||
*/
|
||||
struct rchan_callbacks
|
||||
{
|
||||
/*
|
||||
* subbuf_start - called on buffer-switch to a new sub-buffer
|
||||
* @buf: the channel buffer containing the new sub-buffer
|
||||
* @subbuf: the start of the new sub-buffer
|
||||
* @prev_subbuf: the start of the previous sub-buffer
|
||||
* @prev_padding: unused space at the end of previous sub-buffer
|
||||
*
|
||||
* The client should return 1 to continue logging, 0 to stop
|
||||
* logging.
|
||||
*
|
||||
* NOTE: subbuf_start will also be invoked when the buffer is
|
||||
* created, so that the first sub-buffer can be initialized
|
||||
* if necessary. In this case, prev_subbuf will be NULL.
|
||||
*
|
||||
* NOTE: the client can reserve bytes at the beginning of the new
|
||||
* sub-buffer by calling subbuf_start_reserve() in this callback.
|
||||
*/
|
||||
int (*subbuf_start) (struct rchan_buf *buf,
|
||||
void *subbuf,
|
||||
void *prev_subbuf,
|
||||
size_t prev_padding);
|
||||
|
||||
/*
|
||||
* buf_mapped - relay buffer mmap notification
|
||||
* @buf: the channel buffer
|
||||
* @filp: relay file pointer
|
||||
*
|
||||
* Called when a relay file is successfully mmapped
|
||||
*/
|
||||
void (*buf_mapped)(struct rchan_buf *buf,
|
||||
struct file *filp);
|
||||
|
||||
/*
|
||||
* buf_unmapped - relay buffer unmap notification
|
||||
* @buf: the channel buffer
|
||||
* @filp: relay file pointer
|
||||
*
|
||||
* Called when a relay file is successfully unmapped
|
||||
*/
|
||||
void (*buf_unmapped)(struct rchan_buf *buf,
|
||||
struct file *filp);
|
||||
/*
|
||||
* create_buf_file - create file to represent a relay channel buffer
|
||||
* @filename: the name of the file to create
|
||||
* @parent: the parent of the file to create
|
||||
* @mode: the mode of the file to create
|
||||
* @buf: the channel buffer
|
||||
* @is_global: outparam - set non-zero if the buffer should be global
|
||||
*
|
||||
* Called during relay_open(), once for each per-cpu buffer,
|
||||
* to allow the client to create a file to be used to
|
||||
* represent the corresponding channel buffer. If the file is
|
||||
* created outside of relay, the parent must also exist in
|
||||
* that filesystem.
|
||||
*
|
||||
* The callback should return the dentry of the file created
|
||||
* to represent the relay buffer.
|
||||
*
|
||||
* Setting the is_global outparam to a non-zero value will
|
||||
* cause relay_open() to create a single global buffer rather
|
||||
* than the default set of per-cpu buffers.
|
||||
*
|
||||
* See Documentation/filesystems/relayfs.txt for more info.
|
||||
*/
|
||||
struct dentry *(*create_buf_file)(const char *filename,
|
||||
struct dentry *parent,
|
||||
int mode,
|
||||
struct rchan_buf *buf,
|
||||
int *is_global);
|
||||
|
||||
/*
|
||||
* remove_buf_file - remove file representing a relay channel buffer
|
||||
* @dentry: the dentry of the file to remove
|
||||
*
|
||||
* Called during relay_close(), once for each per-cpu buffer,
|
||||
* to allow the client to remove a file used to represent a
|
||||
* channel buffer.
|
||||
*
|
||||
* The callback should return 0 if successful, negative if not.
|
||||
*/
|
||||
int (*remove_buf_file)(struct dentry *dentry);
|
||||
};
|
||||
|
||||
/*
|
||||
* CONFIG_RELAY kernel API, kernel/relay.c
|
||||
*/
|
||||
|
||||
struct rchan *relay_open(const char *base_filename,
|
||||
struct dentry *parent,
|
||||
size_t subbuf_size,
|
||||
size_t n_subbufs,
|
||||
struct rchan_callbacks *cb);
|
||||
extern void relay_close(struct rchan *chan);
|
||||
extern void relay_flush(struct rchan *chan);
|
||||
extern void relay_subbufs_consumed(struct rchan *chan,
|
||||
unsigned int cpu,
|
||||
size_t consumed);
|
||||
extern void relay_reset(struct rchan *chan);
|
||||
extern int relay_buf_full(struct rchan_buf *buf);
|
||||
|
||||
extern size_t relay_switch_subbuf(struct rchan_buf *buf,
|
||||
size_t length);
|
||||
|
||||
/**
|
||||
* relay_write - write data into the channel
|
||||
* @chan: relay channel
|
||||
* @data: data to be written
|
||||
* @length: number of bytes to write
|
||||
*
|
||||
* Writes data into the current cpu's channel buffer.
|
||||
*
|
||||
* Protects the buffer by disabling interrupts. Use this
|
||||
* if you might be logging from interrupt context. Try
|
||||
* __relay_write() if you know you won't be logging from
|
||||
* interrupt context.
|
||||
*/
|
||||
static inline void relay_write(struct rchan *chan,
|
||||
const void *data,
|
||||
size_t length)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rchan_buf *buf;
|
||||
|
||||
local_irq_save(flags);
|
||||
buf = chan->buf[smp_processor_id()];
|
||||
if (unlikely(buf->offset + length > chan->subbuf_size))
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
memcpy(buf->data + buf->offset, data, length);
|
||||
buf->offset += length;
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* __relay_write - write data into the channel
|
||||
* @chan: relay channel
|
||||
* @data: data to be written
|
||||
* @length: number of bytes to write
|
||||
*
|
||||
* Writes data into the current cpu's channel buffer.
|
||||
*
|
||||
* Protects the buffer by disabling preemption. Use
|
||||
* relay_write() if you might be logging from interrupt
|
||||
* context.
|
||||
*/
|
||||
static inline void __relay_write(struct rchan *chan,
|
||||
const void *data,
|
||||
size_t length)
|
||||
{
|
||||
struct rchan_buf *buf;
|
||||
|
||||
buf = chan->buf[get_cpu()];
|
||||
if (unlikely(buf->offset + length > buf->chan->subbuf_size))
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
memcpy(buf->data + buf->offset, data, length);
|
||||
buf->offset += length;
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
/**
|
||||
* relay_reserve - reserve slot in channel buffer
|
||||
* @chan: relay channel
|
||||
* @length: number of bytes to reserve
|
||||
*
|
||||
* Returns pointer to reserved slot, NULL if full.
|
||||
*
|
||||
* Reserves a slot in the current cpu's channel buffer.
|
||||
* Does not protect the buffer at all - caller must provide
|
||||
* appropriate synchronization.
|
||||
*/
|
||||
static inline void *relay_reserve(struct rchan *chan, size_t length)
|
||||
{
|
||||
void *reserved;
|
||||
struct rchan_buf *buf = chan->buf[smp_processor_id()];
|
||||
|
||||
if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
if (!length)
|
||||
return NULL;
|
||||
}
|
||||
reserved = buf->data + buf->offset;
|
||||
buf->offset += length;
|
||||
|
||||
return reserved;
|
||||
}
|
||||
|
||||
/**
|
||||
* subbuf_start_reserve - reserve bytes at the start of a sub-buffer
|
||||
* @buf: relay channel buffer
|
||||
* @length: number of bytes to reserve
|
||||
*
|
||||
* Helper function used to reserve bytes at the beginning of
|
||||
* a sub-buffer in the subbuf_start() callback.
|
||||
*/
|
||||
static inline void subbuf_start_reserve(struct rchan_buf *buf,
|
||||
size_t length)
|
||||
{
|
||||
BUG_ON(length >= buf->chan->subbuf_size - 1);
|
||||
buf->offset = length;
|
||||
}
|
||||
|
||||
/*
|
||||
* exported relay file operations, kernel/relay.c
|
||||
*/
|
||||
extern struct file_operations relay_file_operations;
|
||||
|
||||
#endif /* _LINUX_RELAY_H */
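To tie the declarations above together (an editorial illustration, not from the merge), a minimal relay client usually pairs relay_open() with create_buf_file/remove_buf_file callbacks that hand the per-cpu buffers to debugfs; every name below is hypothetical:

#include <linux/relay.h>
#include <linux/debugfs.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_event {
	u32 id;
	u64 when;
};

static struct rchan *my_chan;

static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent, int mode,
					 struct rchan_buf *buf, int *is_global)
{
	/* expose each per-cpu buffer as a debugfs file served by relay */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks my_relay_cb = {
	.create_buf_file	= my_create_buf_file,
	.remove_buf_file	= my_remove_buf_file,
};

static int my_trace_init(void)
{
	/* 8 sub-buffers of 16 KiB per cpu; tune to the expected event rate */
	my_chan = relay_open("my_trace", NULL, 16 * 1024, 8, &my_relay_cb);
	return my_chan ? 0 : -ENOMEM;
}

static void my_trace_event(u32 id)
{
	struct my_event ev = { .id = id, .when = get_jiffies_64() };

	relay_write(my_chan, &ev, sizeof(ev));	/* safe from interrupt context */
}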
|
||||
|
|
@@ -839,6 +839,7 @@ enum
#define RTMGRP_IPV4_IFADDR 0x10
#define RTMGRP_IPV4_MROUTE 0x20
#define RTMGRP_IPV4_ROUTE 0x40
#define RTMGRP_IPV4_RULE 0x80

#define RTMGRP_IPV6_IFADDR 0x100
#define RTMGRP_IPV6_MROUTE 0x200

@@ -869,7 +870,8 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE
RTNLGRP_IPV4_ROUTE,
#define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE
RTNLGRP_NOP1,
RTNLGRP_IPV4_RULE,
#define RTNLGRP_IPV4_RULE RTNLGRP_IPV4_RULE
RTNLGRP_IPV6_IFADDR,
#define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR
RTNLGRP_IPV6_MROUTE,

@@ -206,11 +206,11 @@ extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(struct pt_regs *regs);
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
static inline void softlockup_tick(struct pt_regs *regs)
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)

@@ -706,6 +706,7 @@ struct task_struct {
prio_array_t *array;

unsigned short ioprio;
unsigned int btrace_seq;

unsigned long sleep_avg;
unsigned long long timestamp, last_ran;

@@ -868,6 +869,7 @@ struct task_struct {
struct cpuset *cpuset;
nodemask_t mems_allowed;
int cpuset_mems_generation;
int cpuset_mem_spread_rotor;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;

@@ -928,6 +930,9 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */

/*
 * Only the _current_ task can read/write to tsk->flags, but other

@@ -4,7 +4,7 @@

#include <linux/types.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <linux/mutex.h>

struct seq_operations;
struct file;

@@ -19,7 +19,7 @@ struct seq_file {
size_t count;
loff_t index;
loff_t version;
struct semaphore sem;
struct mutex lock;
struct seq_operations *op;
void *private;
};

@@ -46,6 +46,7 @@ typedef struct kmem_cache kmem_cache_t;
what is reclaimable later*/
#define SLAB_PANIC 0x00040000UL /* panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* defer freeing pages to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */

/* flags passed to a constructor func */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */

@@ -18,6 +18,8 @@ extern char * strsep(char **,const char *);
extern __kernel_size_t strspn(const char *,const char *);
extern __kernel_size_t strcspn(const char *,const char *);

extern char *strndup_user(const char __user *, long);

/*
 * Include machine specific inline routines
 */

@@ -234,14 +234,15 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v
/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int type);
extern swp_entry_t get_swap_page_of_type(int);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);

@@ -568,5 +568,6 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
int flag);
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
int flags, int mode);
asmlinkage long sys_unshare(unsigned long unshare_flags);

#endif

@@ -24,6 +24,7 @@
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
#include <linux/screen_info.h>
#include <linux/mutex.h>

#include <asm/system.h>

@@ -231,8 +232,8 @@ struct tty_struct {
int canon_data;
unsigned long canon_head;
unsigned int canon_column;
struct semaphore atomic_read;
struct semaphore atomic_write;
struct mutex atomic_read_lock;
struct mutex atomic_write_lock;
unsigned char *write_buf;
int write_cnt;
spinlock_t read_lock;

@@ -319,8 +320,7 @@ extern void tty_ldisc_put(int);
extern void tty_wakeup(struct tty_struct *tty);
extern void tty_ldisc_flush(struct tty_struct *tty);

struct semaphore;
extern struct semaphore tty_sem;
extern struct mutex tty_mutex;

/* n_tty.c */
extern struct tty_ldisc tty_ldisc_N_TTY;

@@ -7,14 +7,8 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c
extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);

#ifdef INCLUDE_INLINE_FUNCS
#define _INLINE_ extern
#else
#define _INLINE_ static __inline__
#endif

_INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
                                  unsigned char ch, char flag)
static inline int tty_insert_flip_char(struct tty_struct *tty,
                                       unsigned char ch, char flag)
{
        struct tty_buffer *tb = tty->buf.tail;
        if (tb && tb->active && tb->used < tb->size) {

@@ -25,7 +19,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
        return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}

_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
static inline void tty_schedule_flip(struct tty_struct *tty)
{
        unsigned long flags;
        spin_lock_irqsave(&tty->buf.lock, flags);

@@ -13,7 +13,7 @@
#ifndef _UDF_FS_SB_H
#define _UDF_FS_SB_H 1

#include <asm/semaphore.h>
#include <linux/mutex.h>

#pragma pack(1)

@@ -111,7 +111,7 @@ struct udf_sb_info
/* VAT inode */
struct inode *s_vat;

struct semaphore s_alloc_sem;
struct mutex s_alloc_mutex;
};

#endif /* _UDF_FS_SB_H */

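This hunk, like the seq_file and tty ones above, swaps a semaphore used as a plain lock for a struct mutex. As a hedged sketch of the matching call-site change (not taken from this diff), the old down()/up() pairs become mutex_lock()/mutex_unlock(), and init_MUTEX() becomes mutex_init(); the udf_alloc_example() helper and the sbi pointer name below are hypothetical and only illustrate the pattern.

#include <linux/mutex.h>
#include <linux/udf_fs_sb.h>            /* the header patched above */

/* before: init_MUTEX(&sbi->s_alloc_sem);   down(&sbi->s_alloc_sem);         up(&sbi->s_alloc_sem);           */
/* after:  mutex_init(&sbi->s_alloc_mutex); mutex_lock(&sbi->s_alloc_mutex); mutex_unlock(&sbi->s_alloc_mutex); */

static void udf_alloc_example(struct udf_sb_info *sbi)
{
        mutex_lock(&sbi->s_alloc_mutex);
        /* ... allocation work formerly serialized by s_alloc_sem ... */
        mutex_unlock(&sbi->s_alloc_mutex);
}
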
@@ -73,6 +73,11 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
int vt_waitactive(int vt);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
#ifdef CONFIG_VT
int is_console_suspend_safe(void);
#else
static inline int is_console_suspend_safe(void) { return 1; }
#endif

/*
 * vc_screen.c shares this temporary buffer with the console write code so that

@@ -1,10 +1,10 @@
/*
 * This file define a set of standard wireless extensions
 *
 * Version : 19 18.3.05
 * Version : 20 17.2.06
 *
 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
 * Copyright (c) 1997-2006 Jean Tourrilhes, All Rights Reserved.
 */

#ifndef _LINUX_WIRELESS_H

@@ -80,7 +80,7 @@
 * (there is some stuff that will be added in the future...)
 * I just plan to increment with each new version.
 */
#define WIRELESS_EXT 19
#define WIRELESS_EXT 20

/*
 * Changes :

@@ -204,6 +204,10 @@
 * - Add IW_QUAL_ALL_UPDATED and IW_QUAL_ALL_INVALID macros
 * - Add explicit flag to tell stats are in dBm : IW_QUAL_DBM
 * - Add IW_IOCTL_IDX() and IW_EVENT_IDX() macros
 *
 * V19 to V20
 * ----------
 * - RtNetlink requests support (SET/GET)
 */

/**************************** CONSTANTS ****************************/

@@ -88,8 +88,8 @@ void throttle_vm_writeout(void);
/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern int vm_dirty_ratio;
extern int dirty_writeback_centisecs;
extern int dirty_expire_centisecs;
extern int dirty_writeback_interval;
extern int dirty_expire_interval;
extern int block_dump;
extern int laptop_mode;

@@ -99,7 +99,15 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied);

static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
        balance_dirty_pages_ratelimited_nr(mapping, 1);
}

int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
int sync_page_range(struct inode *inode, struct address_space *mapping,

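For context (not part of the diff): the new _nr variant lets a caller that has just dirtied a batch of pages throttle itself once for the whole batch, while the single-page call keeps its old behaviour through the inline wrapper above. A minimal sketch, with the hypothetical helper name after_dirtying_batch():

#include <linux/fs.h>
#include <linux/writeback.h>

/* Hypothetical caller that has just dirtied "nr" pages of "mapping". */
static void after_dirtying_batch(struct address_space *mapping,
                                 unsigned long nr)
{
        /* One ratelimit check covers the whole batch ... */
        balance_dirty_pages_ratelimited_nr(mapping, nr);

        /* ... equivalent to balance_dirty_pages_ratelimited(mapping) when nr == 1. */
}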