Merge branch 'linus'

Trond Myklebust 2006-03-23 23:44:19 -05:00
commit 1ebbe2b200
2612 changed files with 133972 additions and 100561 deletions

View file

@ -54,6 +54,7 @@
#define CNTL_LCDBPP4 (2 << 1)
#define CNTL_LCDBPP8 (3 << 1)
#define CNTL_LCDBPP16 (4 << 1)
#define CNTL_LCDBPP16_565 (6 << 1)
#define CNTL_LCDBPP24 (5 << 1)
#define CNTL_LCDBW (1 << 4)
#define CNTL_LCDTFT (1 << 5)
@ -209,7 +210,16 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
val |= CNTL_LCDBPP8;
break;
case 16:
val |= CNTL_LCDBPP16;
/*
* PL110 cannot choose between 5551 and 565 modes in
* its control register
*/
if ((fb->dev->periphid & 0x000fffff) == 0x00041110)
val |= CNTL_LCDBPP16;
else if (fb->fb.var.green.length == 5)
val |= CNTL_LCDBPP16;
else
val |= CNTL_LCDBPP16_565;
break;
case 32:
val |= CNTL_LCDBPP24;
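A minimal illustrative sketch (editorial, not part of the patch) of the decision the new hunk encodes: the hypothetical helper below only restates how the AMBA peripheral ID and the framebuffer's green-channel width select between the 5551 and 565 encodings.

/* Illustrative only: restates the clcdfb_decode() logic above. */
static u32 pick_bpp16_control_bits(u32 periphid, unsigned int green_length)
{
	if ((periphid & 0x000fffff) == 0x00041110)	/* PL110 has no 565 mode */
		return CNTL_LCDBPP16;
	if (green_length == 5)				/* caller asked for 5551 */
		return CNTL_LCDBPP16;
	return CNTL_LCDBPP16_565;			/* e.g. PL111 true RGB565 */
}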

View file

@ -146,6 +146,8 @@ enum {
ATA_CMD_STANDBYNOW1 = 0xE0,
ATA_CMD_IDLEIMMEDIATE = 0xE1,
ATA_CMD_INIT_DEV_PARAMS = 0x91,
ATA_CMD_READ_NATIVE_MAX = 0xF8,
ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
/* SETFEATURES stuff */
SETFEATURES_XFER = 0x03,
@ -204,7 +206,6 @@ enum ata_tf_protocols {
ATA_PROT_UNKNOWN, /* unknown/invalid */
ATA_PROT_NODATA, /* no data */
ATA_PROT_PIO, /* PIO single sector */
ATA_PROT_PIO_MULT, /* PIO multiple sector */
ATA_PROT_DMA, /* DMA */
ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
ATA_PROT_ATAPI_NODATA, /* packet command, no data */
@ -247,18 +248,22 @@ struct ata_taskfile {
};
#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
#define ata_id_is_cfa(id) ((id)[0] == 0x848A)
#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
#define ata_id_hpa_enabled(id) ((id)[85] & (1 << 10))
#define ata_id_has_fua(id) ((id)[84] & (1 << 6))
#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
#define ata_id_has_lba48(id) ((id)[83] & (1 << 10))
#define ata_id_has_hpa(id) ((id)[82] & (1 << 10))
#define ata_id_has_wcache(id) ((id)[82] & (1 << 5))
#define ata_id_has_pm(id) ((id)[82] & (1 << 3))
#define ata_id_has_lba(id) ((id)[49] & (1 << 9))
#define ata_id_has_dma(id) ((id)[49] & (1 << 8))
#define ata_id_removeable(id) ((id)[0] & (1 << 7))
#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0))
#define ata_id_u32(id,n) \
(((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)]))
#define ata_id_u64(id,n) \
@ -267,6 +272,16 @@ struct ata_taskfile {
((u64) (id)[(n) + 1] << 16) | \
((u64) (id)[(n) + 0]) )
static inline unsigned int ata_id_major_version(const u16 *id)
{
unsigned int mver;
for (mver = 14; mver >= 1; mver--)
if (id[ATA_ID_MAJOR_VER] & (1 << mver))
break;
return mver;
}
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@ -302,4 +317,16 @@ static inline int ata_ok(u8 status)
== ATA_DRDY);
}
static inline int lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number */
return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)
{
/* check the ending block number */
return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
}
#endif /* __LINUX_ATA_H__ */
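A hedged usage sketch of the new range helpers above; the function name and return convention are illustrative, not part of this header.

/* Illustrative only: pick an addressing mode for a block/count pair. */
static inline int example_choose_lba_mode(u64 block, u32 n_block)
{
	if (lba_28_ok(block, n_block))
		return 28;		/* plain READ/WRITE (DMA) commands */
	if (lba_48_ok(block, n_block))
		return 48;		/* needs the LBA48 EXT command set */
	return -1;			/* exceeds even LBA48 addressing */
}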

View file

@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@ -416,6 +417,8 @@ struct request_queue
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
/*
* reserved for flush operations
*/

View file

@ -0,0 +1,277 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
#include <linux/config.h>
#include <linux/blkdev.h>
#include <linux/relay.h>
/*
* Trace categories
*/
enum blktrace_cat {
BLK_TC_READ = 1 << 0, /* reads */
BLK_TC_WRITE = 1 << 1, /* writes */
BLK_TC_BARRIER = 1 << 2, /* barrier */
BLK_TC_SYNC = 1 << 3, /* sync */
BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
BLK_TC_REQUEUE = 1 << 5, /* requeueing */
BLK_TC_ISSUE = 1 << 6, /* issue */
BLK_TC_COMPLETE = 1 << 7, /* completions */
BLK_TC_FS = 1 << 8, /* fs requests */
BLK_TC_PC = 1 << 9, /* pc requests */
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
#define BLK_TC_SHIFT (16)
#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
/*
* Basic trace actions
*/
enum blktrace_act {
__BLK_TA_QUEUE = 1, /* queued */
__BLK_TA_BACKMERGE, /* back merged to existing rq */
__BLK_TA_FRONTMERGE, /* front merge to existing rq */
__BLK_TA_GETRQ, /* allocated new request */
__BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
__BLK_TA_REQUEUE, /* request requeued */
__BLK_TA_ISSUE, /* sent to driver */
__BLK_TA_COMPLETE, /* completed by driver */
__BLK_TA_PLUG, /* queue was plugged */
__BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
__BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
__BLK_TA_INSERT, /* insert request */
__BLK_TA_SPLIT, /* bio was split */
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
};
/*
* Trace actions in full. Additionally, read or write is masked
*/
#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
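/*
 * Worked example (editorial, illustrative): BLK_TA_QUEUE expands to
 *	__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE)
 *	= 0x00000001 | (0x0010 << BLK_TC_SHIFT)
 *	= 0x00100001
 * so consumers can recover the action as (action & 0xffff) and the
 * category bits as (action >> BLK_TC_SHIFT).
 */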
#define BLK_IO_TRACE_MAGIC 0x65617400
#define BLK_IO_TRACE_VERSION 0x07
/*
* The trace itself
*/
struct blk_io_trace {
u32 magic; /* MAGIC << 8 | version */
u32 sequence; /* event number */
u64 time; /* in microseconds */
u64 sector; /* disk offset */
u32 bytes; /* transfer length */
u32 action; /* what happened */
u32 pid; /* who did it */
u32 device; /* device number */
u32 cpu; /* on what cpu did it happen */
u16 error; /* completion error */
u16 pdu_len; /* length of data after this trace */
};
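/*
 * Editorial sketch (illustrative, not part of the header): how a consumer
 * of the relayed stream might validate one event against the constants
 * above; the function name is made up.
 */
static inline int example_blk_io_trace_valid(const struct blk_io_trace *t)
{
	return (t->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC &&
	       (t->magic & 0xff) == BLK_IO_TRACE_VERSION;
}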
/*
* The remap event
*/
struct blk_io_trace_remap {
u32 device;
u32 __pad;
u64 sector;
};
enum {
Blktrace_setup = 1,
Blktrace_running,
Blktrace_stopped,
};
struct blk_trace {
int trace_state;
struct rchan *rchan;
unsigned long *sequence;
u16 act_mask;
u64 start_lba;
u64 end_lba;
u32 pid;
u32 dev;
struct dentry *dir;
struct dentry *dropped_file;
atomic_t dropped;
};
/*
* User setup structure passed with BLKTRACESTART
*/
struct blk_user_trace_setup {
char name[BDEVNAME_SIZE]; /* output */
u16 act_mask; /* input */
u32 buf_size; /* input */
u32 buf_nr; /* input */
u64 start_lba;
u64 end_lba;
u32 pid;
};
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(request_queue_t *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
/**
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
* @what: the action
*
* Description:
* Records an action against a request. Will log the bio offset + size.
*
**/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
int rw = rq->flags & 0x07;
if (likely(!bt))
return;
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
}
}
/**
* blk_add_trace_bio - Add a trace for a bio oriented action
* @q: queue the io is for
* @bio: the source bio
* @what: the action
*
* Description:
* Records an action against a bio. Will log the bio offset + size.
*
**/
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
/**
* blk_add_trace_generic - Add a trace for a generic action
* @q: queue the io is for
* @bio: the source bio
* @rw: the data direction
* @what: the action
*
* Description:
* Records a simple trace
*
**/
static inline void blk_add_trace_generic(struct request_queue *q,
struct bio *bio, int rw, u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
if (bio)
blk_add_trace_bio(q, bio, what);
else
__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}
/**
* blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
* @q: queue the io is for
* @what: the action
* @bio: the source bio
* @pdu: the integer payload
*
* Description:
* Adds a trace with some integer payload. This might be an unplug
* option given as the action, with the depth at unplug time given
* as the payload
*
**/
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
struct bio *bio, unsigned int pdu)
{
struct blk_trace *bt = q->blk_trace;
u64 rpdu = cpu_to_be64(pdu);
if (likely(!bt))
return;
if (bio)
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
else
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
/**
* blk_add_trace_remap - Add a trace for a remap operation
* @q: queue the io is for
* @bio: the source bio
* @dev: target device
* @from: source sector
* @to: target sector
*
* Description:
Device mapper or raid targets sometimes need to split a bio because
* it spans a stripe (or similar). Add a trace for that action.
*
**/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from, sector_t to)
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;
if (likely(!bt))
return;
r.device = cpu_to_be32(dev);
r.sector = cpu_to_be64(to);
__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
#define blk_add_trace_rq(q, rq, what) do { } while (0)
#define blk_add_trace_bio(q, rq, what) do { } while (0)
#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif
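A minimal sketch of how the block layer is expected to invoke these inline hooks; the wrapper function below is hypothetical and only shows the calling convention (every helper is a no-op when q->blk_trace is NULL).

/* Illustrative only: trace a queued bio, then an unplug with its depth. */
static void example_trace_points(struct request_queue *q, struct bio *bio,
				 unsigned int depth)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, depth);
}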

View file

@ -13,9 +13,7 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#else
#ifndef __read_mostly
#define __read_mostly
#endif
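Usage is a one-liner; a hedged example with a made-up variable name:

/* Illustrative only: a rarely written flag placed in .data.read_mostly. */
static int example_fast_path_enabled __read_mostly = 1;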

View file

@ -378,7 +378,6 @@ struct cdrom_generic_command
#define CDC_MEDIA_CHANGED 0x80 /* media changed */
#define CDC_PLAY_AUDIO 0x100 /* audio functions */
#define CDC_RESET 0x200 /* hard reset device */
#define CDC_IOCTLS 0x400 /* driver has non-standard ioctls */
#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */
#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */
#define CDC_CD_R 0x2000 /* drive is a CD-R */
@ -974,9 +973,7 @@ struct cdrom_device_ops {
int (*reset) (struct cdrom_device_info *);
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
/* dev-specific */
int (*dev_ioctl) (struct cdrom_device_info *,
unsigned int, unsigned long);
/* driver specifications */
const int capability; /* capability flags */
int n_minors; /* number of active minor devices */

View file

@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART)
COMPATIBLE_IOCTL(BLKFLSBUF)
COMPATIBLE_IOCTL(BLKSECTSET)
COMPATIBLE_IOCTL(BLKSSZGET)
COMPATIBLE_IOCTL(BLKTRACESTART)
COMPATIBLE_IOCTL(BLKTRACESTOP)
COMPATIBLE_IOCTL(BLKTRACESETUP)
COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
ULONG_IOCTL(BLKRASET)
ULONG_IOCTL(BLKFRASET)
/* RAID */

View file

@ -32,7 +32,7 @@ struct cpu {
};
extern int register_cpu(struct cpu *, int, struct node *);
extern struct sys_device *get_cpu_sysdev(int cpu);
extern struct sys_device *get_cpu_sysdev(unsigned cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *, struct node *);
#endif

View file

@ -229,6 +229,8 @@ struct crypto_tfm {
} crt_u;
struct crypto_alg *__crt_alg;
char __crt_ctx[] __attribute__ ((__aligned__));
};
/*
@ -301,7 +303,13 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
return (void *)&tfm[1];
return tfm->__crt_ctx;
}
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
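/*
 * Editorial sketch (illustrative): the intended use of crypto_tfm_ctx()
 * from an algorithm implementation. The context structure and function
 * name below are made up.
 */
struct example_cipher_ctx {
	u32 round_keys[32];
};

static inline void example_setkey_step(struct crypto_tfm *tfm)
{
	struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	/* __crt_ctx is suitably aligned, see crypto_tfm_ctx_alignment() */
	ctx->round_keys[0] = 0;
}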
/*

View file

@ -18,7 +18,7 @@
* @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x
*/
struct dccp_hdr {
__u16 dccph_sport,
__be16 dccph_sport,
dccph_dport;
__u8 dccph_doff;
#if defined(__LITTLE_ENDIAN_BITFIELD)
@ -32,18 +32,18 @@ struct dccp_hdr {
#endif
__u16 dccph_checksum;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 dccph_x:1,
__u8 dccph_x:1,
dccph_type:4,
dccph_reserved:3,
dccph_seq:24;
dccph_reserved:3;
#elif defined(__BIG_ENDIAN_BITFIELD)
__u32 dccph_reserved:3,
__u8 dccph_reserved:3,
dccph_type:4,
dccph_x:1,
dccph_seq:24;
dccph_x:1;
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
__u8 dccph_seq2;
__be16 dccph_seq;
};
/**
@ -52,7 +52,7 @@ struct dccp_hdr {
* @dccph_seq_low - low 24 bits of a 48 bit seq packet
*/
struct dccp_hdr_ext {
__u32 dccph_seq_low;
__be32 dccph_seq_low;
};
/**
@ -62,7 +62,7 @@ struct dccp_hdr_ext {
* @dccph_req_options - list of options (must be a multiple of 32 bits)
*/
struct dccp_hdr_request {
__u32 dccph_req_service;
__be32 dccph_req_service;
};
/**
* struct dccp_hdr_ack_bits - acknowledgment bits common to most packets
@ -71,9 +71,9 @@ struct dccp_hdr_request {
* @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR
*/
struct dccp_hdr_ack_bits {
__u32 dccph_reserved1:8,
dccph_ack_nr_high:24;
__u32 dccph_ack_nr_low;
__be16 dccph_reserved1;
__be16 dccph_ack_nr_high;
__be32 dccph_ack_nr_low;
};
/**
* struct dccp_hdr_response - Connection initiation response header
@ -85,7 +85,7 @@ struct dccp_hdr_ack_bits {
*/
struct dccp_hdr_response {
struct dccp_hdr_ack_bits dccph_resp_ack;
__u32 dccph_resp_service;
__be32 dccph_resp_service;
};
/**
@ -154,6 +154,10 @@ enum {
DCCPO_MANDATORY = 1,
DCCPO_MIN_RESERVED = 3,
DCCPO_MAX_RESERVED = 31,
DCCPO_CHANGE_L = 32,
DCCPO_CONFIRM_L = 33,
DCCPO_CHANGE_R = 34,
DCCPO_CONFIRM_R = 35,
DCCPO_NDP_COUNT = 37,
DCCPO_ACK_VECTOR_0 = 38,
DCCPO_ACK_VECTOR_1 = 39,
@ -168,7 +172,9 @@ enum {
/* DCCP features */
enum {
DCCPF_RESERVED = 0,
DCCPF_CCID = 1,
DCCPF_SEQUENCE_WINDOW = 3,
DCCPF_ACK_RATIO = 5,
DCCPF_SEND_ACK_VECTOR = 6,
DCCPF_SEND_NDP_COUNT = 7,
/* 10-127 reserved */
@ -176,9 +182,18 @@ enum {
DCCPF_MAX_CCID_SPECIFIC = 255,
};
/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
struct dccp_so_feat {
__u8 dccpsf_feat;
__u8 *dccpsf_val;
__u8 dccpsf_len;
};
/* DCCP socket options */
#define DCCP_SOCKOPT_PACKET_SIZE 1
#define DCCP_SOCKOPT_SERVICE 2
#define DCCP_SOCKOPT_CHANGE_L 3
#define DCCP_SOCKOPT_CHANGE_R 4
#define DCCP_SOCKOPT_CCID_RX_INFO 128
#define DCCP_SOCKOPT_CCID_TX_INFO 192
@ -254,16 +269,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 seq_nr = ntohl(dh->dccph_seq << 8);
#elif defined(__BIG_ENDIAN_BITFIELD)
__u64 seq_nr = ntohl(dh->dccph_seq);
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
__u64 seq_nr = ntohs(dh->dccph_seq);
if (dh->dccph_x != 0)
seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low);
else
seq_nr += (u32)dh->dccph_seq2 << 16;
return seq_nr;
}
@ -281,13 +292,7 @@ static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *
static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
{
const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
#if defined(__LITTLE_ENDIAN_BITFIELD)
return (((u64)ntohl(dhack->dccph_ack_nr_high << 8)) << 32) + ntohl(dhack->dccph_ack_nr_low);
#elif defined(__BIG_ENDIAN_BITFIELD)
return (((u64)ntohl(dhack->dccph_ack_nr_high)) << 32) + ntohl(dhack->dccph_ack_nr_low);
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
}
static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
@ -314,38 +319,60 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
/* initial values for each feature */
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
/* FIXME: for now we're using CCID 3 (TFRC) */
#define DCCPF_INITIAL_CCID 3
#define DCCPF_INITIAL_SEND_ACK_VECTOR 0
#define DCCPF_INITIAL_ACK_RATIO 2
#define DCCPF_INITIAL_CCID 2
#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
/* FIXME: for now we default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
#define DCCP_NDP_LIMIT 0xFFFFFF
/**
* struct dccp_options - option values for a DCCP connection
* @dccpo_sequence_window - Sequence Window Feature (section 7.5.2)
* @dccpo_ccid - Congestion Control Id (CCID) (section 10)
* @dccpo_send_ack_vector - Send Ack Vector Feature (section 11.5)
* @dccpo_send_ndp_count - Send NDP Count Feature (7.7.2)
* struct dccp_minisock - Minimal DCCP connection representation
*
* Will be used to pass the state from dccp_request_sock to dccp_sock.
*
* @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
* @dccpms_ccid - Congestion Control Id (CCID) (section 10)
* @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
* @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
*/
struct dccp_options {
__u64 dccpo_sequence_window;
__u8 dccpo_rx_ccid;
__u8 dccpo_tx_ccid;
__u8 dccpo_send_ack_vector;
__u8 dccpo_send_ndp_count;
struct dccp_minisock {
__u64 dccpms_sequence_window;
__u8 dccpms_rx_ccid;
__u8 dccpms_tx_ccid;
__u8 dccpms_send_ack_vector;
__u8 dccpms_send_ndp_count;
__u8 dccpms_ack_ratio;
struct list_head dccpms_pending;
struct list_head dccpms_conf;
};
extern void __dccp_options_init(struct dccp_options *dccpo);
extern void dccp_options_init(struct dccp_options *dccpo);
struct dccp_opt_conf {
__u8 *dccpoc_val;
__u8 dccpoc_len;
};
struct dccp_opt_pend {
struct list_head dccpop_node;
__u8 dccpop_type;
__u8 dccpop_feat;
__u8 *dccpop_val;
__u8 dccpop_len;
int dccpop_conf;
struct dccp_opt_conf *dccpop_sc;
};
extern void __dccp_minisock_init(struct dccp_minisock *dmsk);
extern void dccp_minisock_init(struct dccp_minisock *dmsk);
extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb);
struct dccp_request_sock {
struct inet_request_sock dreq_inet_rsk;
__u64 dreq_iss;
__u64 dreq_isr;
__u32 dreq_service;
__be32 dreq_service;
};
static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
@ -373,13 +400,13 @@ enum dccp_role {
struct dccp_service_list {
__u32 dccpsl_nr;
__u32 dccpsl_list[0];
__be32 dccpsl_list[0];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
static inline int dccp_list_has_service(const struct dccp_service_list *sl,
const u32 service)
const __be32 service)
{
if (likely(sl != NULL)) {
u32 i = sl->dccpsl_nr;
@ -425,17 +452,17 @@ struct dccp_sock {
__u64 dccps_gss;
__u64 dccps_gsr;
__u64 dccps_gar;
__u32 dccps_service;
__be32 dccps_service;
struct dccp_service_list *dccps_service_list;
struct timeval dccps_timestamp_time;
__u32 dccps_timestamp_echo;
__u32 dccps_packet_size;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
unsigned long dccps_ndp_count;
__u32 dccps_mss_cache;
struct dccp_options dccps_options;
struct dccp_minisock dccps_minisock;
struct dccp_ackvec *dccps_hc_rx_ackvec;
void *dccps_hc_rx_ccid_private;
void *dccps_hc_tx_ccid_private;
struct ccid *dccps_hc_rx_ccid;
struct ccid *dccps_hc_tx_ccid;
struct dccp_options_received dccps_options_received;
@ -450,6 +477,11 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
return (struct dccp_sock *)sk;
}
static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
{
return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
}
static inline int dccp_service_not_initialized(const struct sock *sk)
{
return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE;

View file

@ -21,6 +21,11 @@
struct file_operations;
struct debugfs_blob_wrapper {
void *data;
unsigned long size;
};
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, mode_t mode,
struct dentry *parent, void *data,
@ -39,6 +44,9 @@ struct dentry *debugfs_create_u32(const char *name, mode_t mode,
struct dentry *debugfs_create_bool(const char *name, mode_t mode,
struct dentry *parent, u32 *value);
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
#else
#include <linux/err.h>
@ -94,6 +102,13 @@ static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
return ERR_PTR(-ENODEV);
}
static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
return ERR_PTR(-ENODEV);
}
#endif
#endif

View file

@ -378,6 +378,7 @@ extern void device_bind_driver(struct device * dev);
extern void device_release_driver(struct device * dev);
extern int device_attach(struct device * dev);
extern void driver_attach(struct device_driver * drv);
extern void device_reprobe(struct device *dev);
/*
@ -399,7 +400,7 @@ extern struct device * get_device(struct device * dev);
extern void put_device(struct device * dev);
/* drivers/base/power.c */
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
@ -424,6 +425,8 @@ extern void firmware_unregister(struct subsystem *);
dev_printk(KERN_INFO , dev , format , ## arg)
#define dev_warn(dev, format, arg...) \
dev_printk(KERN_WARNING , dev , format , ## arg)
#define dev_notice(dev, format, arg...) \
dev_printk(KERN_NOTICE , dev , format , ## arg)
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \

View file

@ -20,6 +20,7 @@ enum dma_data_direction {
#define DMA_31BIT_MASK 0x000000007fffffffULL
#define DMA_30BIT_MASK 0x000000003fffffffULL
#define DMA_29BIT_MASK 0x000000001fffffffULL
#define DMA_28BIT_MASK 0x000000000fffffffULL
#include <asm/dma-mapping.h>

View file

@ -71,17 +71,17 @@
struct dn_naddr
{
unsigned short a_len;
unsigned char a_addr[DN_MAXADDL];
__le16 a_len;
__u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
};
struct sockaddr_dn
{
unsigned short sdn_family;
unsigned char sdn_flags;
unsigned char sdn_objnum;
unsigned short sdn_objnamel;
unsigned char sdn_objname[DN_MAXOBJL];
__u16 sdn_family;
__u8 sdn_flags;
__u8 sdn_objnum;
__le16 sdn_objnamel;
__u8 sdn_objname[DN_MAXOBJL];
struct dn_naddr sdn_add;
};
#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
@ -93,38 +93,38 @@ struct sockaddr_dn
* DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
*/
struct optdata_dn {
unsigned short opt_status; /* Extended status return */
__le16 opt_status; /* Extended status return */
#define opt_sts opt_status
unsigned short opt_optl; /* Length of user data */
unsigned char opt_data[16]; /* User data */
__le16 opt_optl; /* Length of user data */
__u8 opt_data[16]; /* User data */
};
struct accessdata_dn
{
unsigned char acc_accl;
unsigned char acc_acc[DN_MAXACCL];
unsigned char acc_passl;
unsigned char acc_pass[DN_MAXACCL];
unsigned char acc_userl;
unsigned char acc_user[DN_MAXACCL];
__u8 acc_accl;
__u8 acc_acc[DN_MAXACCL];
__u8 acc_passl;
__u8 acc_pass[DN_MAXACCL];
__u8 acc_userl;
__u8 acc_user[DN_MAXACCL];
};
/*
* DECnet logical link information structure
*/
struct linkinfo_dn {
unsigned short idn_segsize; /* Segment size for link */
unsigned char idn_linkstate; /* Logical link state */
__le16 idn_segsize; /* Segment size for link */
__u8 idn_linkstate; /* Logical link state */
};
/*
* Ethernet address format (for DECnet)
*/
union etheraddress {
unsigned char dne_addr[6]; /* Full ethernet address */
__u8 dne_addr[6]; /* Full ethernet address */
struct {
unsigned char dne_hiord[4]; /* DECnet HIORD prefix */
unsigned char dne_nodeaddr[2]; /* DECnet node address */
__u8 dne_hiord[4]; /* DECnet HIORD prefix */
__u8 dne_nodeaddr[2]; /* DECnet node address */
} dne_remote;
};
@ -133,7 +133,7 @@ union etheraddress {
* DECnet physical socket address format
*/
struct dn_addr {
unsigned short dna_family; /* AF_DECnet */
__le16 dna_family; /* AF_DECnet */
union etheraddress dna_netaddr; /* DECnet ethernet address */
};

View file

@ -121,4 +121,17 @@ typedef uint16_t audio_attributes_t;
#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
/**
* AUDIO_GET_PTS
*
* Read the 33 bit presentation time stamp as defined
* in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
*
* The PTS should belong to the currently played
* frame if possible, but may also be a value close to it
* like the PTS of the last decoded frame or the last PTS
* extracted by the PES parser.
*/
#define AUDIO_GET_PTS _IOR('o', 19, __u64)
#endif /* _DVBAUDIO_H_ */

View file

@ -200,4 +200,17 @@ typedef uint16_t video_attributes_t;
#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
/**
* VIDEO_GET_PTS
*
* Read the 33 bit presentation time stamp as defined
* in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
*
* The PTS should belong to the currently played
* frame if possible, but may also be a value close to it
* like the PTS of the last decoded frame or the last PTS
* extracted by the PES parser.
*/
#define VIDEO_GET_PTS _IOR('o', 57, __u64)
#endif /*_DVBVIDEO_H_*/

View file

@ -52,7 +52,12 @@ struct file;
#ifdef CONFIG_EPOLL
/* Used to initialize the epoll bits inside the "struct file" */
void eventpoll_init_file(struct file *file);
static inline void eventpoll_init_file(struct file *file)
{
INIT_LIST_HEAD(&file->f_ep_links);
spin_lock_init(&file->f_ep_lock);
}
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
@ -85,7 +90,6 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
#else
static inline void eventpoll_init_file(struct file *file) {}

View file

@ -772,9 +772,12 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
/* inode.c */
extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
int ext3_get_block_handle(handle_t *handle, struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create,
int extend_disksize);
extern void ext3_read_inode (struct inode *);
extern int ext3_write_inode (struct inode *, int);

View file

@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
struct ext3_reserve_window {
__u32 _rsv_start; /* First byte reserved */
@ -122,16 +123,16 @@ struct ext3_inode_info {
__u16 i_extra_isize;
/*
* truncate_sem is for serialising ext3_truncate() against
* truncate_mutex is for serialising ext3_truncate() against
* ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
* data tree are chopped off during truncate. We can't do that in
* ext3 because whenever we perform intermediate commits during
* truncate, the inode and all the metadata blocks *must* be in a
* consistent state which allows truncation of the orphans to restart
* during recovery. Hence we must fix the get_block-vs-truncate race
* by other means, so we have truncate_sem.
* by other means, so we have truncate_mutex.
*/
struct semaphore truncate_sem;
struct mutex truncate_mutex;
struct inode vfs_inode;
};
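A minimal sketch of the locking pattern the comment above describes, assuming the usual EXT3_I() container accessor; the function name is hypothetical.

/* Illustrative only: block mapping serialises against truncate. */
static void example_get_block_step(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);

	mutex_lock(&ei->truncate_mutex);
	/* ... walk or extend the indirect block tree safely here ... */
	mutex_unlock(&ei->truncate_mutex);
}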

View file

@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
/*
* The default fd array needs to be at least BITS_PER_LONG,
@ -17,10 +18,22 @@
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
/*
* The embedded_fd_set is a small fd_set,
* suitable for most tasks (which open <= BITS_PER_LONG files)
*/
struct embedded_fd_set {
unsigned long fds_bits[1];
};
/*
* More than this number of fds: we use a separately allocated fd_set
*/
#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
struct fdtable {
unsigned int max_fds;
int max_fdset;
int next_fd;
struct file ** fd; /* current fd array */
fd_set *close_on_exec;
fd_set *open_fds;
@ -33,13 +46,20 @@ struct fdtable {
* Open file table structure
*/
struct files_struct {
/*
* read mostly part
*/
atomic_t count;
struct fdtable *fdt;
struct fdtable fdtab;
fd_set close_on_exec_init;
fd_set open_fds_init;
/*
* written part on a separate cache line in SMP
*/
spinlock_t file_lock ____cacheline_aligned_in_smp;
int next_fd;
struct embedded_fd_set close_on_exec_init;
struct embedded_fd_set open_fds_init;
struct file * fd_array[NR_OPEN_DEFAULT];
spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
};
#define files_fdtable(files) (rcu_dereference((files)->fdt))
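A hedged sketch of the read-side pattern implied by the rcu_dereference() above: lockless readers fetch the fdtable under RCU, while writers serialise on file_lock. The function name is made up.

/* Illustrative only: resolve an fd to a struct file without taking file_lock. */
static struct file *example_fd_to_file(struct files_struct *files, unsigned int fd)
{
	struct file *file = NULL;
	struct fdtable *fdt;

	rcu_read_lock();
	fdt = files_fdtable(files);
	if (fd < fdt->max_fds)
		file = rcu_dereference(fdt->fd[fd]);
	rcu_read_unlock();
	return file;
}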

View file

@ -197,6 +197,10 @@ extern int dir_notify_enable;
#define BLKBSZGET _IOR(0x12,112,size_t)
#define BLKBSZSET _IOW(0x12,113,size_t)
#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
#define BLKTRACESTART _IO(0x12,116)
#define BLKTRACESTOP _IO(0x12,117)
#define BLKTRACETEARDOWN _IO(0x12,118)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@ -397,8 +401,8 @@ struct block_device {
dev_t bd_dev; /* not a kdev_t - it's a search key */
struct inode * bd_inode; /* will die */
int bd_openers;
struct semaphore bd_sem; /* open/close mutex */
struct semaphore bd_mount_sem; /* mount mutex */
struct mutex bd_mutex; /* open/close mutex */
struct mutex bd_mount_mutex; /* mount mutex */
struct list_head bd_inodes;
void * bd_holder;
int bd_holders;
@ -509,7 +513,7 @@ struct inode {
#ifdef CONFIG_INOTIFY
struct list_head inotify_watches; /* watches on this inode */
struct semaphore inotify_sem; /* protects the watches list */
struct mutex inotify_mutex; /* protects the watches list */
#endif
unsigned long i_state;
@ -843,7 +847,7 @@ struct super_block {
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
*/
struct semaphore s_vfs_rename_sem; /* Kludge */
struct mutex s_vfs_rename_mutex; /* Kludge */
/* Granularity of c/m/atime in ns.
Cannot be worse than a second */
@ -1112,6 +1116,18 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
static inline void inode_inc_link_count(struct inode *inode)
{
inode->i_nlink++;
mark_inode_dirty(inode);
}
static inline void inode_dec_link_count(struct inode *inode)
{
inode->i_nlink--;
mark_inode_dirty(inode);
}
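/*
 * Editorial sketch (illustrative): how a filesystem's ->link() method might
 * use the new helper; the function name is made up and error handling is
 * omitted.
 */
static int example_link(struct dentry *old_dentry, struct inode *dir,
			struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	inode_inc_link_count(inode);	/* i_nlink++ plus mark_inode_dirty() */
	/* ... add the directory entry, then d_instantiate(dentry, inode) ... */
	return 0;
}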
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
static inline void file_accessed(struct file *file)
{
@ -1531,7 +1547,7 @@ extern void destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int remove_suid(struct dentry *);
extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
extern struct semaphore iprune_sem;
extern struct mutex iprune_mutex;
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);

View file

@ -83,5 +83,32 @@ struct fsl_i2c_platform_data {
#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
#define FSL_I2C_DEV_CLOCK_5200 0x00000002
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
FSL_USB2_DR_DEVICE,
FSL_USB2_DR_OTG,
};
enum fsl_usb2_phy_modes {
FSL_USB2_PHY_NONE,
FSL_USB2_PHY_ULPI,
FSL_USB2_PHY_UTMI,
FSL_USB2_PHY_UTMI_WIDE,
FSL_USB2_PHY_SERIAL,
};
struct fsl_usb2_platform_data {
/* board specific information */
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
};
/* Flags in fsl_usb2_mph_platform_data */
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
#endif /* _FSL_DEVICE_H_ */
#endif /* __KERNEL__ */

View file

@ -12,6 +12,8 @@
#ifndef GENERIC_SERIAL_H
#define GENERIC_SERIAL_H
#include <linux/mutex.h>
struct real_driver {
void (*disable_tx_interrupts) (void *);
void (*enable_tx_interrupts) (void *);
@ -34,7 +36,7 @@ struct gs_port {
int xmit_head;
int xmit_tail;
int xmit_cnt;
struct semaphore port_write_sem;
struct mutex port_write_mutex;
int flags;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;

View file

@ -149,22 +149,16 @@ struct disk_attribute {
({ \
typeof(gendiskp->dkstats->field) res = 0; \
int i; \
for (i=0; i < NR_CPUS; i++) { \
if (!cpu_possible(i)) \
continue; \
for_each_cpu(i) \
res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
} \
res; \
})
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
int i;
for (i=0; i < NR_CPUS; i++) {
if (cpu_possible(i)) {
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
sizeof (struct disk_stats));
}
}
for_each_cpu(i)
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
sizeof (struct disk_stats));
}
#else

View file

@ -20,10 +20,7 @@ void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long)
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
void free_huge_page(struct page *);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@ -39,18 +36,35 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
do { } while (0)
#endif
#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling);
#endif
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define prepare_hugepage_range(addr, len) \
is_aligned_hugepage_range(addr, len)
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif
@ -87,20 +101,17 @@ static inline unsigned long hugetlb_total_pages(void)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end) BUG()
#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define is_aligned_hugepage_range(addr, len) 0
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
do { } while (0)
#define alloc_huge_page(vma, addr) ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
#define hugetlb_change_protection(vma, address, end, newprot)
#ifndef HPAGE_MASK
#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
#define HPAGE_SIZE PAGE_SIZE
@ -128,6 +139,8 @@ struct hugetlbfs_sb_info {
struct hugetlbfs_inode_info {
struct shared_policy policy;
/* Protected by the (global) hugetlb_lock */
unsigned long prereserved_hpages;
struct inode vfs_inode;
};
@ -144,6 +157,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
unsigned long atleast_hpages);
void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);

View file

@ -27,11 +27,13 @@ struct sensor_device_attribute{
#define to_sensor_dev_attr(_dev_attr) \
container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
#define SENSOR_DEVICE_ATTR(_name,_mode,_show,_store,_index) \
struct sensor_device_attribute sensor_dev_attr_##_name = { \
.dev_attr = __ATTR(_name,_mode,_show,_store), \
.index = _index, \
}
#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.index = _index }
#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
struct sensor_device_attribute sensor_dev_attr_##_name \
= SENSOR_ATTR(_name, _mode, _show, _store, _index)
struct sensor_device_attribute_2 {
struct device_attribute dev_attr;
@ -41,11 +43,13 @@ struct sensor_device_attribute_2 {
#define to_sensor_dev_attr_2(_dev_attr) \
container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.index = _index, \
.nr = _nr }
#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
struct sensor_device_attribute_2 sensor_dev_attr_##_name = { \
.dev_attr = __ATTR(_name,_mode,_show,_store), \
.index = _index, \
.nr = _nr, \
}
struct sensor_device_attribute_2 sensor_dev_attr_##_name \
= SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
#endif /* _LINUX_HWMON_SYSFS_H */
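A hedged usage example of the refactored macros; the show_temp handler name is made up.

/* Illustrative only: one indexed sysfs attribute per sensor channel. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
/* The handler recovers the channel via to_sensor_dev_attr(attr)->index. */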

View file

@ -172,7 +172,6 @@
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */
#define I2C_HW_B_TSUNA 0x010012 /* DEC Tsunami chipset */
#define I2C_HW_B_FRODO 0x010013 /* 2d3D SA-1110 Development Board */
#define I2C_HW_B_OMAHA 0x010014 /* Omaha I2C interface (ARM) */
#define I2C_HW_B_GUIDE 0x010015 /* Guide bit-basher */
#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */

View file

@ -32,7 +32,7 @@
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <asm/semaphore.h>
#include <linux/mutex.h>
/* --- For i2c-isa ---------------------------------------------------- */
@ -225,8 +225,8 @@ struct i2c_adapter {
int (*client_unregister)(struct i2c_client *);
/* data fields that are valid for all devices */
struct semaphore bus_lock;
struct semaphore clist_lock;
struct mutex bus_lock;
struct mutex clist_lock;
int timeout;
int retries;

View file

@ -40,14 +40,16 @@ struct icmp6hdr {
struct icmpv6_nd_ra {
__u8 hop_limit;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 reserved:6,
__u8 reserved:4,
router_pref:2,
other:1,
managed:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 managed:1,
other:1,
reserved:6;
router_pref:2,
reserved:4;
#else
#error "Please fix <asm/byteorder.h>"
#endif
@ -70,8 +72,13 @@ struct icmp6hdr {
#define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed
#define icmp6_addrconf_other icmp6_dataun.u_nd_ra.other
#define icmp6_rt_lifetime icmp6_dataun.u_nd_ra.rt_lifetime
#define icmp6_router_pref icmp6_dataun.u_nd_ra.router_pref
};
#define ICMPV6_ROUTER_PREF_LOW 0x3
#define ICMPV6_ROUTER_PREF_MEDIUM 0x0
#define ICMPV6_ROUTER_PREF_HIGH 0x1
#define ICMPV6_ROUTER_PREF_INVALID 0x2
#define ICMPV6_DEST_UNREACH 1
#define ICMPV6_PKT_TOOBIG 2

View file

@ -33,7 +33,7 @@
#define IFF_LOOPBACK 0x8 /* is a loopback net */
#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
#define IFF_RUNNING 0x40 /* interface running and carrier ok */
#define IFF_RUNNING 0x40 /* interface RFC2863 OPER_UP */
#define IFF_NOARP 0x80 /* no ARP protocol */
#define IFF_PROMISC 0x100 /* receive all packets */
#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
@ -43,12 +43,16 @@
#define IFF_MULTICAST 0x1000 /* Supports multicast */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_MASTER|IFF_SLAVE|IFF_RUNNING)
#define IFF_PORTSEL 0x2000 /* can set media type */
#define IFF_AUTOMEDIA 0x4000 /* auto media select active */
#define IFF_DYNAMIC 0x8000 /* dialup device with changing addresses*/
#define IFF_LOWER_UP 0x10000 /* driver signals L1 up */
#define IFF_DORMANT 0x20000 /* driver signals dormant */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
/* Private (from user) interface flags (netdevice->priv_flags). */
#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */
#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */
@ -83,6 +87,22 @@
#define IF_PROTO_FR_ETH_PVC 0x200B
#define IF_PROTO_RAW 0x200C /* RAW Socket */
/* RFC 2863 operational status */
enum {
IF_OPER_UNKNOWN,
IF_OPER_NOTPRESENT,
IF_OPER_DOWN,
IF_OPER_LOWERLAYERDOWN,
IF_OPER_TESTING,
IF_OPER_DORMANT,
IF_OPER_UP,
};
/* link modes */
enum {
IF_LINK_MODE_DEFAULT,
IF_LINK_MODE_DORMANT, /* limit upward transition to dormant */
};
/*
* Device mapping structure. I'd just gone off and designed a

View file

@ -72,6 +72,7 @@ struct in_addr {
#define IP_FREEBIND 15
#define IP_IPSEC_POLICY 16
#define IP_XFRM_POLICY 17
#define IP_PASSSEC 18
/* BSD compatibility */
#define IP_RECVRETOPTS IP_RETOPTS

View file

@ -25,6 +25,7 @@ struct ipv4_devconf
int arp_filter;
int arp_announce;
int arp_ignore;
int arp_accept;
int medium_id;
int no_xfrm;
int no_policy;

View file

@ -7,11 +7,10 @@
#define INIT_FDTABLE \
{ \
.max_fds = NR_OPEN_DEFAULT, \
.max_fdset = __FD_SETSIZE, \
.next_fd = 0, \
.max_fdset = EMBEDDED_FD_SET_SIZE, \
.fd = &init_files.fd_array[0], \
.close_on_exec = &init_files.close_on_exec_init, \
.open_fds = &init_files.open_fds_init, \
.close_on_exec = (fd_set *)&init_files.close_on_exec_init, \
.open_fds = (fd_set *)&init_files.open_fds_init, \
.rcu = RCU_HEAD_INIT, \
.free_files = NULL, \
.next = NULL, \
@ -20,9 +19,10 @@
#define INIT_FILES \
{ \
.count = ATOMIC_INIT(1), \
.file_lock = SPIN_LOCK_UNLOCKED, \
.fdt = &init_files.fdtab, \
.fdtab = INIT_FDTABLE, \
.file_lock = SPIN_LOCK_UNLOCKED, \
.next_fd = 0, \
.close_on_exec_init = { { 0, } }, \
.open_fds_init = { { 0, } }, \
.fd_array = { NULL, } \

View file

@ -145,6 +145,15 @@ struct ipv6_devconf {
__s32 max_desync_factor;
#endif
__s32 max_addresses;
__s32 accept_ra_defrtr;
__s32 accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
#ifdef CONFIG_IPV6_ROUTE_INFO
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
void *sysctl;
};
@ -167,6 +176,11 @@ enum {
DEVCONF_MAX_DESYNC_FACTOR,
DEVCONF_MAX_ADDRESSES,
DEVCONF_FORCE_MLD_VERSION,
DEVCONF_ACCEPT_RA_DEFRTR,
DEVCONF_ACCEPT_RA_PINFO,
DEVCONF_ACCEPT_RA_RTR_PREF,
DEVCONF_RTR_PROBE_INTERVAL,
DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
DEVCONF_MAX
};

View file

@ -23,12 +23,22 @@
#define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */
#define RTF_EXPIRES 0x00400000
#define RTF_ROUTEINFO 0x00800000 /* route information - RA */
#define RTF_CACHE 0x01000000 /* cache entry */
#define RTF_FLOW 0x02000000 /* flow significant route */
#define RTF_POLICY 0x04000000 /* policy route */
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
#define RTF_LOCAL 0x80000000
#ifdef __KERNEL__
#define IPV6_EXTRACT_PREF(flag) (((flag) & RTF_PREF_MASK) >> 27)
#define IPV6_DECODE_PREF(pref) ((pref) ^ 2) /* 1:low,2:med,3:high */
#endif
struct in6_rtmsg {
struct in6_addr rtmsg_dst;
struct in6_addr rtmsg_src;

View file

@ -76,6 +76,7 @@ typedef enum {
IRDA_MCP2120_DONGLE = 9,
IRDA_ACT200L_DONGLE = 10,
IRDA_MA600_DONGLE = 11,
IRDA_TOIM3232_DONGLE = 12,
} IRDA_DONGLE;
/* Protocol types to be used for SOCK_DGRAM */

View file

@ -28,6 +28,7 @@
#include <linux/journal-head.h>
#include <linux/stddef.h>
#include <linux/bit_spinlock.h>
#include <linux/mutex.h>
#include <asm/semaphore.h>
#endif
@ -575,7 +576,7 @@ struct transaction_s
* @j_wait_checkpoint: Wait queue to trigger checkpointing
* @j_wait_commit: Wait queue to trigger commit
* @j_wait_updates: Wait queue to wait for updates to complete
* @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints
* @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
* @j_head: Journal head - identifies the first unused block in the journal
* @j_tail: Journal tail - identifies the oldest still-used block in the
* journal.
@ -645,7 +646,7 @@ struct journal_s
int j_barrier_count;
/* The barrier lock itself */
struct semaphore j_barrier;
struct mutex j_barrier;
/*
* Transactions: The current running transaction...
@ -687,7 +688,7 @@ struct journal_s
wait_queue_head_t j_wait_updates;
/* Semaphore for locking against concurrent checkpoints */
struct semaphore j_checkpoint_sem;
struct mutex j_checkpoint_mutex;
/*
* Journal head: identifies the first unused block in the journal.

View file

@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
__attribute__ ((NORET_AND format (printf, 1, 2)));
extern void oops_enter(void);
extern void oops_exit(void);
extern int oops_may_print(void);
fastcall NORET_TYPE void do_exit(long error_code)
ATTRIB_NORET;
NORET_TYPE void complete_and_exit(struct completion *, long)

View file

@ -1,6 +1,6 @@
#ifdef __KERNEL__
#include <asm/semaphore.h>
#include <linux/mutex.h>
typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
struct kobj_map;
@ -9,6 +9,6 @@ int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *,
kobj_probe_t *, int (*)(dev_t, void *), void *);
void kobj_unmap(struct kobj_map *, dev_t, unsigned long);
struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *);
struct kobj_map *kobj_map_init(kobj_probe_t *, struct semaphore *);
struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *);
#endif

View file

@ -80,6 +80,8 @@ extern void kobject_unregister(struct kobject *);
extern struct kobject * kobject_get(struct kobject *);
extern void kobject_put(struct kobject *);
extern struct kobject *kobject_add_dir(struct kobject *, const char *);
extern char * kobject_get_path(struct kobject *, gfp_t);
struct kobj_type {
@ -255,7 +257,7 @@ struct subsys_attribute {
extern int subsys_create_file(struct subsystem * , struct subsys_attribute *);
extern void subsys_remove_file(struct subsystem * , struct subsys_attribute *);
#if defined(CONFIG_HOTPLUG) & defined(CONFIG_NET)
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
void kobject_uevent(struct kobject *kobj, enum kobject_action action);
int add_uevent_var(char **envp, int num_envp, int *cur_index,

View file

@ -36,6 +36,7 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>
@ -152,7 +153,7 @@ struct kretprobe_instance {
};
extern spinlock_t kretprobe_lock;
extern struct semaphore kprobe_mutex;
extern struct mutex kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);

View file

@ -35,7 +35,8 @@
#include <linux/workqueue.h>
/*
* compile-time options
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
#undef ATA_DEBUG /* debugging output */
#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@ -61,15 +62,37 @@
#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef ATA_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
if(unlikely(!(expr))) { \
printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
#expr,__FILE__,__FUNCTION__,__LINE__); \
}
#endif
/* NEW: debug levels */
#define HAVE_LIBATA_MSG 1
enum {
ATA_MSG_DRV = 0x0001,
ATA_MSG_INFO = 0x0002,
ATA_MSG_PROBE = 0x0004,
ATA_MSG_WARN = 0x0008,
ATA_MSG_MALLOC = 0x0010,
ATA_MSG_CTL = 0x0020,
ATA_MSG_INTR = 0x0040,
ATA_MSG_ERR = 0x0080,
};
#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
{
if (dval < 0 || dval >= (sizeof(u32) * 8))
return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
if (!dval)
return 0;
return (1 << dval) - 1;
}
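/*
 * Editorial sketch (illustrative): seeding a port's msg_enable from a
 * driver module parameter with the helper above; 'msg_level' and the
 * function name are made up.
 */
static inline void example_set_msg_level(struct ata_port *ap, int msg_level)
{
	ap->msg_enable = ata_msg_init(msg_level, ATA_MSG_DRV | ATA_MSG_ERR);

	if (ata_msg_probe(ap))
		printk(KERN_INFO "ata: probe-level debug messages enabled\n");
}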
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
@ -99,8 +122,7 @@ enum {
/* struct ata_device stuff */
ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */
ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@ -115,9 +137,9 @@ enum {
ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
ATA_FLAG_SATA = (1 << 3),
ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */
ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
* proper HSM is in place. */
@ -129,10 +151,14 @@ enum {
ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
/* various lengths of time */
ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@ -162,11 +188,19 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
* unsigned long bitmap
* unsigned int bitmap
*/
ATA_SHIFT_UDMA = 0,
ATA_SHIFT_MWDMA = 8,
ATA_SHIFT_PIO = 11,
ATA_BITS_PIO = 5,
ATA_BITS_MWDMA = 3,
ATA_BITS_UDMA = 8,
ATA_SHIFT_PIO = 0,
ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
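/*
 * Worked example (editorial, illustrative): PIO occupies bits 0-4,
 * MWDMA bits 5-7 and UDMA bits 8-15, so a device offering PIO0-4,
 * MWDMA0-2 and UDMA0-5 is described by the single word
 *	(0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 *	(0x3f << ATA_SHIFT_UDMA) == 0x3fff
 */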
/* size of buffer to pad xfers ending on unaligned boundaries */
ATA_DMA_PAD_SZ = 4,
@ -189,10 +223,15 @@ enum hsm_task_states {
};
enum ata_completion_errors {
AC_ERR_OTHER = (1 << 0),
AC_ERR_DEV = (1 << 1),
AC_ERR_ATA_BUS = (1 << 2),
AC_ERR_HOST_BUS = (1 << 3),
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
AC_ERR_MEDIA = (1 << 3), /* media error */
AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
AC_ERR_SYSTEM = (1 << 6), /* system error */
AC_ERR_INVALID = (1 << 7), /* invalid argument */
AC_ERR_OTHER = (1 << 8), /* unknown */
};
/* forward declarations */
@ -202,7 +241,10 @@ struct ata_port;
struct ata_queued_cmd;
/* typedefs */
typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef void (*ata_probeinit_fn_t)(struct ata_port *);
typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
struct ata_ioports {
unsigned long cmd_addr;
@ -305,7 +347,7 @@ struct ata_device {
unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int class; /* ATA_DEV_xxx */
unsigned int devno; /* 0 or 1 */
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u16 *id; /* IDENTIFY xxx DEVICE data */
u8 pio_mode;
u8 dma_mode;
u8 xfer_mode;
@ -313,6 +355,8 @@ struct ata_device {
unsigned int multi_count; /* sectors count for
READ/WRITE MULTIPLE */
unsigned int max_sectors; /* per-device max sectors */
unsigned int cdb_len;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@ -342,7 +386,6 @@ struct ata_port {
unsigned int mwdma_mask;
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
unsigned int cdb_len;
struct ata_device device[ATA_MAX_DEVICES];
@ -353,12 +396,14 @@ struct ata_port {
struct ata_host_stats stats;
struct ata_host_set *host_set;
struct work_struct packet_task;
struct work_struct port_task;
struct work_struct pio_task;
unsigned int hsm_task_state;
unsigned long pio_task_timeout;
u32 msg_enable;
struct list_head eh_done_q;
void *private_data;
};
@ -378,7 +423,9 @@ struct ata_port_operations {
u8 (*check_altstatus)(struct ata_port *ap);
void (*dev_select)(struct ata_port *ap, unsigned int device);
void (*phy_reset) (struct ata_port *ap);
void (*phy_reset) (struct ata_port *ap); /* obsolete */
int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
void (*post_set_mode) (struct ata_port *ap);
int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@ -387,7 +434,7 @@ struct ata_port_operations {
void (*bmdma_start) (struct ata_queued_cmd *qc);
void (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc);
unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
void (*eng_timeout) (struct ata_port *ap);
@ -435,6 +482,18 @@ extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int ata_drive_probe_reset(struct ata_port *ap,
ata_probeinit_fn_t probeinit,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset, unsigned int *classes);
extern void ata_std_probeinit(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, int verbose,
unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, int verbose,
unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
int post_reset);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
@ -443,6 +502,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_set_remove(struct ata_host_set *host_set);
@ -450,6 +510,8 @@ extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_error(struct Scsi_Host *host);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int ata_scsi_device_resume(struct scsi_device *);
@ -457,6 +519,11 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
extern int ata_device_resume(struct ata_port *, struct ata_device *);
extern int ata_device_suspend(struct ata_port *, struct ata_device *);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
void *data, unsigned long delay);
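An illustrative call of the new deferred-work helper (not from this diff); my_pio_poll is an assumed void (*)(void *) handler and the delay argument is taken to be in jiffies.
	/* poll for PIO completion in roughly 10ms, off the port's work struct */
	ata_port_queue_task(ap, my_pio_poll, ap, msecs_to_jiffies(10));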
/*
* Default driver ops implementations
@ -470,26 +537,29 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_dev_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
struct scsi_cmnd *cmd,
@ -540,7 +610,7 @@ extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */
@ -586,10 +656,14 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}
static inline unsigned int ata_class_present(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}
static inline unsigned int ata_dev_present(const struct ata_device *dev)
{
return ((dev->class == ATA_DEV_ATA) ||
(dev->class == ATA_DEV_ATAPI));
return ata_class_present(dev->class);
}
static inline u8 ata_chk_status(struct ata_port *ap)
@ -657,9 +731,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
if (status & (ATA_BUSY | ATA_DRQ)) {
unsigned long l = ap->ioaddr.status_addr;
printk(KERN_WARNING
"ATA: abnormal status 0x%X on port 0x%lX\n",
status, l);
if (ata_msg_warn(ap))
printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
status, l);
}
return status;
@ -701,6 +775,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
}
/**
* ata_qc_complete - Complete an active ATA command
* @qc: Command to complete
*
* Indicate to the mid and upper layers that an ATA
* command has completed, with either an ok or not-ok status.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static inline void ata_qc_complete(struct ata_queued_cmd *qc)
{
if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
return;
__ata_qc_complete(qc);
}
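A hedged completion-path sketch combining the inline above with ac_err_mask() further down; it assumes the queued command carries an err_mask field, as libata of this era does.
	u8 status = ata_chk_status(ap);

	qc->err_mask |= ac_err_mask(status);
	ata_qc_complete(qc);	/* becomes a no-op once EH has been scheduled */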
/**
* ata_irq_on - Enable interrupts on a port.
@ -751,7 +843,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
status = ata_busy_wait(ap, bits, 1000);
if (status & bits)
DPRINTK("abnormal status 0x%X\n", status);
if (ata_msg_err(ap))
printk(KERN_ERR "abnormal status 0x%X\n", status);
/* get controller status; clear intr, err bits */
if (ap->flags & ATA_FLAG_MMIO) {
@ -769,8 +862,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
host_stat, post_stat, status);
if (ata_msg_intr(ap))
printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
__FUNCTION__,
host_stat, post_stat, status);
return status;
}
@ -807,7 +902,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
static inline unsigned int ac_err_mask(u8 status)
{
if (status & ATA_BUSY)
return AC_ERR_ATA_BUS;
return AC_ERR_HSM;
if (status & (ATA_ERR | ATA_DF))
return AC_ERR_DEV;
return 0;

View file

@ -410,6 +410,17 @@ static inline void list_splice_init(struct list_head *list,
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_from - iterate over list of given type
* continuing from existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
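Hypothetical usage (struct foo, foo_list and cursor are illustrative): continue a walk from an element located earlier instead of restarting at the head.
	struct foo *pos = cursor;	/* already known to be on foo_list */

	list_for_each_entry_from(pos, &foo_list, node) {
		/* visits cursor itself, then every later entry */
		process(pos);
	}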
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop counter.
@ -437,6 +448,19 @@ static inline void list_splice_init(struct list_head *list,
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_from - iterate over list of given type
* from existing point safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
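The same idea for the safe variant (again with hypothetical types): entries from the cursor onwards may be unlinked inside the loop because the next pointer is fetched up front.
	struct foo *pos = cursor, *n;

	list_for_each_entry_safe_from(pos, n, &foo_list, node) {
		if (pos->stale)
			list_del(&pos->node);
	}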
/**
* list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
* removal of list entry

View file

@ -17,6 +17,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
/* Possible states of device */
enum {
@ -60,7 +61,7 @@ struct loop_device {
int lo_state;
struct completion lo_done;
struct completion lo_bh_done;
struct semaphore lo_ctl_mutex;
struct mutex lo_ctl_mutex;
int lo_pending;
request_queue_t *lo_queue;

36 include/linux/migrate.h Normal file
View file

@ -0,0 +1,36 @@
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H
#include <linux/config.h>
#include <linux/mm.h>
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct page *, struct page *);
extern void migrate_page_copy(struct page *, struct page *);
extern int migrate_page_remove_references(struct page *, struct page *, int);
extern int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed);
int migrate_pages_to(struct list_head *pagelist,
struct vm_area_struct *vma, int dest);
extern int fail_migrate_page(struct page *, struct page *);
extern int migrate_prep(void);
#else
static inline int isolate_lru_page(struct page *p, struct list_head *list)
{ return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
/* Possible settings for the migrate_page() method in address_operations */
#define migrate_page NULL
#define fail_migrate_page NULL
#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */

View file

@ -286,43 +286,34 @@ struct page {
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
*
* Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
* can use atomic_add_negative(-1, page->_count) to detect when the page
* becomes free and so that we can also use atomic_inc_and_test to atomically
* detect when we just tried to grab a ref on a page which some other CPU has
* already deemed to be freeable.
*
* NO code should make assumptions about this internal detail! Use the provided
* macros which retain the old rules: page_count(page) == 0 is a free page.
*/
/*
* Drop a ref, return true if the logical refcount fell to zero (the page has
* no users)
*/
#define put_page_testzero(p) \
({ \
BUG_ON(atomic_read(&(p)->_count) == -1);\
atomic_add_negative(-1, &(p)->_count); \
})
static inline int put_page_testzero(struct page *page)
{
BUG_ON(atomic_read(&page->_count) == 0);
return atomic_dec_and_test(&page->_count);
}
/*
* Grab a ref, return true if the page previously had a logical refcount of
* zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
*/
#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1)
#define __put_page(p) atomic_dec(&(p)->_count)
static inline int get_page_unless_zero(struct page *page)
{
return atomic_inc_not_zero(&page->_count);
}
extern void FASTCALL(__page_cache_release(struct page *));
static inline int page_count(struct page *page)
{
if (PageCompound(page))
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
return atomic_read(&page->_count) + 1;
return atomic_read(&page->_count);
}
static inline void get_page(struct page *page)
@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
atomic_inc(&page->_count);
}
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
*/
static inline void init_page_count(struct page *page)
{
atomic_set(&page->_count, 1);
}
void put_page(struct page *page);
void split_page(struct page *page, unsigned int order);
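A minimal sketch of the speculative-reference pattern these helpers enable now that a free page has _count == 0 (how the page pointer is obtained is assumed, not shown):
	if (get_page_unless_zero(page)) {
		/* the page cannot be freed underneath us while we hold the ref */
		put_page(page);		/* drop it again when done */
	}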
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);
int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
void drop_pagecache(void);
void drop_slab(void);

View file

@ -32,7 +32,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
{
list_del(&page->lru);
if (PageActive(page)) {
ClearPageActive(page);
__ClearPageActive(page);
zone->nr_active--;
} else {
zone->nr_inactive--;

View file

@ -198,6 +198,9 @@ void *__symbol_get_gpl(const char *symbol);
#define EXPORT_SYMBOL_GPL(sym) \
__EXPORT_SYMBOL(sym, "_gpl")
#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
__EXPORT_SYMBOL(sym, "_gpl_future")
#endif
struct module_ref
@ -242,6 +245,7 @@ struct module
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_param_attrs *param_attrs;
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
@ -255,6 +259,11 @@ struct module
unsigned int num_gpl_syms;
const unsigned long *gpl_crcs;
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
unsigned int num_gpl_future_syms;
const unsigned long *gpl_future_crcs;
/* Exception table */
unsigned int num_exentries;
const struct exception_table_entry *extable;
@ -441,6 +450,7 @@ void module_remove_driver(struct device_driver *);
#else /* !CONFIG_MODULES... */
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
/* Given an address, look for it in the exception tables. */
static inline const struct exception_table_entry *

View file

@ -184,6 +184,7 @@ struct fat_slot_info {
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/fs.h>
#include <linux/mutex.h>
struct fat_mount_options {
uid_t fs_uid;
@ -199,7 +200,7 @@ struct fat_mount_options {
sys_immutable:1, /* set = system files are immutable */
dotsOK:1, /* set = hidden and system files are named '.filename' */
isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
utf8:1, /* Use of UTF8 character set (Default) */
utf8:1, /* Use of UTF-8 character set (Default) */
unicode_xlate:1, /* create escape sequences for unhandled Unicode */
numtail:1, /* Does first alias have a numeric '~1' type tail? */
atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */
@ -226,7 +227,7 @@ struct msdos_sb_info {
unsigned long max_cluster; /* maximum cluster number */
unsigned long root_cluster; /* first cluster of the root directory */
unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
struct semaphore fat_lock;
struct mutex fat_lock;
unsigned int prev_free; /* previously allocated cluster number */
unsigned int free_clusters; /* -1 if undefined */
struct fat_mount_options options;

View file

@ -38,6 +38,7 @@ enum {
#ifdef __KERNEL__
#include <linux/wait.h>
#include <linux/mutex.h>
/* values for flags field */
#define NBD_READ_ONLY 0x0001
@ -57,7 +58,7 @@ struct nbd_device {
struct request *active_req;
wait_queue_head_t active_wq;
struct semaphore tx_lock;
struct mutex tx_lock;
struct gendisk *disk;
int blksize;
u64 bytesize;

View file

@ -19,7 +19,7 @@ struct ncp_inode_info {
__le32 DosDirNum;
__u8 volNumber;
__le32 nwattr;
struct semaphore open_sem;
struct mutex open_mutex;
atomic_t opened;
int access;
int flags;

View file

@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/ncp_mount.h>
#include <linux/net.h>
#include <linux/mutex.h>
#ifdef __KERNEL__
@ -51,7 +52,7 @@ struct ncp_server {
receive replies */
int lock; /* To prevent mismatch in protocols. */
struct semaphore sem;
struct mutex mutex;
int current_size; /* for packet preparation */
int has_subfunction;
@ -96,7 +97,7 @@ struct ncp_server {
struct {
struct work_struct tq; /* STREAM/DGRAM: data/error ready */
struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */
struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */
unsigned int state; /* STREAM only: receiver state */
struct {

View file

@ -143,12 +143,18 @@ struct proto_ops {
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
int (*compat_ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
int (*compat_setsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int optlen);
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len);
int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
@ -247,6 +253,8 @@ SOCKCALL_UWRAP(name, poll, (struct file *file, struct socket *sock, struct poll_
(file, sock, wait)) \
SOCKCALL_WRAP(name, ioctl, (struct socket *sock, unsigned int cmd, \
unsigned long arg), (sock, cmd, arg)) \
SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
unsigned long arg), (sock, cmd, arg)) \
SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
@ -271,6 +279,7 @@ static const struct proto_ops name##_ops = { \
.getname = __lock_##name##_getname, \
.poll = __lock_##name##_poll, \
.ioctl = __lock_##name##_ioctl, \
.compat_ioctl = __lock_##name##_compat_ioctl, \
.listen = __lock_##name##_listen, \
.shutdown = __lock_##name##_shutdown, \
.setsockopt = __lock_##name##_setsockopt, \
@ -279,6 +288,7 @@ static const struct proto_ops name##_ops = { \
.recvmsg = __lock_##name##_recvmsg, \
.mmap = __lock_##name##_mmap, \
};
#endif
#define MODULE_ALIAS_NETPROTO(proto) \

View file

@ -230,7 +230,8 @@ enum netdev_state_t
__LINK_STATE_SCHED,
__LINK_STATE_NOCARRIER,
__LINK_STATE_RX_SCHED,
__LINK_STATE_LINKWATCH_PENDING
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
};
@ -335,11 +336,14 @@ struct net_device
*/
unsigned short flags; /* interface flags (a la BSD) */
unsigned int flags; /* interface flags (a la BSD) */
unsigned short gflags;
unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
unsigned short padded; /* How much padding added by alloc_netdev() */
unsigned char operstate; /* RFC2863 operstate */
unsigned char link_mode; /* mapping policy to operstate */
unsigned mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */
@ -708,12 +712,18 @@ static inline void dev_put(struct net_device *dev)
atomic_dec(&dev->refcnt);
}
#define __dev_put(dev) atomic_dec(&(dev)->refcnt)
#define dev_hold(dev) atomic_inc(&(dev)->refcnt)
static inline void dev_hold(struct net_device *dev)
{
atomic_inc(&dev->refcnt);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is caller
* who is responsible for serialization of these calls.
*
* The name carrier is inappropriate, these functions should really be
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
extern void linkwatch_fire_event(struct net_device *dev);
@ -729,6 +739,29 @@ extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
static inline void netif_dormant_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
static inline void netif_dormant_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
static inline int netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
static inline int netif_oper_up(const struct net_device *dev) {
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
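Illustrative driver usage of the new RFC2863 dormancy helpers (the surrounding driver state machine is assumed); the operstate reported to userspace should then follow via the linkwatch event.
	netif_carrier_on(dev);		/* lower layer has link */
	netif_dormant_on(dev);		/* but e.g. authentication is still pending */
	/* ... once the interface becomes fully usable ... */
	netif_dormant_off(dev);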
/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{

View file

@ -80,10 +80,14 @@ struct nf_sockopt_ops
int set_optmin;
int set_optmax;
int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
int (*compat_set)(struct sock *sk, int optval,
void __user *user, unsigned int len);
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
int (*compat_get)(struct sock *sk, int optval,
void __user *user, int *len);
/* Number of users inside set() or get(). */
unsigned int use;
@ -246,6 +250,11 @@ int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt,
int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt,
int *len);
int compat_nf_setsockopt(struct sock *sk, int pf, int optval,
char __user *opt, int len);
int compat_nf_getsockopt(struct sock *sk, int pf, int optval,
char __user *opt, int *len);
/* Packet queuing */
struct nf_queue_handler {
int (*outfn)(struct sk_buff *skb, struct nf_info *info,

View file

@ -164,6 +164,7 @@ extern void nfattr_parse(struct nfattr *tb[], int maxattr,
__res; \
})
extern int nfnetlink_has_listeners(unsigned int group);
extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
int echo);
extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);

View file

@ -47,6 +47,8 @@ enum nfulnl_attr_type {
NFULA_PAYLOAD, /* opaque data payload */
NFULA_PREFIX, /* string prefix */
NFULA_UID, /* user id of socket */
NFULA_SEQ, /* instance-local sequence number */
NFULA_SEQ_GLOBAL, /* global sequence number */
__NFULA_MAX
};
@ -77,6 +79,7 @@ enum nfulnl_attr_config {
NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */
NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */
NFULA_CFG_QTHRESH, /* u_int32_t */
NFULA_CFG_FLAGS, /* u_int16_t */
__NFULA_CFG_MAX
};
#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1)
@ -85,4 +88,7 @@ enum nfulnl_attr_config {
#define NFULNL_COPY_META 0x01
#define NFULNL_COPY_PACKET 0x02
#define NFULNL_CFG_F_SEQ 0x0001
#define NFULNL_CFG_F_SEQ_GLOBAL 0x0002
#endif /* _NFNETLINK_LOG_H */

View file

@ -4,6 +4,62 @@
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match
{
union {
struct {
u_int16_t match_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t match_size;
/* Used inside the kernel */
struct xt_match *match;
} kernel;
/* Total length */
u_int16_t match_size;
} u;
unsigned char data[0];
};
struct xt_entry_target
{
union {
struct {
u_int16_t target_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t target_size;
/* Used inside the kernel */
struct xt_target *target;
} kernel;
/* Total length */
u_int16_t target_size;
} u;
unsigned char data[0];
};
struct xt_standard_target
{
struct xt_entry_target target;
int verdict;
};
/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
* kernel supports, if >= revision. */
struct xt_get_revision
@ -92,8 +148,6 @@ struct xt_match
const char name[XT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
/* Return true or false: return FALSE and set *hotdrop = 1 to
force immediate packet drop. */
/* Arguments changed since 2.6.9, as this must now handle
@ -102,6 +156,7 @@ struct xt_match
int (*match)(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct xt_match *match,
const void *matchinfo,
int offset,
unsigned int protoff,
@ -111,15 +166,25 @@ struct xt_match
/* Should return true or false. */
int (*checkentry)(const char *tablename,
const void *ip,
const struct xt_match *match,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask);
/* Called when entry of this type deleted. */
void (*destroy)(void *matchinfo, unsigned int matchinfosize);
void (*destroy)(const struct xt_match *match, void *matchinfo,
unsigned int matchinfosize);
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
char *table;
unsigned int matchsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
u_int8_t revision;
};
/* Registration hooks for targets. */
@ -129,8 +194,6 @@ struct xt_target
const char name[XT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
/* Returns verdict. Argument order changed since 2.6.9, as this
must now handle non-linear skbs, using skb_copy_bits and
skb_ip_make_writable. */
@ -138,6 +201,7 @@ struct xt_target
const struct net_device *in,
const struct net_device *out,
unsigned int hooknum,
const struct xt_target *target,
const void *targinfo,
void *userdata);
@ -147,15 +211,25 @@ struct xt_target
/* Should return true or false. */
int (*checkentry)(const char *tablename,
const void *entry,
const struct xt_target *target,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask);
/* Called when entry of this type deleted. */
void (*destroy)(void *targinfo, unsigned int targinfosize);
void (*destroy)(const struct xt_target *target, void *targinfo,
unsigned int targinfosize);
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
char *table;
unsigned int targetsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
u_int8_t revision;
};
/* Furniture shopping... */
@ -202,10 +276,17 @@ struct xt_table_info
char *entries[NR_CPUS];
};
extern int xt_register_target(int af, struct xt_target *target);
extern void xt_unregister_target(int af, struct xt_target *target);
extern int xt_register_match(int af, struct xt_match *target);
extern void xt_unregister_match(int af, struct xt_match *target);
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_match(struct xt_match *target);
extern void xt_unregister_match(struct xt_match *target);
extern int xt_check_match(const struct xt_match *match, unsigned short family,
unsigned int size, const char *table, unsigned int hook,
unsigned short proto, int inv_proto);
extern int xt_check_target(const struct xt_target *target, unsigned short family,
unsigned int size, const char *table, unsigned int hook,
unsigned short proto, int inv_proto);
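A hedged registration sketch against the reworked API, where the address family now lives in the object itself instead of being passed to xt_register_match(); every "foo" identifier here is hypothetical.
static struct xt_match foo_match = {
	.name		= "foo",
	.family		= AF_INET,
	.match		= foo_mt,			/* callback with the new prototype above */
	.matchsize	= sizeof(struct xt_foo_info),	/* hypothetical private data */
	.me		= THIS_MODULE,
};

static int __init foo_mt_init(void)
{
	return xt_register_match(&foo_match);
}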
extern int xt_register_table(struct xt_table *table,
struct xt_table_info *bootstrap,

View file

@ -0,0 +1,58 @@
#ifndef _XT_POLICY_H
#define _XT_POLICY_H
#define XT_POLICY_MAX_ELEM 4
enum xt_policy_flags
{
XT_POLICY_MATCH_IN = 0x1,
XT_POLICY_MATCH_OUT = 0x2,
XT_POLICY_MATCH_NONE = 0x4,
XT_POLICY_MATCH_STRICT = 0x8,
};
enum xt_policy_modes
{
XT_POLICY_MODE_TRANSPORT,
XT_POLICY_MODE_TUNNEL
};
struct xt_policy_spec
{
u_int8_t saddr:1,
daddr:1,
proto:1,
mode:1,
spi:1,
reqid:1;
};
union xt_policy_addr
{
struct in_addr a4;
struct in6_addr a6;
};
struct xt_policy_elem
{
union xt_policy_addr saddr;
union xt_policy_addr smask;
union xt_policy_addr daddr;
union xt_policy_addr dmask;
u_int32_t spi;
u_int32_t reqid;
u_int8_t proto;
u_int8_t mode;
struct xt_policy_spec match;
struct xt_policy_spec invert;
};
struct xt_policy_info
{
struct xt_policy_elem pol[XT_POLICY_MAX_ELEM];
u_int16_t flags;
u_int16_t len;
};
#endif /* _XT_POLICY_H */

View file

@ -65,35 +65,8 @@ struct arpt_arp {
u_int16_t invflags;
};
struct arpt_entry_target
{
union {
struct {
u_int16_t target_size;
/* Used by userspace */
char name[ARPT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t target_size;
/* Used inside the kernel */
struct arpt_target *target;
} kernel;
/* Total length */
u_int16_t target_size;
} u;
unsigned char data[0];
};
struct arpt_standard_target
{
struct arpt_entry_target target;
int verdict;
};
#define arpt_entry_target xt_entry_target
#define arpt_standard_target xt_standard_target
/* Values for "flag" field in struct arpt_ip (general arp structure).
* No flags defined yet.
@ -263,8 +236,10 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e
*/
#ifdef __KERNEL__
#define arpt_register_target(tgt) xt_register_target(NF_ARP, tgt)
#define arpt_unregister_target(tgt) xt_unregister_target(NF_ARP, tgt)
#define arpt_register_target(tgt) \
({ (tgt)->family = NF_ARP; \
xt_register_target(tgt); })
#define arpt_unregister_target(tgt) xt_unregister_target(tgt)
extern int arpt_register_table(struct arpt_table *table,
const struct arpt_replace *repl);

View file

@ -47,22 +47,6 @@ enum nf_br_hook_priorities {
#define BRNF_BRIDGED 0x08
#define BRNF_NF_BRIDGE_PREROUTING 0x10
static inline
struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
struct nf_bridge_info **nf_bridge = &(skb->nf_bridge);
if ((*nf_bridge = kmalloc(sizeof(**nf_bridge), GFP_ATOMIC)) != NULL) {
atomic_set(&(*nf_bridge)->use, 1);
(*nf_bridge)->mask = 0;
(*nf_bridge)->physindev = (*nf_bridge)->physoutdev = NULL;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
(*nf_bridge)->netoutdev = NULL;
#endif
}
return *nf_bridge;
}
/* Only used in br_forward.c */
static inline
@ -77,17 +61,6 @@ void nf_bridge_maybe_copy_header(struct sk_buff *skb)
}
}
static inline
void nf_bridge_save_header(struct sk_buff *skb)
{
int header_size = 16;
if (skb->protocol == __constant_htons(ETH_P_8021Q))
header_size = 18;
memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
}
/* This is called by the IP fragmenting code and it ensures there is
* enough room for the encapsulating header (if there is one). */
static inline

View file

@ -29,6 +29,7 @@ union ip_conntrack_expect_proto {
};
/* Add protocol helper include file here */
#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
@ -37,6 +38,7 @@ union ip_conntrack_expect_proto {
/* per conntrack: application helper private data */
union ip_conntrack_help {
/* insert conntrack helper private data (master) here */
struct ip_ct_h323_master ct_h323_info;
struct ip_ct_pptp_master ct_pptp_info;
struct ip_ct_ftp_master ct_ftp_info;
struct ip_ct_irc_master ct_irc_info;

View file

@ -0,0 +1,30 @@
#ifndef _IP_CONNTRACK_H323_H
#define _IP_CONNTRACK_H323_H
#ifdef __KERNEL__
#define RAS_PORT 1719
#define Q931_PORT 1720
#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */
/* This structure exists only once per master */
struct ip_ct_h323_master {
/* Original and NATed Q.931 or H.245 signal ports */
u_int16_t sig_port[IP_CT_DIR_MAX];
/* Original and NATed RTP ports */
u_int16_t rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX];
union {
/* RAS connection timeout */
u_int32_t timeout;
/* Next TPKT length (for separate TPKT header and data) */
u_int16_t tpkt_len[IP_CT_DIR_MAX];
};
};
#endif
#endif

View file

@ -23,7 +23,7 @@ struct ip_nat_seq {
* modification (if any) */
u_int32_t correction_pos;
/* sequence number offset before and after last modification */
int32_t offset_before, offset_after;
int16_t offset_before, offset_after;
};
/* Single range specification. */

View file

@ -52,61 +52,9 @@ struct ipt_ip {
u_int8_t invflags;
};
struct ipt_entry_match
{
union {
struct {
u_int16_t match_size;
/* Used by userspace */
char name[IPT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t match_size;
/* Used inside the kernel */
struct ipt_match *match;
} kernel;
/* Total length */
u_int16_t match_size;
} u;
unsigned char data[0];
};
struct ipt_entry_target
{
union {
struct {
u_int16_t target_size;
/* Used by userspace */
char name[IPT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t target_size;
/* Used inside the kernel */
struct ipt_target *target;
} kernel;
/* Total length */
u_int16_t target_size;
} u;
unsigned char data[0];
};
struct ipt_standard_target
{
struct ipt_entry_target target;
int verdict;
};
#define ipt_entry_match xt_entry_match
#define ipt_entry_target xt_entry_target
#define ipt_standard_target xt_standard_target
#define ipt_counters xt_counters
@ -321,11 +269,15 @@ ipt_get_target(struct ipt_entry *e)
#include <linux/init.h>
extern void ipt_init(void) __init;
#define ipt_register_target(tgt) xt_register_target(AF_INET, tgt)
#define ipt_unregister_target(tgt) xt_unregister_target(AF_INET, tgt)
#define ipt_register_target(tgt) \
({ (tgt)->family = AF_INET; \
xt_register_target(tgt); })
#define ipt_unregister_target(tgt) xt_unregister_target(tgt)
#define ipt_register_match(mtch) xt_register_match(AF_INET, mtch)
#define ipt_unregister_match(mtch) xt_unregister_match(AF_INET, mtch)
#define ipt_register_match(mtch) \
({ (mtch)->family = AF_INET; \
xt_register_match(mtch); })
#define ipt_unregister_match(mtch) xt_unregister_match(mtch)
//#define ipt_register_table(tbl, repl) xt_register_table(AF_INET, tbl, repl)
//#define ipt_unregister_table(tbl) xt_unregister_table(AF_INET, tbl)

View file

@ -1,58 +1,21 @@
#ifndef _IPT_POLICY_H
#define _IPT_POLICY_H
#define IPT_POLICY_MAX_ELEM 4
#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
enum ipt_policy_flags
{
IPT_POLICY_MATCH_IN = 0x1,
IPT_POLICY_MATCH_OUT = 0x2,
IPT_POLICY_MATCH_NONE = 0x4,
IPT_POLICY_MATCH_STRICT = 0x8,
};
/* ipt_policy_flags */
#define IPT_POLICY_MATCH_IN XT_POLICY_MATCH_IN
#define IPT_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT
#define IPT_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE
#define IPT_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT
enum ipt_policy_modes
{
IPT_POLICY_MODE_TRANSPORT,
IPT_POLICY_MODE_TUNNEL
};
/* ipt_policy_modes */
#define IPT_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT
#define IPT_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL
struct ipt_policy_spec
{
u_int8_t saddr:1,
daddr:1,
proto:1,
mode:1,
spi:1,
reqid:1;
};
union ipt_policy_addr
{
struct in_addr a4;
struct in6_addr a6;
};
struct ipt_policy_elem
{
union ipt_policy_addr saddr;
union ipt_policy_addr smask;
union ipt_policy_addr daddr;
union ipt_policy_addr dmask;
u_int32_t spi;
u_int32_t reqid;
u_int8_t proto;
u_int8_t mode;
struct ipt_policy_spec match;
struct ipt_policy_spec invert;
};
struct ipt_policy_info
{
struct ipt_policy_elem pol[IPT_POLICY_MAX_ELEM];
u_int16_t flags;
u_int16_t len;
};
#define ipt_policy_spec xt_policy_spec
#define ipt_policy_addr xt_policy_addr
#define ipt_policy_elem xt_policy_elem
#define ipt_policy_info xt_policy_info
#endif /* _IPT_POLICY_H */

View file

@ -56,60 +56,9 @@ struct ip6t_ip6 {
u_int8_t invflags;
};
/* FIXME: If alignment in kernel different from userspace? --RR */
struct ip6t_entry_match
{
union {
struct {
u_int16_t match_size;
/* Used by userspace */
char name[IP6T_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t match_size;
/* Used inside the kernel */
struct ip6t_match *match;
} kernel;
/* Total length */
u_int16_t match_size;
} u;
unsigned char data[0];
};
struct ip6t_entry_target
{
union {
struct {
u_int16_t target_size;
/* Used by userspace */
char name[IP6T_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
} user;
struct {
u_int16_t target_size;
/* Used inside the kernel */
struct ip6t_target *target;
} kernel;
/* Total length */
u_int16_t target_size;
} u;
unsigned char data[0];
};
struct ip6t_standard_target
{
struct ip6t_entry_target target;
int verdict;
};
#define ip6t_entry_match xt_entry_match
#define ip6t_entry_target xt_entry_target
#define ip6t_standard_target xt_standard_target
#define ip6t_counters xt_counters
@ -334,11 +283,15 @@ ip6t_get_target(struct ip6t_entry *e)
#include <linux/init.h>
extern void ip6t_init(void) __init;
#define ip6t_register_target(tgt) xt_register_target(AF_INET6, tgt)
#define ip6t_unregister_target(tgt) xt_unregister_target(AF_INET6, tgt)
#define ip6t_register_target(tgt) \
({ (tgt)->family = AF_INET6; \
xt_register_target(tgt); })
#define ip6t_unregister_target(tgt) xt_unregister_target(tgt)
#define ip6t_register_match(match) xt_register_match(AF_INET6, match)
#define ip6t_unregister_match(match) xt_unregister_match(AF_INET6, match)
#define ip6t_register_match(match) \
({ (match)->family = AF_INET6; \
xt_register_match(match); })
#define ip6t_unregister_match(match) xt_unregister_match(match)
extern int ip6t_register_table(struct ip6t_table *table,
const struct ip6t_replace *repl);

View file

@ -1,58 +1,21 @@
#ifndef _IP6T_POLICY_H
#define _IP6T_POLICY_H
#define IP6T_POLICY_MAX_ELEM 4
#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
enum ip6t_policy_flags
{
IP6T_POLICY_MATCH_IN = 0x1,
IP6T_POLICY_MATCH_OUT = 0x2,
IP6T_POLICY_MATCH_NONE = 0x4,
IP6T_POLICY_MATCH_STRICT = 0x8,
};
/* ip6t_policy_flags */
#define IP6T_POLICY_MATCH_IN XT_POLICY_MATCH_IN
#define IP6T_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT
#define IP6T_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE
#define IP6T_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT
enum ip6t_policy_modes
{
IP6T_POLICY_MODE_TRANSPORT,
IP6T_POLICY_MODE_TUNNEL
};
/* ip6t_policy_modes */
#define IP6T_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT
#define IP6T_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL
struct ip6t_policy_spec
{
u_int8_t saddr:1,
daddr:1,
proto:1,
mode:1,
spi:1,
reqid:1;
};
union ip6t_policy_addr
{
struct in_addr a4;
struct in6_addr a6;
};
struct ip6t_policy_elem
{
union ip6t_policy_addr saddr;
union ip6t_policy_addr smask;
union ip6t_policy_addr daddr;
union ip6t_policy_addr dmask;
u_int32_t spi;
u_int32_t reqid;
u_int8_t proto;
u_int8_t mode;
struct ip6t_policy_spec match;
struct ip6t_policy_spec invert;
};
struct ip6t_policy_info
{
struct ip6t_policy_elem pol[IP6T_POLICY_MAX_ELEM];
u_int16_t flags;
u_int16_t len;
};
#define ip6t_policy_spec xt_policy_spec
#define ip6t_policy_addr xt_policy_addr
#define ip6t_policy_elem xt_policy_elem
#define ip6t_policy_info xt_policy_info
#endif /* _IP6T_POLICY_H */

View file

@ -151,6 +151,7 @@ struct netlink_skb_parms
extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, gfp_t allocation);

View file

@ -86,8 +86,9 @@
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
* modified from process context, or only modified from interrupt context.
* In this case, the field should be commented here.
* modified from process context and protected from preemption, or only
* modified from interrupt context. In this case, the field should be
* commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
@ -239,22 +240,19 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags)
#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags)
#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)
#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) is_highmem(page_zone(page))
@ -329,8 +327,8 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
#ifdef CONFIG_SWAP
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)

View file

@ -95,6 +95,11 @@ enum pci_channel_state {
pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (pci_bus_flags_t) 1,
};
/*
* The pci_dev structure is used to describe PCI devices.
*/
@ -203,7 +208,7 @@ struct pci_bus {
char name[48];
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
unsigned short pad2;
pci_bus_flags_t bus_flags; /* Inherited by child busses */
struct device *bridge;
struct class_device class_dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@ -485,9 +490,9 @@ void pdev_sort_resources(struct pci_dev *, struct resource_list *);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
int (*)(struct pci_dev *, u8, u8));
#define HAVE_PCI_REQ_REGIONS 2
int pci_request_regions(struct pci_dev *, char *);
int pci_request_regions(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int pci_request_region(struct pci_dev *, int, char *);
int pci_request_region(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
/* drivers/pci/bus.c */
@ -516,6 +521,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus* bus);
/* kmem_cache style wrapper around pci_alloc_consistent() */

View file

@ -852,6 +852,8 @@
#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432
#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512
#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522
#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422
#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432
#define PCI_VENDOR_ID_CYRIX 0x1078
#define PCI_DEVICE_ID_CYRIX_5510 0x0000
@ -1369,6 +1371,7 @@
#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205
#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
@ -1857,16 +1860,24 @@
#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
#define PCI_DEVICE_ID_TIGON3_5714 0x1668
#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
#define PCI_DEVICE_ID_TIGON3_5780 0x166a
#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
#define PCI_DEVICE_ID_TIGON3_5750 0x1676
#define PCI_DEVICE_ID_TIGON3_5751 0x1677
#define PCI_DEVICE_ID_TIGON3_5715 0x1678
#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
#define PCI_DEVICE_ID_TIGON3_5754 0x167a
#define PCI_DEVICE_ID_TIGON3_5755 0x167b
#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696
#define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c
#define PCI_DEVICE_ID_TIGON3_5789 0x169d
#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6

View file

@ -188,6 +188,8 @@ extern void device_power_up(void);
extern void device_resume(void);
#ifdef CONFIG_PM
extern suspend_disk_method_t pm_disk_mode;
extern int device_suspend(pm_message_t state);
#define device_set_wakeup_enable(dev,val) \
@ -215,7 +217,6 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
static inline void dpm_runtime_resume(struct device * dev)
{
}
#endif

View file

@ -14,6 +14,7 @@
struct proc_dir_entry;
struct pt_regs;
struct notifier_block;
/* init basic kernel profiler */
void __init profile_init(void);
@ -32,7 +33,6 @@ enum profile_type {
#ifdef CONFIG_PROFILING
struct notifier_block;
struct task_struct;
struct mm_struct;

View file

@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#define __DQUOT_VERSION__ "dquot_6.5.1"
#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
@ -215,7 +216,7 @@ struct dquot {
struct list_head dq_inuse; /* List of all quotas */
struct list_head dq_free; /* Free list element */
struct list_head dq_dirty; /* List of dirty dquots */
struct semaphore dq_lock; /* dquot IO lock */
struct mutex dq_lock; /* dquot IO lock */
atomic_t dq_count; /* Use count */
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
@ -285,8 +286,8 @@ struct quota_format_type {
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
struct semaphore dqio_sem; /* lock device while I/O in progress */
struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */

View file

@ -130,6 +130,6 @@ struct r1bio_s {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
#define R1BIO_Returned 4
#define R1BIO_Returned 6
#endif

View file

@ -113,8 +113,6 @@ struct rcu_data {
DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
extern struct rcu_ctrlblk rcu_ctrlblk;
extern struct rcu_ctrlblk rcu_bh_ctrlblk;
/*
* Increment the quiescent state counter.

281 include/linux/relay.h Normal file
View file

@ -0,0 +1,281 @@
/*
* linux/include/linux/relay.h
*
* Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
* Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
*
* CONFIG_RELAY definitions and declarations
*/
#ifndef _LINUX_RELAY_H
#define _LINUX_RELAY_H
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kref.h>
/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
/*
* Tracks changes to rchan/rchan_buf structs
*/
#define RELAYFS_CHANNEL_VERSION 6
/*
* Per-cpu relay channel buffer
*/
struct rchan_buf
{
void *start; /* start of channel buffer */
void *data; /* start of current sub-buffer */
size_t offset; /* current offset into sub-buffer */
size_t subbufs_produced; /* count of sub-buffers produced */
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
struct work_struct wake_readers; /* reader wake-up work struct */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
size_t *padding; /* padding counts per sub-buffer */
size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
unsigned int cpu; /* this buf's cpu */
} ____cacheline_aligned;
/*
* Relay channel data structure
*/
struct rchan
{
u32 version; /* the version of this struct */
size_t subbuf_size; /* sub-buffer size */
size_t n_subbufs; /* number of sub-buffers per buffer */
size_t alloc_size; /* total buffer size allocated */
struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
};
/*
* Relay channel client callbacks
*/
struct rchan_callbacks
{
/*
* subbuf_start - called on buffer-switch to a new sub-buffer
* @buf: the channel buffer containing the new sub-buffer
* @subbuf: the start of the new sub-buffer
* @prev_subbuf: the start of the previous sub-buffer
* @prev_padding: unused space at the end of previous sub-buffer
*
* The client should return 1 to continue logging, 0 to stop
* logging.
*
* NOTE: subbuf_start will also be invoked when the buffer is
* created, so that the first sub-buffer can be initialized
* if necessary. In this case, prev_subbuf will be NULL.
*
* NOTE: the client can reserve bytes at the beginning of the new
* sub-buffer by calling subbuf_start_reserve() in this callback.
*/
int (*subbuf_start) (struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding);
/*
* buf_mapped - relay buffer mmap notification
* @buf: the channel buffer
* @filp: relay file pointer
*
* Called when a relay file is successfully mmapped
*/
void (*buf_mapped)(struct rchan_buf *buf,
struct file *filp);
/*
* buf_unmapped - relay buffer unmap notification
* @buf: the channel buffer
* @filp: relay file pointer
*
* Called when a relay file is successfully unmapped
*/
void (*buf_unmapped)(struct rchan_buf *buf,
struct file *filp);
/*
* create_buf_file - create file to represent a relay channel buffer
* @filename: the name of the file to create
* @parent: the parent of the file to create
* @mode: the mode of the file to create
* @buf: the channel buffer
* @is_global: outparam - set non-zero if the buffer should be global
*
* Called during relay_open(), once for each per-cpu buffer,
* to allow the client to create a file to be used to
* represent the corresponding channel buffer. If the file is
* created outside of relay, the parent must also exist in
* that filesystem.
*
* The callback should return the dentry of the file created
* to represent the relay buffer.
*
* Setting the is_global outparam to a non-zero value will
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
* See Documentation/filesystems/relayfs.txt for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
int mode,
struct rchan_buf *buf,
int *is_global);
/*
* remove_buf_file - remove file representing a relay channel buffer
* @dentry: the dentry of the file to remove
*
* Called during relay_close(), once for each per-cpu buffer,
* to allow the client to remove a file used to represent a
* channel buffer.
*
* The callback should return 0 if successful, negative if not.
*/
int (*remove_buf_file)(struct dentry *dentry);
};
/*
* CONFIG_RELAY kernel API, kernel/relay.c
*/
struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
struct rchan_callbacks *cb);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
extern void relay_subbufs_consumed(struct rchan *chan,
unsigned int cpu,
size_t consumed);
extern void relay_reset(struct rchan *chan);
extern int relay_buf_full(struct rchan_buf *buf);
extern size_t relay_switch_subbuf(struct rchan_buf *buf,
size_t length);
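A hedged usage sketch of the exported API (all my_* identifiers are assumed client code, including the callbacks required by struct rchan_callbacks below):
static struct rchan_callbacks my_relay_cb = {
	.subbuf_start	 = my_subbuf_start,	/* returns 1 to keep logging */
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
};

static struct rchan *my_chan;

static int my_trace_setup(struct dentry *parent)
{
	/* 8 sub-buffers of 64KiB each, allocated per cpu */
	my_chan = relay_open("trace", parent, 64 * 1024, 8, &my_relay_cb);
	return my_chan ? 0 : -ENOMEM;
}
Records would then be logged with relay_write(my_chan, &rec, sizeof(rec)), as described by the kernel-doc that follows.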
/**
* relay_write - write data into the channel
* @chan: relay channel
* @data: data to be written
* @length: number of bytes to write
*
* Writes data into the current cpu's channel buffer.
*
* Protects the buffer by disabling interrupts. Use this
* if you might be logging from interrupt context. Try
* __relay_write() if you know you won't be logging from
* interrupt context.
*/
static inline void relay_write(struct rchan *chan,
const void *data,
size_t length)
{
unsigned long flags;
struct rchan_buf *buf;
local_irq_save(flags);
buf = chan->buf[smp_processor_id()];
if (unlikely(buf->offset + length > chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
buf->offset += length;
local_irq_restore(flags);
}
/**
* __relay_write - write data into the channel
* @chan: relay channel
* @data: data to be written
* @length: number of bytes to write
*
* Writes data into the current cpu's channel buffer.
*
* Protects the buffer by disabling preemption. Use
* relay_write() if you might be logging from interrupt
* context.
*/
static inline void __relay_write(struct rchan *chan,
const void *data,
size_t length)
{
struct rchan_buf *buf;
buf = chan->buf[get_cpu()];
if (unlikely(buf->offset + length > buf->chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
buf->offset += length;
put_cpu();
}
/**
* relay_reserve - reserve slot in channel buffer
* @chan: relay channel
* @length: number of bytes to reserve
*
* Returns pointer to reserved slot, NULL if full.
*
* Reserves a slot in the current cpu's channel buffer.
* Does not protect the buffer at all - caller must provide
* appropriate synchronization.
*/
static inline void *relay_reserve(struct rchan *chan, size_t length)
{
void *reserved;
struct rchan_buf *buf = chan->buf[smp_processor_id()];
if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
length = relay_switch_subbuf(buf, length);
if (!length)
return NULL;
}
reserved = buf->data + buf->offset;
buf->offset += length;
return reserved;
}
/**
* subbuf_start_reserve - reserve bytes at the start of a sub-buffer
* @buf: relay channel buffer
* @length: number of bytes to reserve
*
* Helper function used to reserve bytes at the beginning of
* a sub-buffer in the subbuf_start() callback.
*/
static inline void subbuf_start_reserve(struct rchan_buf *buf,
size_t length)
{
BUG_ON(length >= buf->chan->subbuf_size - 1);
buf->offset = length;
}
/*
* exported relay file operations, kernel/relay.c
*/
extern struct file_operations relay_file_operations;
#endif /* _LINUX_RELAY_H */

View file

@ -11,8 +11,6 @@
#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_
#include <linux/interrupt.h>
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@ -95,6 +93,8 @@ struct rtc_pll_info {
#ifdef __KERNEL__
#include <linux/interrupt.h>
typedef struct rtc_task {
void (*func)(void *private_data);
void *private_data;

View file

@ -199,6 +199,7 @@ enum
#define RTPROT_BIRD 12 /* BIRD */
#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
#define RTPROT_XORP 14 /* XORP */
#define RTPROT_NTK 15 /* Netsukuku */
/* rtm_scope
@ -733,6 +734,8 @@ enum
#define IFLA_MAP IFLA_MAP
IFLA_WEIGHT,
#define IFLA_WEIGHT IFLA_WEIGHT
IFLA_OPERSTATE,
IFLA_LINKMODE,
__IFLA_MAX
};
@ -836,6 +839,7 @@ enum
#define RTMGRP_IPV4_IFADDR 0x10
#define RTMGRP_IPV4_MROUTE 0x20
#define RTMGRP_IPV4_ROUTE 0x40
#define RTMGRP_IPV4_RULE 0x80
#define RTMGRP_IPV6_IFADDR 0x100
#define RTMGRP_IPV6_MROUTE 0x200
@ -866,7 +870,8 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE
RTNLGRP_IPV4_ROUTE,
#define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE
RTNLGRP_NOP1,
RTNLGRP_IPV4_RULE,
#define RTNLGRP_IPV4_RULE RTNLGRP_IPV4_RULE
RTNLGRP_IPV6_IFADDR,
#define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR
RTNLGRP_IPV6_MROUTE,
@ -905,6 +910,7 @@ struct tcamsg
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/mutex.h>
extern size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size);
static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
@ -1036,24 +1042,17 @@ __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
extern struct semaphore rtnl_sem;
#define rtnl_shlock() down(&rtnl_sem)
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
#define rtnl_shunlock() do { up(&rtnl_sem); \
if (rtnl && rtnl->sk_receive_queue.qlen) \
rtnl->sk_data_ready(rtnl, 0); \
} while(0)
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern int rtnl_lock_interruptible(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
#define ASSERT_RTNL() do { \
if (unlikely(down_trylock(&rtnl_sem) == 0)) { \
up(&rtnl_sem); \
if (unlikely(rtnl_trylock())) { \
rtnl_unlock(); \
printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
__FILE__, __LINE__); \
dump_stack(); \

View file

@ -706,6 +706,7 @@ struct task_struct {
prio_array_t *array;
unsigned short ioprio;
unsigned int btrace_seq;
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;

View file

@ -1286,7 +1286,8 @@ struct security_operations {
int (*socket_setsockopt) (struct socket * sock, int level, int optname);
int (*socket_shutdown) (struct socket * sock, int how);
int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
int (*socket_getpeersec) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen);
int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
void (*sk_free_security) (struct sock *sk);
unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir);
@ -2741,10 +2742,16 @@ static inline int security_sock_rcv_skb (struct sock * sk,
return security_ops->socket_sock_rcv_skb (sk, skb);
}
static inline int security_socket_getpeersec(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
{
return security_ops->socket_getpeersec(sock, optval, optlen, len);
return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
}
static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
u32 *seclen)
{
return security_ops->socket_getpeersec_dgram(skb, secdata, seclen);
}
static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
@ -2863,8 +2870,14 @@ static inline int security_sock_rcv_skb (struct sock * sk,
return 0;
}
static inline int security_socket_getpeersec(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
{
return -ENOPROTOOPT;
}
static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
u32 *seclen)
{
return -ENOPROTOOPT;
}

View file

@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <linux/mutex.h>
struct seq_operations;
struct file;
@ -19,7 +19,7 @@ struct seq_file {
size_t count;
loff_t index;
loff_t version;
struct semaphore sem;
struct mutex lock;
struct seq_operations *op;
void *private;
};

View file

@ -369,6 +369,9 @@ void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int uart_set_options(struct uart_port *port, struct console *co, int baud,
int parity, int bits, int flow);
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
void (*putchar)(struct uart_port *, int));
/*
* Port/driver registration/removal

View file

@ -270,7 +270,6 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
__u32 nfmark;
struct nf_conntrack *nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct sk_buff *nfct_reasm;
@ -278,6 +277,7 @@ struct sk_buff {
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
__u32 nfmark;
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@ -304,6 +304,7 @@ struct sk_buff {
#include <asm/system.h>
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone);
@ -403,22 +404,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
* atomic change.
*/
/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
*
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
static inline void kfree_skb(struct sk_buff *skb)
{
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
__kfree_skb(skb);
}
/**
* skb_cloned - is the buffer a clone
* @skb: buffer to check
@ -1174,12 +1159,14 @@ static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
*/
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, int len)
const void *start, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_HW)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@ -1351,16 +1338,6 @@ static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
kfree_skb(skb);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put_reasm(skb->nfct_reasm);
skb->nfct_reasm = NULL;
#endif
}
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
@ -1373,6 +1350,20 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put_reasm(skb->nfct_reasm);
skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
}
#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

View file

@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* Poison objects */
#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
@ -118,7 +117,7 @@ extern void *kzalloc(size_t, gfp_t);
*/
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
if (n != 0 && size > INT_MAX / n)
if (n != 0 && size > ULONG_MAX / n)
return NULL;
return kzalloc(n * size, flags);
}
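/*
 * Illustrative sketch, not part of this header: allocating a zeroed
 * array and letting kcalloc() do the n * size overflow check.  The
 * element type is an invention for this example.
 */
struct my_entry {
	unsigned long	key;
	void		*data;
};

static struct my_entry *my_alloc_table(size_t nr)
{
	/* returns NULL if nr * sizeof(struct my_entry) would overflow */
	return kcalloc(nr, sizeof(struct my_entry), GFP_KERNEL);
}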

View file

@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
extern int smp_call_function (void (*func) (void *info), void *info,
int retry, int wait);
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
/*
* Call a function on all processors
*/
static inline int on_each_cpu(void (*func) (void *info), void *info,
int retry, int wait)
{
int ret = 0;
preempt_disable();
ret = smp_call_function(func, info, retry, wait);
func(info);
preempt_enable();
return ret;
}
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
#define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
#define on_each_cpu(func,info,retry,wait) \
({ \
local_irq_disable(); \
func(info); \
local_irq_enable(); \
0; \
})
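/*
 * Illustrative sketch, not part of this header: resetting a per-cpu
 * counter on every cpu, including the caller's.  On UP kernels the
 * macro above just runs the function locally with interrupts disabled.
 * The counter is an invention for this example.
 */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_reset_counter(void *unused)
{
	__get_cpu_var(my_counter) = 0;
}

static void my_reset_all_counters(void)
{
	/* retry == 0, wait == 1: block until every cpu has run it */
	on_each_cpu(my_reset_counter, NULL, 0, 1);
}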
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)

View file

@ -150,6 +150,7 @@ __KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
#define SCM_SECURITY 0x03 /* rw: security label */
struct ucred {
__u32 pid;

View file

@ -36,7 +36,7 @@ struct svc_sock {
struct list_head sk_deferred; /* deferred requests that need to
* be revisited */
struct semaphore sk_sem; /* to serialize sending data */
struct mutex sk_mutex; /* to serialize sending data */
int (*sk_recvfrom)(struct svc_rqst *rqstp);
int (*sk_sendto)(struct svc_rqst *rqstp);

View file

@ -172,9 +172,24 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern int try_to_free_pages(struct zone **, gfp_t);
extern int shrink_all_memory(int);
extern unsigned long try_to_free_pages(struct zone **, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
/* possible outcome of pageout() */
typedef enum {
/* failed to write page out, page is locked */
PAGE_KEEP,
/* move page to the active list, page is locked */
PAGE_ACTIVATE,
/* page has been sent to the disk successfully, page is unlocked */
PAGE_SUCCESS,
/* page is clean and locked */
PAGE_CLEAN,
} pageout_t;
extern pageout_t pageout(struct page *page, struct address_space *mapping);
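/*
 * Illustrative sketch, not part of this header and not the real vmscan
 * logic: one way a reclaim path might act on the pageout() outcomes
 * enumerated above.  A non-zero return here just means "progress was
 * made toward freeing the page".
 */
static int my_try_to_clean(struct page *page, struct address_space *mapping)
{
	switch (pageout(page, mapping)) {
	case PAGE_KEEP:
		return 0;		/* write failed, keep it (still locked) */
	case PAGE_ACTIVATE:
		SetPageActive(page);	/* push it back toward the active list */
		return 0;
	case PAGE_SUCCESS:
		return 1;		/* writeback started, page was unlocked */
	case PAGE_CLEAN:
		return 1;		/* nothing to write, page still locked */
	}
	return 0;
}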
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct page *, struct page *);
extern void migrate_page_copy(struct page *, struct page *);
extern int migrate_page_remove_references(struct page *, struct page *, int);
extern int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed);
extern int fail_migrate_page(struct page *, struct page *);
#else
static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
/* Possible settings for the migrate_page() method in address_operations */
#define migrate_page NULL
#define fail_migrate_page NULL
#endif
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
@ -238,14 +234,15 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v
/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int type);
extern swp_entry_t get_swap_page_of_type(int);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);

View file

@ -211,6 +211,7 @@ enum
NET_SCTP=17,
NET_LLC=18,
NET_NETFILTER=19,
NET_DCCP=20,
};
/* /proc/sys/kernel/random */
@ -261,6 +262,8 @@ enum
NET_CORE_DEV_WEIGHT=17,
NET_CORE_SOMAXCONN=18,
NET_CORE_BUDGET=19,
NET_CORE_AEVENT_ETIME=20,
NET_CORE_AEVENT_RSEQTH=21,
};
/* /proc/sys/net/ethernet */
@ -397,6 +400,9 @@ enum
NET_TCP_CONG_CONTROL=110,
NET_TCP_ABC=111,
NET_IPV4_IPFRAG_MAX_DIST=112,
NET_TCP_MTU_PROBING=113,
NET_TCP_BASE_MSS=114,
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
};
enum {
@ -451,6 +457,7 @@ enum
NET_IPV4_CONF_ARP_ANNOUNCE=18,
NET_IPV4_CONF_ARP_IGNORE=19,
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
__NET_IPV4_CONF_MAX
};
@ -531,6 +538,11 @@ enum {
NET_IPV6_MAX_DESYNC_FACTOR=15,
NET_IPV6_MAX_ADDRESSES=16,
NET_IPV6_FORCE_MLD_VERSION=17,
NET_IPV6_ACCEPT_RA_DEFRTR=18,
NET_IPV6_ACCEPT_RA_PINFO=19,
NET_IPV6_ACCEPT_RA_RTR_PREF=20,
NET_IPV6_RTR_PROBE_INTERVAL=21,
NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
__NET_IPV6_MAX
};
@ -562,6 +574,21 @@ enum {
__NET_NEIGH_MAX
};
/* /proc/sys/net/dccp */
enum {
NET_DCCP_DEFAULT=1,
};
/* /proc/sys/net/dccp/default */
enum {
NET_DCCP_DEFAULT_SEQ_WINDOW = 1,
NET_DCCP_DEFAULT_RX_CCID = 2,
NET_DCCP_DEFAULT_TX_CCID = 3,
NET_DCCP_DEFAULT_ACK_RATIO = 4,
NET_DCCP_DEFAULT_SEND_ACKVEC = 5,
NET_DCCP_DEFAULT_SEND_NDP = 6,
};
/* /proc/sys/net/ipx */
enum {
NET_IPX_PPROP_BROADCASTING=1,

View file

@ -343,6 +343,12 @@ struct tcp_sock {
__u32 seq;
__u32 time;
} rcvq_space;
/* TCP-specific MTU probe information. */
struct {
__u32 probe_seq_start;
__u32 probe_seq_end;
} mtu_probe;
};
static inline struct tcp_sock *tcp_sk(const struct sock *sk)

View file

@ -24,6 +24,7 @@
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
#include <linux/screen_info.h>
#include <linux/mutex.h>
#include <asm/system.h>
@ -231,8 +232,8 @@ struct tty_struct {
int canon_data;
unsigned long canon_head;
unsigned int canon_column;
struct semaphore atomic_read;
struct semaphore atomic_write;
struct mutex atomic_read_lock;
struct mutex atomic_write_lock;
unsigned char *write_buf;
int write_cnt;
spinlock_t read_lock;
@ -319,8 +320,7 @@ extern void tty_ldisc_put(int);
extern void tty_wakeup(struct tty_struct *tty);
extern void tty_ldisc_flush(struct tty_struct *tty);
struct semaphore;
extern struct semaphore tty_sem;
extern struct mutex tty_mutex;
/* n_tty.c */
extern struct tty_ldisc tty_ldisc_N_TTY;

View file

@ -7,14 +7,8 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c
extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
#ifdef INCLUDE_INLINE_FUNCS
#define _INLINE_ extern
#else
#define _INLINE_ static __inline__
#endif
_INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
static inline int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
{
struct tty_buffer *tb = tty->buf.tail;
if (tb && tb->active && tb->used < tb->size) {
@ -25,7 +19,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}
_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
static inline void tty_schedule_flip(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
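/*
 * Illustrative sketch, not part of this header: a receive path pushing
 * one good character to the line discipline and scheduling the flip.
 * The calling driver and character source are assumptions for this
 * example.
 */
static void my_uart_rx_char(struct tty_struct *tty, unsigned char ch)
{
	tty_insert_flip_char(tty, ch, TTY_NORMAL);
	tty_schedule_flip(tty);
}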

View file

@ -13,7 +13,7 @@
#ifndef _UDF_FS_SB_H
#define _UDF_FS_SB_H 1
#include <asm/semaphore.h>
#include <linux/mutex.h>
#pragma pack(1)
@ -111,7 +111,7 @@ struct udf_sb_info
/* VAT inode */
struct inode *s_vat;
struct semaphore s_alloc_sem;
struct mutex s_alloc_mutex;
};
#endif /* _UDF_FS_SB_H */

View file

@ -1018,8 +1018,6 @@ extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
unsigned char descindex, void *buf, int size);
extern int usb_get_status(struct usb_device *dev,
int type, int target, void *data);
extern int usb_get_string(struct usb_device *dev,
unsigned short langid, unsigned char index, void *buf, int size);
extern int usb_string(struct usb_device *dev, int index,
char *buf, size_t size);

View file

@ -801,7 +801,9 @@ struct usb_gadget_driver {
* Call this in your gadget driver's module initialization function,
* to tell the underlying usb controller driver about your driver.
* The driver's bind() function will be called to bind it to a
* gadget. This function must be called in a context that can sleep.
* gadget before this registration call returns. It's expected that
* the bind() functions will be in init sections.
* This function must be called in a context that can sleep.
*/
int usb_gadget_register_driver (struct usb_gadget_driver *driver);
@ -814,7 +816,8 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver);
* going away. If the controller is connected to a USB host,
* it will first disconnect(). The driver is also requested
* to unbind() and clean up any device state, before this procedure
* finally returns.
* finally returns. It's expected that the unbind() functions
* will be in exit sections, so they may not be linked in some kernels.
* This function must be called in a context that can sleep.
*/
int usb_gadget_unregister_driver (struct usb_gadget_driver *driver);
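/*
 * Illustrative sketch, not part of this header: a gadget driver module
 * registering at init time and unregistering at exit, as the comments
 * above describe.  The driver structure is an invention for this
 * example; its bind()/unbind() and other methods would be filled in by
 * the real driver.
 */
static struct usb_gadget_driver my_gadget_driver;

static int __init my_gadget_init(void)
{
	/* bind() is called before this returns */
	return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	/* disconnects from the host (if attached), then unbind() */
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);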

View file

@ -17,11 +17,12 @@
#include <linux/time.h> /* need struct timeval */
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mutex.h>
#endif
#include <linux/compiler.h> /* need __user */
#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.15 */
#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.17 */
#define HAVE_V4L2 1
/*
@ -48,6 +49,16 @@
#ifdef __KERNEL__
/* Minor device allocation */
#define MINOR_VFL_TYPE_GRABBER_MIN 0
#define MINOR_VFL_TYPE_GRABBER_MAX 63
#define MINOR_VFL_TYPE_RADIO_MIN 64
#define MINOR_VFL_TYPE_RADIO_MAX 127
#define MINOR_VFL_TYPE_VTX_MIN 192
#define MINOR_VFL_TYPE_VTX_MAX 223
#define MINOR_VFL_TYPE_VBI_MIN 224
#define MINOR_VFL_TYPE_VBI_MAX 255
#define VFL_TYPE_GRABBER 0
#define VFL_TYPE_VBI 1
#define VFL_TYPE_RADIO 2
@ -80,7 +91,7 @@ struct video_device
/* for videodev.c internal usage -- please don't touch */
int users; /* video_exclusive_{open|close} ... */
struct semaphore lock; /* ... helper function uses these */
struct mutex lock; /* ... helper function uses these */
char devfs_name[64]; /* devfs */
struct class_device class_dev; /* sysfs */
};
@ -952,13 +963,68 @@ struct v4l2_sliced_vbi_format
__u32 reserved[2]; /* must be zero */
};
#define V4L2_SLICED_TELETEXT_B (0x0001)
#define V4L2_SLICED_VPS (0x0400)
#define V4L2_SLICED_CAPTION_525 (0x1000)
#define V4L2_SLICED_WSS_625 (0x4000)
/* Teletext World System Teletext
(WST), defined on ITU-R BT.653-2 */
#define V4L2_SLICED_TELETEXT_PAL_B (0x000001)
#define V4L2_SLICED_TELETEXT_PAL_C (0x000002)
#define V4L2_SLICED_TELETEXT_NTSC_B (0x000010)
#define V4L2_SLICED_TELETEXT_SECAM (0x000020)
#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
/* Teletext North American Broadcast Teletext Specification
(NABTS), defined on ITU-R BT.653-2 */
#define V4L2_SLICED_TELETEXT_NTSC_C (0x000040)
#define V4L2_SLICED_TELETEXT_NTSC_D (0x000080)
/* Video Program System, defined on ETS 300 231 */
#define V4L2_SLICED_VPS (0x000400)
/* Closed Caption, defined on EIA-608 */
#define V4L2_SLICED_CAPTION_525 (0x001000)
#define V4L2_SLICED_CAPTION_625 (0x002000)
/* Wide Screen System, defined on ITU-R BT1119.1 */
#define V4L2_SLICED_WSS_625 (0x004000)
/* Wide Screen System, defined on IEC 61880 */
#define V4L2_SLICED_WSS_525 (0x008000)
/* Vertical Interval Timecode (VITC), defined on SMPTE 12M */
#define V4l2_SLICED_VITC_625 (0x010000)
#define V4l2_SLICED_VITC_525 (0x020000)
#define V4L2_SLICED_TELETEXT_B (V4L2_SLICED_TELETEXT_PAL_B |\
V4L2_SLICED_TELETEXT_NTSC_B)
#define V4L2_SLICED_TELETEXT (V4L2_SLICED_TELETEXT_PAL_B |\
V4L2_SLICED_TELETEXT_PAL_C |\
V4L2_SLICED_TELETEXT_SECAM |\
V4L2_SLICED_TELETEXT_NTSC_B |\
V4L2_SLICED_TELETEXT_NTSC_C |\
V4L2_SLICED_TELETEXT_NTSC_D)
#define V4L2_SLICED_CAPTION (V4L2_SLICED_CAPTION_525 |\
V4L2_SLICED_CAPTION_625)
#define V4L2_SLICED_WSS (V4L2_SLICED_WSS_525 |\
V4L2_SLICED_WSS_625)
#define V4L2_SLICED_VITC (V4L2_SLICED_VITC_525 |\
V4L2_SLICED_VITC_625)
#define V4L2_SLICED_VBI_525 (V4L2_SLICED_TELETEXT_NTSC_B |\
V4L2_SLICED_TELETEXT_NTSC_C |\
V4L2_SLICED_TELETEXT_NTSC_D |\
V4L2_SLICED_CAPTION_525 |\
V4L2_SLICED_WSS_525 |\
V4l2_SLICED_VITC_525)
#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_PAL_B |\
V4L2_SLICED_TELETEXT_PAL_C |\
V4L2_SLICED_TELETEXT_SECAM |\
V4L2_SLICED_VPS |\
V4L2_SLICED_CAPTION_625 |\
V4L2_SLICED_WSS_625 |\
V4l2_SLICED_VITC_625)
struct v4l2_sliced_vbi_cap
{

View file

@ -73,6 +73,11 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
int vt_waitactive(int vt);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
#ifdef CONFIG_VT
int is_console_suspend_safe(void);
#else
static inline int is_console_suspend_safe(void) { return 1; }
#endif
/*
* vc_screen.c shares this temporary buffer with the console write code so that

View file

@ -1,10 +1,10 @@
/*
* This file define a set of standard wireless extensions
*
* Version : 19 18.3.05
* Version : 20 17.2.06
*
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
* Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
* Copyright (c) 1997-2006 Jean Tourrilhes, All Rights Reserved.
*/
#ifndef _LINUX_WIRELESS_H
@ -80,7 +80,7 @@
* (there is some stuff that will be added in the future...)
* I just plan to increment with each new version.
*/
#define WIRELESS_EXT 19
#define WIRELESS_EXT 20
/*
* Changes :
@ -204,6 +204,10 @@
* - Add IW_QUAL_ALL_UPDATED and IW_QUAL_ALL_INVALID macros
* - Add explicit flag to tell stats are in dBm : IW_QUAL_DBM
* - Add IW_IOCTL_IDX() and IW_EVENT_IDX() macros
*
* V19 to V20
* ----------
* - RtNetlink requests support (SET/GET)
*/
/**************************** CONSTANTS ****************************/

Some files were not shown because too many files have changed in this diff.