Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

	include/asm-x86/statfs.h
David Woodhouse 2008-10-13 17:13:56 +01:00
commit e758936e02
4459 changed files with 219962 additions and 116660 deletions


@ -126,6 +126,7 @@ header-y += pci_regs.h
header-y += pfkeyv2.h
header-y += pg.h
header-y += phantom.h
header-y += phonet.h
header-y += pkt_cls.h
header-y += pkt_sched.h
header-y += posix_types.h
@ -167,7 +168,8 @@ unifdef-y += acct.h
unifdef-y += adb.h
unifdef-y += adfs_fs.h
unifdef-y += agpgart.h
ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
$(srctree)/include/asm-$(SRCARCH)/a.out.h),)
unifdef-y += a.out.h
endif
unifdef-y += apm_bios.h
@ -179,6 +181,7 @@ unifdef-y += audit.h
unifdef-y += auto_fs.h
unifdef-y += auxvec.h
unifdef-y += binfmts.h
unifdef-y += blktrace_api.h
unifdef-y += capability.h
unifdef-y += capi.h
unifdef-y += cciss_ioctl.h
@ -231,6 +234,7 @@ unifdef-y += if_fddi.h
unifdef-y += if_frad.h
unifdef-y += if_ltalk.h
unifdef-y += if_link.h
unifdef-y += if_phonet.h
unifdef-y += if_pppol2tp.h
unifdef-y += if_pppox.h
unifdef-y += if_tr.h
@ -258,7 +262,8 @@ unifdef-y += kd.h
unifdef-y += kernelcapi.h
unifdef-y += kernel.h
unifdef-y += keyboard.h
ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
$(srctree)/include/asm-$(SRCARCH)/kvm.h),)
unifdef-y += kvm.h
endif
unifdef-y += llc.h
@ -297,7 +302,6 @@ unifdef-y += parport.h
unifdef-y += patchkey.h
unifdef-y += pci.h
unifdef-y += personality.h
unifdef-y += pim.h
unifdef-y += pktcdvd.h
unifdef-y += pmu.h
unifdef-y += poll.h


@ -30,6 +30,7 @@
#define __LINUX_ATA_H__
#include <linux/types.h>
#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@ -88,6 +89,7 @@ enum {
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
ATA_ID_CFA_POWER = 160,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
ATA_ID_SERNO_LEN = 20,
@ -557,6 +559,15 @@ static inline int ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
static inline int ata_id_flush_enabled(const u16 *id)
{
if (ata_id_has_flush(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
}
static inline int ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@ -564,6 +575,19 @@ static inline int ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
static inline int ata_id_flush_ext_enabled(const u16 *id)
{
if (ata_id_has_flush_ext(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
/*
* some Maxtor disks have bit 13 defined incorrectly
* so check bit 10 too
*/
return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
}
static inline int ata_id_has_lba48(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@ -573,6 +597,15 @@ static inline int ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
static inline int ata_id_lba48_enabled(const u16 *id)
{
if (ata_id_has_lba48(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
}
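The helpers added above all share one idiom: an IDENTIFY DEVICE word is only trusted when its top two bits read 0b01, i.e. the (word & 0xC000) == 0x4000 test, which filters out the reserved all-zero and all-one patterns a drive may return. A minimal caller sketch, assuming a u16 id[] buffer filled from IDENTIFY DEVICE (pick_flush_command() itself is hypothetical):

static u8 pick_flush_command(const u16 *id)
{
	if (ata_id_flush_ext_enabled(id))
		return ATA_CMD_FLUSH_EXT;	/* 48-bit FLUSH CACHE EXT */
	if (ata_id_flush_enabled(id))
		return ATA_CMD_FLUSH;		/* 28-bit FLUSH CACHE */
	return 0;				/* nothing to flush */
}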
static inline int ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@ -644,7 +677,15 @@ static inline unsigned int ata_id_major_version(const u16 *id)
static inline int ata_id_is_sata(const u16 *id)
{
return ata_id_major_version(id) >= 5 && id[ATA_ID_HW_CONFIG] == 0;
/*
* See if word 93 is 0 AND drive is at least ATA-5 compatible
* verifying that word 80 by casting it to a signed type --
* this trick allows us to filter out the reserved values of
* 0x0000 and 0xffff along with the earlier ATA revisions...
*/
if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020)
return 1;
return 0;
}
static inline int ata_id_has_tpm(const u16 *id)
@ -667,6 +708,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
return 0;
}
static inline int ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
(id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
id[ATA_ID_CFSSE] & (1 << 13))
return 1;
return 0;
}
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@ -691,6 +741,11 @@ static inline int ata_id_is_cfa(const u16 *id)
return 0;
}
static inline int ata_id_is_ssd(const u16 *id)
{
return id[ATA_ID_ROT_SPEED] == 0x01;
}
static inline int ata_drive_40wire(const u16 *dev_id)
{
if (ata_id_is_sata(dev_id))
@ -727,6 +782,76 @@ static inline int atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
/*
* ata_id_is_lba_capacity_ok() performs a sanity check on
* the claimed LBA capacity value for the device.
*
* Returns 1 if LBA capacity looks sensible, 0 otherwise.
*
* It is called only once for each device.
*/
static inline int ata_id_is_lba_capacity_ok(u16 *id)
{
unsigned long lba_sects, chs_sects, head, tail;
/* No non-LBA info .. so valid! */
if (id[ATA_ID_CYLS] == 0)
return 1;
lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
/*
* The ATA spec tells large drives to return
* C/H/S = 16383/16/63 independent of their size.
* Some drives can be jumpered to use 15 heads instead of 16.
* Some drives can be jumpered to use 4092 cyls instead of 16383.
*/
if ((id[ATA_ID_CYLS] == 16383 ||
(id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
id[ATA_ID_SECTORS] == 63 &&
(id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
(lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
return 1;
chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
/* perform a rough sanity check on lba_sects: within 10% is OK */
if (lba_sects - chs_sects < chs_sects/10)
return 1;
/* some drives have the word order reversed */
head = (lba_sects >> 16) & 0xffff;
tail = lba_sects & 0xffff;
lba_sects = head | (tail << 16);
if (lba_sects - chs_sects < chs_sects/10) {
*(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
return 1; /* LBA capacity is (now) good */
}
return 0; /* LBA capacity value may be bad */
}
static inline void ata_id_to_hd_driveid(u16 *id)
{
#ifdef __BIG_ENDIAN
/* accessed in struct hd_driveid as 8-bit values */
id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
/* as 32-bit values */
*(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
*(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
/* as 64-bit value */
*(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
#endif
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
@ -745,7 +870,7 @@ static inline int ata_ok(u8 status)
static inline int lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number */
return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)


@ -26,21 +26,8 @@
#ifdef CONFIG_BLOCK
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x) 0
#define BIOVEC_VIRT_OVERSIZE(x) 0
#endif
#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY 0
#endif
#define BIO_DEBUG
#ifdef BIO_DEBUG
@ -88,25 +75,14 @@ struct bio {
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned short bi_phys_segments;
/* Number of segments after physical and DMA remapping
* hardware coalescing is performed.
*/
unsigned short bi_hw_segments;
unsigned int bi_phys_segments;
unsigned int bi_size; /* residual I/O count */
/*
* To keep track of the max hw size, we account for the
* sizes of the first and last virtually mergeable segments
* in this bio
*/
unsigned int bi_hw_front_size;
unsigned int bi_hw_back_size;
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
unsigned int bi_comp_cpu; /* completion CPU */
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
@ -126,11 +102,14 @@ struct bio {
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-of-bounds error */
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@ -144,18 +123,31 @@ struct bio {
/*
* bio bi_rw flags
*
* bit 0 -- read (not set) or write (set)
* bit 0 -- data direction
* If not set, bio is a read from device. If set, it's a write to device.
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
submitted IO to be completed before this one is issued.
* bit 3 -- fail fast, don't want low level driver retries
* bit 4 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
* bit 5 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
* bit 6 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
*/
#define BIO_RW 0
#define BIO_RW_AHEAD 1
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC 4
#define BIO_RW_META 5
#define BIO_RW_DISCARD 6
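To make the new discard bit concrete, here is a minimal sketch of issuing a dataless barrier discard by hand, mirroring what the DISCARD_BARRIER helper added to fs.h (further down in this commit) expands to; start_sector, nr_sects and bdev are assumed caller-provided, and a real caller would also set bi_end_io:

struct bio *bio = bio_alloc(GFP_KERNEL, 0);

bio->bi_sector = start_sector;
bio->bi_bdev = bdev;
bio->bi_size = nr_sects << 9;	/* no bio_vecs: a dataless request */
submit_bio((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER), bio);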
/*
* upper 16 bits of bi_rw define the io priority of this bio
@ -185,14 +177,15 @@ struct bio {
#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio)
{
if (bio->bi_vcnt)
return bio_iovec(bio)->bv_len >> 9;
return 0;
else /* dataless requests such as discard */
return bio->bi_size >> 9;
}
static inline void *bio_data(struct bio *bio)
@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif
#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@ -319,15 +310,14 @@ struct bio_pair {
atomic_t cnt;
int error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
int first_sectors);
extern mempool_t *bio_split_pool;
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);
@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
struct block_device *,
struct sg_iovec *, int, int);
struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@ -359,14 +350,24 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
int, int);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *, struct sg_iovec *,
int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
* Allow queuer to specify a completion CPU for this bio
*/
static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
{
bio->bi_comp_cpu = cpu;
}
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
/*
* Check whether this bio carries any data or not. A NULL bio is allowed.
*/
static inline int bio_has_data(struct bio *bio)
{
return bio && bio->bi_io_vec != NULL;
}
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
#define bip_for_each_vec(bvl, bip, i) \
__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
static inline int bio_integrity(struct bio *bio)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
return bio->bi_integrity != NULL;
#else
return 0;
#endif
}
#define bio_integrity(bio) (bio->bi_integrity != NULL)
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);


@ -16,7 +16,9 @@
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <asm/scatterlist.h>
@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_PM_SUSPEND, /* suspend request */
REQ_TYPE_PM_RESUME, /* resume request */
REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
REQ_TYPE_FLUSH, /* flush request */
REQ_TYPE_SPECIAL, /* driver defined type */
REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
/*
@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
*
*/
enum {
/*
* just examples for now
*/
REQ_LB_OP_EJECT = 0x40, /* eject request */
REQ_LB_OP_FLUSH = 0x41, /* flush device */
REQ_LB_OP_FLUSH = 0x41, /* flush request */
REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
};
/*
* request type modified bits. first three bits match BIO_RW* bits, important
* request type modified bits. first two bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
@ -111,6 +111,7 @@ enum rq_flag_bits {
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@ -140,12 +141,14 @@ enum rq_flag_bits {
*/
struct request {
struct list_head queuelist;
struct list_head donelist;
struct call_single_data csd;
int cpu;
struct request_queue *q;
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
/* Maintain bio traversal state for part by part I/O submission.
* hard_* are block layer internals, no driver should touch them!
@ -190,13 +193,6 @@ struct request {
*/
unsigned short nr_phys_segments;
/* Number of scatter-gather addr+len pairs after
* physical and DMA remapping hardware coalescing is performed.
* This is the number of scatter-gather entries the driver
* will actually have to deal with after DMA mapping is done.
*/
unsigned short nr_hw_segments;
unsigned short ioprio;
void *special;
@ -220,6 +216,8 @@ struct request {
void *data;
void *sense;
unsigned long deadline;
struct list_head timeout_list;
unsigned int timeout;
int retries;
@ -233,6 +231,11 @@ struct request {
struct request *next_rq;
};
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
}
/*
* State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
* requests. Some step values could eventually be made generic.
@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
struct bio_vec;
struct bvec_merge_data {
@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
enum blk_eh_timer_return {
BLK_EH_NOT_HANDLED,
BLK_EH_HANDLED,
BLK_EH_RESET_TIMER,
};
typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
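A hypothetical handler a driver might register through blk_queue_rq_timed_out() (declared further down in this header); mydrv_command_active() stands in for whatever hardware state the driver can actually inspect:

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	if (mydrv_command_active(rq))		/* hardware still working on it */
		return BLK_EH_RESET_TIMER;	/* re-arm and keep waiting */
	return BLK_EH_NOT_HANDLED;		/* defer to the driver's own error handling */
}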
enum blk_queue_state {
Queue_down,
@ -307,10 +320,13 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
prepare_discard_fn *prepare_discard_fn;
merge_bvec_fn *merge_bvec_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
/*
* Dispatch queue sorting
@ -385,6 +401,10 @@ struct request_queue
unsigned int nr_sorted;
unsigned int in_flight;
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
/*
* sg stuff
*/
@ -421,6 +441,10 @@ struct request_queue
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
static inline int queue_is_locked(struct request_queue *q)
{
@ -526,7 +550,10 @@ enum {
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@ -536,16 +563,18 @@ enum {
#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq) \
(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
#define RQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
(blk_discard_rq(rq) || blk_fs_request((rq))))
/*
* q->prep_rq_fn return values
@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_MMU */
struct rq_map_data {
struct page **pages;
int page_order;
int nr_entries;
};
struct req_iterator {
int i;
struct bio *bio;
@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct sg_iovec *, int, unsigned int);
struct rq_map_data *, struct sg_iovec *, int,
unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
extern int blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
unsigned int nr_bytes,
int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_update_request(struct request *rq, int error,
unsigned int nr_bytes);
/*
* blk_end_request() takes bytes instead of sectors as a complete size.
@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
@ -837,14 +887,22 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
}
extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
sector_t sector, sector_t nr_sects, gfp_t);
static inline int sb_issue_discard(struct super_block *sb,
sector_t block, sector_t nr_blocks)
{
block <<= (sb->s_blocksize_bits - 9);
nr_blocks <<= (sb->s_blocksize_bits - 9);
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}
/*
* command filter functions
*/
extern int blk_verify_command(struct blk_cmd_filter *filter,
unsigned char *cmd, int has_write_perm);
extern int blk_register_filter(struct gendisk *disk);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define MAX_PHYS_SEGMENTS 128
@ -876,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
return q ? q->dma_alignment : 511;
}
static inline int blk_rq_aligned(struct request_queue *q, void *addr,
unsigned int len)
{
unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
return !((unsigned long)addr & alignment) && !(len & alignment);
}
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
@ -902,7 +967,7 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@ -947,49 +1012,19 @@ struct blk_integrity {
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
if (bi)
return bi->tuple_size;
return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
return bdev->bd_disk->integrity;
}
static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi)
return bi->tag_size;
return 0;
}
static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi == NULL)
return 0;
if (rw == READ && bi->verify_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_READ))
return 1;
if (rw == WRITE && bi->generate_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_WRITE))
return 1;
return 0;
return disk->integrity;
}
static inline int blk_integrity_rq(struct request *rq)
@ -1006,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
#define blk_rq_count_integrity_sg(a) (0)
#define blk_rq_map_integrity_sg(a, b) (0)
#define bdev_get_integrity(a) (0)
#define bdev_get_tag_size(a) (0)
#define blk_get_integrity(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
#define blk_integrity_unregister(a) do { } while (0);


@ -1,8 +1,10 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/relay.h>
#endif
/*
* Trace categories
@ -21,6 +23,7 @@ enum blktrace_cat {
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_AHEAD = 1 << 11, /* readahead */
BLK_TC_META = 1 << 12, /* metadata */
BLK_TC_DISCARD = 1 << 13, /* discard requests */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@ -47,6 +50,7 @@ enum blktrace_act {
__BLK_TA_SPLIT, /* bio was split */
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
__BLK_TA_ABORT, /* request aborted */
};
/*
@ -77,6 +81,7 @@ enum blktrace_notify {
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@ -89,17 +94,17 @@ enum blktrace_notify {
* The trace itself
*/
struct blk_io_trace {
u32 magic; /* MAGIC << 8 | version */
u32 sequence; /* event number */
u64 time; /* in microseconds */
u64 sector; /* disk offset */
u32 bytes; /* transfer length */
u32 action; /* what happened */
u32 pid; /* who did it */
u32 device; /* device number */
u32 cpu; /* on what cpu did it happen */
u16 error; /* completion error */
u16 pdu_len; /* length of data after this trace */
__u32 magic; /* MAGIC << 8 | version */
__u32 sequence; /* event number */
__u64 time; /* in microseconds */
__u64 sector; /* disk offset */
__u32 bytes; /* transfer length */
__u32 action; /* what happened */
__u32 pid; /* who did it */
__u32 device; /* device number */
__u32 cpu; /* on what cpu did it happen */
__u16 error; /* completion error */
__u16 pdu_len; /* length of data after this trace */
};
/*
@ -117,6 +122,23 @@ enum {
Blktrace_stopped,
};
#define BLKTRACE_BDEV_SIZE 32
/*
* User setup structure passed with BLKTRACESTART
*/
struct blk_user_trace_setup {
char name[BLKTRACE_BDEV_SIZE]; /* output */
__u16 act_mask; /* input */
__u32 buf_size; /* input */
__u32 buf_nr; /* input */
__u64 start_lba;
__u64 end_lba;
__u32 pid;
};
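From user space this structure drives the trace ioctls; roughly what the blktrace utility does is sketched below, assuming fd is an open block device. BLKTRACESETUP is the companion ioctl defined next to BLKTRACESTART in linux/fs.h:

struct blk_user_trace_setup buts;

memset(&buts, 0, sizeof(buts));
buts.act_mask = BLK_TC_ISSUE | BLK_TC_COMPLETE;
buts.buf_size = 512 * 1024;
buts.buf_nr = 4;
if (ioctl(fd, BLKTRACESETUP, &buts) == 0)
	ioctl(fd, BLKTRACESTART);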
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
struct blk_trace {
int trace_state;
struct rchan *rchan;
@ -133,21 +155,6 @@ struct blk_trace {
atomic_t dropped;
};
/*
* User setup structure passed with BLKTRACESTART
*/
struct blk_user_trace_setup {
char name[BDEVNAME_SIZE]; /* output */
u16 act_mask; /* input */
u32 buf_size; /* input */
u32 buf_nr; /* input */
u64 start_lba;
u64 end_lba;
u32 pid;
};
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (likely(!bt))
return;
if (blk_discard_rq(rq))
rw |= (1 << BIO_RW_DISCARD);
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);


@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
extern int clockevents_program_event(struct clock_event_device *dev,
ktime_t expires, ktime_t now);
extern void clockevents_handle_noop(struct clock_event_device *dev);
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg);
#else


@ -0,0 +1,80 @@
/*
* Extend a 32-bit counter to 63 bits
*
* Author: Nicolas Pitre
* Created: December 3, 2006
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#ifndef __LINUX_CNT32_TO_63_H__
#define __LINUX_CNT32_TO_63_H__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
struct {
#if defined(__LITTLE_ENDIAN)
u32 lo, hi;
#elif defined(__BIG_ENDIAN)
u32 hi, lo;
#endif
};
u64 val;
};
/**
* cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
* @cnt_lo: The low part of the counter
*
* Many hardware clock counters are only 32 bits wide and therefore have
* a relatively short period making wrap-arounds rather frequent. This
* is a problem when implementing sched_clock() for example, where a 64-bit
* non-wrapping monotonic value is expected to be returned.
*
* To overcome that limitation, let's extend a 32-bit counter to 63 bits
* in a completely lock free fashion. Bits 0 to 31 of the clock are provided
* by the hardware while bits 32 to 62 are stored in memory. The top bit in
* memory is used to synchronize with the hardware clock half-period. When
* the top bit of both counters (hardware and in memory) differ then the
* memory is updated with a new value, incrementing it when the hardware
* counter wraps around.
*
* Because a word store in memory is atomic then the incremented value will
always be in sync with the top bit indicating to any potential concurrent
* reader if the value in memory is up to date or not with regards to the
* needed increment. And any race in updating the value in memory is harmless
* as the same value would simply be stored more than once.
*
* The only restriction for the algorithm to work properly is that this
* code must be executed at least once per each half period of the 32-bit
* counter to properly update the state bit in memory. This is usually not a
* problem in practice, but if it is then a kernel timer could be scheduled
* to manage for this code to be executed often enough.
*
* Note that the top bit (bit 63) in the returned value should be considered
* as garbage. It is not cleared here because callers are likely to use a
* multiplier on the returned value which can get rid of the top bit
* implicitly by making the multiplier even, therefore saving on a runtime
* clear-bit instruction. Otherwise caller must remember to clear the top
* bit explicitly.
*/
#define cnt32_to_63(cnt_lo) \
({ \
static volatile u32 __m_cnt_hi; \
union cnt32_to_63 __x; \
__x.hi = __m_cnt_hi; \
__x.lo = (cnt_lo); \
if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
__x.val; \
})
#endif
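A minimal sched_clock()-style user, under the assumption of a platform read_hw_counter() returning the raw 32-bit count and a CYC2NS conversion factor; per the note above, an even multiplier would discard the garbage bit 63 implicitly, but it is cleared explicitly here for clarity:

unsigned long long sched_clock(void)
{
	u64 cycles = cnt32_to_63(read_hw_counter());

	cycles &= ~(1ULL << 63);	/* drop the synchronization bit */
	return cycles * CYC2NS;		/* assumed cycles-to-ns factor */
}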


@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time.
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
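A minimal sketch of that intended use, polling a flag set from an IRQ handler; without ACCESS_ONCE() the compiler may legally hoist the load out of the loop:

static int wakeup_pending;		/* written by an interrupt handler */

static void wait_for_event(void)
{
	while (!ACCESS_ONCE(wakeup_pending))
		cpu_relax();		/* each iteration performs a fresh load */
}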


@ -10,6 +10,18 @@
#include <linux/wait.h>
/**
* struct completion - structure used to maintain state for a "completion"
*
* This is the opaque structure used to maintain the state for a "completion".
* Completions currently use a FIFO to queue threads that have to wait for
* the "completion" event.
*
* See also: complete(), wait_for_completion() (and friends _timeout,
* _interruptible, _interruptible_timeout, and _killable), init_completion(),
* and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
* INIT_COMPLETION().
*/
struct completion {
unsigned int done;
wait_queue_head_t wait;
@ -21,6 +33,14 @@ struct completion {
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
/**
* DECLARE_COMPLETION: - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure. Generally used
* for static declarations. You should use the _ONSTACK variant for automatic
* variables.
*/
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
@ -29,6 +49,13 @@ struct completion {
* completions - so we use the _ONSTACK() variant for those that
* are on the kernel stack:
*/
/**
* DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure on the kernel
* stack.
*/
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@ -36,6 +63,13 @@ struct completion {
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
/**
* init_completion: - Initialize a dynamically allocated completion
* @x: completion structure that is to be initialized
*
* This inline function will initialize a dynamically created completion
* structure.
*/
static inline void init_completion(struct completion *x)
{
x->done = 0;
@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
/**
* INIT_COMPLETION: - reinitialize a completion structure
* @x: completion structure to be reinitialized
*
* This macro should be used to reinitialize a completion structure so it can
* be reused. This is especially important after complete_all() is used.
*/
#define INIT_COMPLETION(x) ((x).done = 0)
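A small sketch tying the documented pieces together; the worker thread and do_setup() are hypothetical:

static DECLARE_COMPLETION(setup_done);

static int worker(void *data)
{
	do_setup(data);			/* assumed work */
	complete(&setup_done);		/* wake up the waiter */
	return 0;
}

/* waiter side: */
kthread_run(worker, data, "setup");
wait_for_completion(&setup_done);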


@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
#endif
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);


@ -187,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
unsigned int cpu);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@ -226,7 +227,9 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu);
/* optional */
unsigned int (*getavg) (unsigned int cpu);
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*resume) (struct cpufreq_policy *policy);


@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void)
static inline void rebuild_sched_domains(void)
{
partition_sched_domains(0, NULL, NULL);
partition_sched_domains(1, NULL, NULL);
}
#endif /* !CONFIG_CPUSETS */


@ -38,6 +38,7 @@
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@ -60,6 +61,14 @@
*/
#define CRYPTO_ALG_GENIV 0x00000200
/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
*/
#define CRYPTO_ALG_TESTED 0x00000400
/*
* Transform masks and values (for crt_flags).
*/
@ -105,6 +114,7 @@ struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@ -290,6 +300,15 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
struct rng_alg {
int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
unsigned int seedsize;
};
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
@ -298,6 +317,7 @@ struct compress_alg {
#define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
struct crypto_alg {
struct list_head cra_list;
@ -325,6 +345,7 @@ struct crypto_alg {
struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress;
struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@ -430,6 +451,12 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
struct rng_tfm {
int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
};
#define crt_ablkcipher crt_u.ablkcipher
#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
@ -437,6 +464,7 @@ struct compress_tfm {
#define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
#define crt_rng crt_u.rng
struct crypto_tfm {
@ -450,6 +478,7 @@ struct crypto_tfm {
struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
struct crypto_alg *__crt_alg;
@ -481,6 +510,10 @@ struct crypto_hash {
struct crypto_tfm base;
};
struct crypto_rng {
struct crypto_tfm base;
};
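A hedged usage sketch, assuming the crypto/rng.h wrapper functions (crypto_alloc_rng(), crypto_rng_get_bytes(), crypto_free_rng()) that accompany this interface elsewhere in the series; "stdrng" names the default system RNG:

struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
u8 buf[16];

if (!IS_ERR(rng)) {
	crypto_rng_get_bytes(rng, buf, sizeof(buf));
	crypto_free_rng(rng);
}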
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
/*
* Transform helpers which query the underlying algorithm.
*/


@ -13,7 +13,6 @@
struct dm_target;
struct dm_table;
struct dm_dev;
struct mapped_device;
struct bio_vec;
@ -84,6 +83,12 @@ void dm_error(const char *message);
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
struct dm_dev {
struct block_device *bdev;
int mode;
char name[16];
};
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
/*
* Geometry functions.
@ -231,6 +237,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
int dm_table_complete(struct dm_table *t);
/*
* Unplug all devices in a table.
*/
void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
/*
* A wrapper around vmalloc.
*/
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*-----------------------------------------------------------------
* Macros.
*---------------------------------------------------------------*/


@ -199,6 +199,11 @@ struct class {
struct class_private *p;
};
struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
extern struct kobject *sysfs_dev_block_kobj;
extern struct kobject *sysfs_dev_char_kobj;
extern int __must_check __class_register(struct class *class,
@ -213,6 +218,13 @@ extern void class_unregister(struct class *class);
__class_register(class, &__key); \
})
extern void class_dev_iter_init(struct class_dev_iter *iter,
struct class *class,
struct device *start,
const struct device_type *type);
extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
extern void class_dev_iter_exit(struct class_dev_iter *iter);
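A hedged sketch of walking a class with the new iterator (my_class is hypothetical); unlike an open-coded list walk, the iterator holds a klist reference, so devices may come and go while iterating:

struct class_dev_iter iter;
struct device *dev;

class_dev_iter_init(&iter, &my_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
	dev_info(dev, "present\n");
class_dev_iter_exit(&iter);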
extern int class_for_each_device(struct class *class, struct device *start,
void *data,
int (*fn)(struct device *dev, void *data));
@ -396,7 +408,7 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
struct list_head node;
struct klist_node knode_class;
struct class *class;
dev_t devt; /* dev_t, creates the sysfs "dev" */
struct attribute_group **groups; /* optional groups */


@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@ -65,9 +65,12 @@ struct dlm_lksb {
char * sb_lvbptr;
};
/* dlm_new_lockspace() flags */
#define DLM_LSFL_NODIR 0x00000001
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_FS 0x00000004
#define DLM_LSFL_NEWEXCL 0x00000008
#ifdef __KERNEL__


@ -26,7 +26,7 @@
/* Version of the device interface */
#define DLM_DEVICE_VERSION_MAJOR 6
#define DLM_DEVICE_VERSION_MINOR 0
#define DLM_DEVICE_VERSION_PATCH 0
#define DLM_DEVICE_VERSION_PATCH 1
/* struct passed to the lock write */
struct dlm_lock_params {


@ -48,6 +48,11 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
return addr + size <= mask;
}
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
@ -58,6 +63,13 @@ static inline int is_device_dma_capable(struct device *dev)
#define dma_sync_single dma_sync_single_for_cpu
#define dma_sync_sg dma_sync_sg_for_cpu
static inline u64 dma_get_mask(struct device *dev)
{
if (dev && dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return DMA_32BIT_MASK;
}
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)


@ -25,9 +25,99 @@
#include <linux/types.h>
#include <linux/msi.h>
#ifdef CONFIG_DMAR
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
struct pci_dev **devices; /* target device array */
int devices_cnt; /* target device count */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
struct intel_iommu *iommu;
};
extern struct list_head dmar_drhd_units;
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)
extern int dmar_table_init(void);
extern int early_dmar_detect(void);
extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
extern void detect_intel_iommu(void);
extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
#else
static inline void detect_intel_iommu(void)
{
return;
}
static inline int dmar_table_init(void)
{
return -ENODEV;
}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
struct irte {
union {
struct {
__u64 present : 1,
fpd : 1,
dst_mode : 1,
redir_hint : 1,
trigger_mode : 1,
dlvry_mode : 3,
avail : 4,
__reserved_1 : 4,
vector : 8,
__reserved_2 : 8,
dest_id : 32;
};
__u64 low;
};
union {
struct {
__u64 sid : 16,
sq : 2,
svt : 2,
__reserved_3 : 44;
};
__u64 high;
};
};
extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
u16 sub_handle);
extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
extern int flush_irte(int irq);
extern int free_irte(int irq);
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
#else
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define intr_remapping_enabled (0)
#endif
#ifdef CONFIG_DMAR
extern const char *dmar_get_fault_reason(u8 fault_reason);
/* Can't use the common MSI interrupt functions
@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern int arch_setup_dmar_msi(unsigned int irq);
/* Intel IOMMU detection and initialization functions */
extern void detect_intel_iommu(void);
extern int intel_iommu_init(void);
extern int dmar_table_init(void);
extern int early_dmar_detect(void);
extern struct list_head dmar_drhd_units;
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
u64 reg_base_addr; /* register base address*/
struct pci_dev **devices; /* target device array */
int devices_cnt; /* target device count */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
struct intel_iommu *iommu;
};
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 base_address; /* reserved base address*/
u64 end_address; /* reserved end address */
struct pci_dev **devices; /* target devices */
int devices_cnt; /* target device count */
};
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
extern int dmar_disabled;
#else
static inline void detect_intel_iommu(void)
{
return;
}
static inline int intel_iommu_init(void)
{
#ifdef CONFIG_INTR_REMAP
return dmar_dev_scope_init();
#else
return -ENODEV;
#endif
}
#endif /* !CONFIG_DMAR */
#endif /* __DMAR_H__ */


@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
@ -173,15 +174,15 @@ enum {
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
* Hack to reuse the donelist list_head as the fifo time holder while
* Hack to reuse the csd.list list_head as the fifo time holder while
* the request is in the io scheduler. Saves an unsigned long in rq.
*/
#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next)
#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
INIT_LIST_HEAD(&(rq)->donelist); \
INIT_LIST_HEAD(&(rq)->csd.list); \
} while (0)
/*


@ -360,6 +360,7 @@ typedef struct elf64_shdr {
#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
/* Note header in a PT_NOTE section */


@ -837,6 +837,8 @@ extern void ext3_truncate (struct inode *);
extern void ext3_set_inode_flags(struct inode *);
extern void ext3_get_inode_flags(struct ext3_inode_info *);
extern void ext3_set_aops(struct inode *inode);
extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
extern int ext3_ioctl (struct inode *, struct file *, unsigned int,


@ -15,10 +15,16 @@ struct floppy_struct {
sect, /* sectors per track */
head, /* nr of heads */
track, /* nr of tracks */
stretch; /* !=0 means double track steps */
stretch; /* bit 0 !=0 means double track steps */
/* bit 1 != 0 means swap sides */
/* bits 2..9 give the first sector */
/* number (the LSB is flipped) */
#define FD_STRETCH 1
#define FD_SWAPSIDES 2
#define FD_ZEROBASED 4
#define FD_SECTBASEMASK 0x3FC
#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1)
unsigned char gap, /* gap1 size */

include/linux/fiemap.h (new file, 64 lines)

@ -0,0 +1,64 @@
/*
* FS_IOC_FIEMAP ioctl infrastructure.
*
* Some portions copyright (C) 2007 Cluster File Systems, Inc
*
* Authors: Mark Fasheh <mfasheh@suse.com>
* Kalpak Shah <kalpak.shah@sun.com>
* Andreas Dilger <adilger@sun.com>
*/
#ifndef _LINUX_FIEMAP_H
#define _LINUX_FIEMAP_H
struct fiemap_extent {
__u64 fe_logical; /* logical offset in bytes for the start of
* the extent from the beginning of the file */
__u64 fe_physical; /* physical offset in bytes for the start
* of the extent from the beginning of the disk */
__u64 fe_length; /* length in bytes for this extent */
__u64 fe_reserved64[2];
__u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
__u32 fe_reserved[3];
};
struct fiemap {
__u64 fm_start; /* logical offset (inclusive) at
* which to start mapping (in) */
__u64 fm_length; /* logical length of mapping which
* userspace wants (in) */
__u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
__u32 fm_extent_count; /* size of fm_extents array (in) */
__u32 fm_reserved;
struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
};
#define FIEMAP_MAX_OFFSET (~0ULL)
#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
#define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
* Sets EXTENT_UNKNOWN. */
#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
* while fs is unmounted */
#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
* Sets EXTENT_NO_BYPASS. */
#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
* block aligned. */
#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
* Sets EXTENT_NOT_ALIGNED.*/
#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
* Sets EXTENT_NOT_ALIGNED.*/
#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
* no data (i.e. zero). */
#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
* support extents. Result
* merged for efficiency. */
#endif /* _LINUX_FIEMAP_H */
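
As a usage illustration (not part of the header): a minimal userspace sketch that asks the kernel for a file's extent map via the new FS_IOC_FIEMAP ioctl. The extent budget of 32 and the error handling are arbitrary choices for the example.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i, n = 32;	/* arbitrary extent budget for the example */
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data first */
	fm->fm_extent_count = n;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu length %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	return 0;
}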

View file

@ -86,7 +86,9 @@ extern int dir_notify_enable;
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
#define SEL_IN 1
#define SEL_OUT 2
@ -222,6 +224,7 @@ extern int dir_notify_enable;
#define BLKTRACESTART _IO(0x12,116)
#define BLKTRACESTOP _IO(0x12,117)
#define BLKTRACETEARDOWN _IO(0x12,118)
#define BLKDISCARD _IO(0x12,119)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
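
As a usage illustration for the new BLKDISCARD ioctl, a hedged userspace sketch; the argument layout (two 64-bit values, byte offset then byte length) is an assumption based on the block-layer handler this ioctl number pairs with, not something this header itself defines.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* discard first 1 MiB */
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_WRONLY);	/* argv[1]: a block device node */
	if (fd < 0 || ioctl(fd, BLKDISCARD, &range) < 0) {
		perror("BLKDISCARD");
		return 1;
	}
	return 0;
}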
@ -231,6 +234,7 @@ extern int dir_notify_enable;
#define FS_IOC_SETFLAGS _IOW('f', 2, long)
#define FS_IOC_GETVERSION _IOR('v', 1, long)
#define FS_IOC_SETVERSION _IOW('v', 2, long)
#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
#define FS_IOC32_GETFLAGS _IOR('f', 1, int)
#define FS_IOC32_SETFLAGS _IOW('f', 2, int)
#define FS_IOC32_GETVERSION _IOR('v', 1, int)
@ -291,6 +295,7 @@ extern int dir_notify_enable;
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
@ -1178,6 +1183,20 @@ extern void dentry_unhash(struct dentry *dentry);
*/
extern int file_permission(struct file *, int);
/*
* VFS FS_IOC_FIEMAP helper definitions.
*/
struct fiemap_extent_info {
unsigned int fi_flags; /* Flags as passed from user */
unsigned int fi_extents_mapped; /* Number of mapped extents */
unsigned int fi_extents_max; /* Size of fiemap_extent array */
struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
* array */
};
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/*
* File types
*
@ -1287,6 +1306,8 @@ struct inode_operations {
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset,
loff_t len);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
};
struct seq_file;
@ -1682,6 +1703,7 @@ extern void chrdev_show(struct seq_file *,off_t);
/* fs/block_dev.c */
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_HASH_SIZE 255
@ -1718,6 +1740,9 @@ extern int fs_may_remount_ro(struct super_block *);
*/
#define bio_data_dir(bio) ((bio)->bi_rw & 1)
extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
@ -1980,6 +2005,9 @@ extern int vfs_fstat(unsigned int, struct kstat *);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
extern void get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
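
To tie the pieces together, a hedged sketch of how a block-based filesystem can implement the new ->fiemap inode operation by delegating to generic_block_fiemap(); myfs_fiemap and myfs_get_block are hypothetical names, but the shape matches the ext3_fiemap hook declared earlier in this commit.

/* hypothetical filesystem glue; myfs_get_block is assumed to exist */
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    myfs_get_block);
}

static const struct inode_operations myfs_file_inode_operations = {
	.fiemap	= myfs_fiemap,
};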

View file

@ -11,12 +11,15 @@
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#ifdef CONFIG_BLOCK
#define kobj_to_dev(k) container_of(k, struct device, kobj)
#define dev_to_disk(device) container_of(device, struct gendisk, dev)
#define dev_to_part(device) container_of(device, struct hd_struct, dev)
#define kobj_to_dev(k) container_of((k), struct device, kobj)
#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
#define disk_to_dev(disk) (&(disk)->part0.__dev)
#define part_to_dev(part) (&((part)->__dev))
extern struct device_type part_type;
extern struct kobject *block_depr;
@ -55,6 +58,9 @@ enum {
UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
};
#define DISK_MAX_PARTS 256
#define DISK_NAME_LEN 32
#include <linux/major.h>
#include <linux/device.h>
#include <linux/smp.h>
@ -87,7 +93,7 @@ struct disk_stats {
struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
struct device dev;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;
#ifdef CONFIG_FAIL_MAKE_REQUEST
@ -100,6 +106,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
struct rcu_head rcu_head;
};
#define GENHD_FL_REMOVABLE 1
@ -108,100 +115,148 @@ struct hd_struct {
#define GENHD_FL_CD 8
#define GENHD_FL_UP 16
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
#define GENHD_FL_FAIL 64
#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
struct blk_scsi_cmd_filter {
unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
struct kobject kobj;
};
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
struct hd_struct *part[];
};
struct gendisk {
/* major, first_minor and minors are input parameters only,
* don't use directly. Use disk_devt() and disk_max_parts().
*/
int major; /* major number of driver */
int first_minor;
int minors; /* maximum number of minors, =1 for
* disks that can't be partitioned. */
char disk_name[32]; /* name of major driver */
struct hd_struct **part; /* [indexed by minor] */
char disk_name[DISK_NAME_LEN]; /* name of major driver */
/* Array of pointers to partitions indexed by partno.
* Protected with matching bdev lock but stat and other
* non-critical accesses use RCU. Always access through
* helpers.
*/
struct disk_part_tbl *part_tbl;
struct hd_struct part0;
struct block_device_operations *fops;
struct request_queue *queue;
void *private_data;
sector_t capacity;
int flags;
struct device *driverfs_dev; // FIXME: remove
struct device dev;
struct kobject *holder_dir;
struct kobject *slave_dir;
struct timer_rand_state *random;
int policy;
atomic_t sync_io; /* RAID */
unsigned long stamp;
int in_flight;
#ifdef CONFIG_SMP
struct disk_stats *dkstats;
#else
struct disk_stats dkstats;
#endif
struct work_struct async_notify;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *integrity;
#endif
int node_id;
};
/*
* Macros to operate on percpu disk statistics:
*
* The __ variants should only be called in critical sections. The full
* variants disable/enable preemption.
*/
static inline struct hd_struct *get_part(struct gendisk *gendiskp,
sector_t sector)
static inline struct gendisk *part_to_disk(struct hd_struct *part)
{
struct hd_struct *part;
int i;
for (i = 0; i < gendiskp->minors - 1; i++) {
part = gendiskp->part[i];
if (part && part->start_sect <= sector
&& sector < part->start_sect + part->nr_sects)
return part;
if (likely(part)) {
if (part->partno)
return dev_to_disk(part_to_dev(part)->parent);
else
return dev_to_disk(part_to_dev(part));
}
return NULL;
}
static inline int disk_max_parts(struct gendisk *disk)
{
if (disk->flags & GENHD_FL_EXT_DEVT)
return DISK_MAX_PARTS;
return disk->minors;
}
static inline bool disk_partitionable(struct gendisk *disk)
{
return disk_max_parts(disk) > 1;
}
static inline dev_t disk_devt(struct gendisk *disk)
{
return disk_to_dev(disk)->devt;
}
static inline dev_t part_devt(struct hd_struct *part)
{
return part_to_dev(part)->devt;
}
extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
static inline void disk_put_part(struct hd_struct *part)
{
if (likely(part))
put_device(part_to_dev(part));
}
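
Usage sketch (hedged; partno 1 and the pr_debug body are illustrative only):

struct hd_struct *part = disk_get_part(disk, 1);

if (part) {
	/* disk_get_part() returned with a device reference held */
	pr_debug("partition starts at sector %llu\n",
		 (unsigned long long)part->start_sect);
	disk_put_part(part);	/* drop the reference when done */
}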
/*
* Smarter partition iterator without context limits.
*/
#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
struct disk_part_iter {
struct gendisk *disk;
struct hd_struct *part;
int idx;
unsigned int flags;
};
extern void disk_part_iter_init(struct disk_part_iter *piter,
struct gendisk *disk, unsigned int flags);
extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
extern void disk_part_iter_exit(struct disk_part_iter *piter);
extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
sector_t sector);
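
A companion sketch of the iterator in use (flags and printk body illustrative):

struct disk_part_iter piter;
struct hd_struct *part;

disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
	printk(KERN_DEBUG "partno %d: %llu sectors\n",
	       part->partno, (unsigned long long)part->nr_sects);
disk_part_iter_exit(&piter);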
/*
* Macros to operate on percpu disk statistics:
*
* {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
* and should be called between disk_stat_lock() and
* disk_stat_unlock().
*
* part_stat_read() can be called at any time.
*
* part_stat_{add|set_all}() and {init|free}_part_stats are for
* internal use only.
*/
#ifdef CONFIG_SMP
#define __disk_stat_add(gendiskp, field, addnd) \
(per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd)
#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
#define disk_stat_read(gendiskp, field) \
({ \
typeof(gendiskp->dkstats->field) res = 0; \
int i; \
for_each_possible_cpu(i) \
res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
res; \
})
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
int i;
for_each_possible_cpu(i)
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
sizeof(struct disk_stats));
}
#define __part_stat_add(part, field, addnd) \
(per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd)
#define __all_stat_add(gendiskp, part, field, addnd, sector) \
({ \
if (part) \
__part_stat_add(part, field, addnd); \
__disk_stat_add(gendiskp, field, addnd); \
})
#define __part_stat_add(cpu, part, field, addnd) \
(per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
#define part_stat_read(part, field) \
({ \
typeof(part->dkstats->field) res = 0; \
typeof((part)->dkstats->field) res = 0; \
int i; \
for_each_possible_cpu(i) \
res += per_cpu_ptr(part->dkstats, i)->field; \
res += per_cpu_ptr((part)->dkstats, i)->field; \
res; \
})
@ -213,108 +268,6 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
memset(per_cpu_ptr(part->dkstats, i), value,
sizeof(struct disk_stats));
}
#else /* !CONFIG_SMP */
#define __disk_stat_add(gendiskp, field, addnd) \
(gendiskp->dkstats.field += addnd)
#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field)
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)
{
memset(&gendiskp->dkstats, value, sizeof (struct disk_stats));
}
#define __part_stat_add(part, field, addnd) \
(part->dkstats.field += addnd)
#define __all_stat_add(gendiskp, part, field, addnd, sector) \
({ \
if (part) \
part->dkstats.field += addnd; \
__disk_stat_add(gendiskp, field, addnd); \
})
#define part_stat_read(part, field) (part->dkstats.field)
static inline void part_stat_set_all(struct hd_struct *part, int value)
{
memset(&part->dkstats, value, sizeof(struct disk_stats));
}
#endif /* CONFIG_SMP */
#define disk_stat_add(gendiskp, field, addnd) \
do { \
preempt_disable(); \
__disk_stat_add(gendiskp, field, addnd); \
preempt_enable(); \
} while (0)
#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1)
#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1)
#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1)
#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1)
#define __disk_stat_sub(gendiskp, field, subnd) \
__disk_stat_add(gendiskp, field, -subnd)
#define disk_stat_sub(gendiskp, field, subnd) \
disk_stat_add(gendiskp, field, -subnd)
#define part_stat_add(gendiskp, field, addnd) \
do { \
preempt_disable(); \
__part_stat_add(gendiskp, field, addnd);\
preempt_enable(); \
} while (0)
#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1)
#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1)
#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1)
#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1)
#define __part_stat_sub(gendiskp, field, subnd) \
__part_stat_add(gendiskp, field, -subnd)
#define part_stat_sub(gendiskp, field, subnd) \
part_stat_add(gendiskp, field, -subnd)
#define all_stat_add(gendiskp, part, field, addnd, sector) \
do { \
preempt_disable(); \
__all_stat_add(gendiskp, part, field, addnd, sector); \
preempt_enable(); \
} while (0)
#define __all_stat_dec(gendiskp, field, sector) \
__all_stat_add(gendiskp, field, -1, sector)
#define all_stat_dec(gendiskp, field, sector) \
all_stat_add(gendiskp, field, -1, sector)
#define __all_stat_inc(gendiskp, part, field, sector) \
__all_stat_add(gendiskp, part, field, 1, sector)
#define all_stat_inc(gendiskp, part, field, sector) \
all_stat_add(gendiskp, part, field, 1, sector)
#define __all_stat_sub(gendiskp, part, field, subnd, sector) \
__all_stat_add(gendiskp, part, field, -subnd, sector)
#define all_stat_sub(gendiskp, part, field, subnd, sector) \
all_stat_add(gendiskp, part, field, -subnd, sector)
/* Inlines to alloc and free disk stats in struct gendisk */
#ifdef CONFIG_SMP
static inline int init_disk_stats(struct gendisk *disk)
{
disk->dkstats = alloc_percpu(struct disk_stats);
if (!disk->dkstats)
return 0;
return 1;
}
static inline void free_disk_stats(struct gendisk *disk)
{
free_percpu(disk->dkstats);
}
static inline int init_part_stats(struct hd_struct *part)
{
@ -329,14 +282,18 @@ static inline void free_part_stats(struct hd_struct *part)
free_percpu(part->dkstats);
}
#else /* CONFIG_SMP */
static inline int init_disk_stats(struct gendisk *disk)
{
return 1;
}
#else /* !CONFIG_SMP */
#define part_stat_lock() ({ rcu_read_lock(); 0; })
#define part_stat_unlock() rcu_read_unlock()
static inline void free_disk_stats(struct gendisk *disk)
#define __part_stat_add(cpu, part, field, addnd) \
((part)->dkstats.field += addnd)
#define part_stat_read(part, field) ((part)->dkstats.field)
static inline void part_stat_set_all(struct hd_struct *part, int value)
{
memset(&part->dkstats, value, sizeof(struct disk_stats));
}
static inline int init_part_stats(struct hd_struct *part)
@ -347,37 +304,71 @@ static inline int init_part_stats(struct hd_struct *part)
static inline void free_part_stats(struct hd_struct *part)
{
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#define part_stat_add(cpu, part, field, addnd) do { \
__part_stat_add((cpu), (part), field, addnd); \
if ((part)->partno) \
__part_stat_add((cpu), &part_to_disk((part))->part0, \
field, addnd); \
} while (0)
#define part_stat_dec(cpu, gendiskp, field) \
part_stat_add(cpu, gendiskp, field, -1)
#define part_stat_inc(cpu, gendiskp, field) \
part_stat_add(cpu, gendiskp, field, 1)
#define part_stat_sub(cpu, gendiskp, field, subnd) \
part_stat_add(cpu, gendiskp, field, -subnd)
static inline void part_inc_in_flight(struct hd_struct *part)
{
part->in_flight++;
if (part->partno)
part_to_disk(part)->part0.in_flight++;
}
static inline void part_dec_in_flight(struct hd_struct *part)
{
part->in_flight--;
if (part->partno)
part_to_disk(part)->part0.in_flight--;
}
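
As a usage illustration, a hedged sketch of the accounting sequence the new API implies for a completed request (rw and nr_sectors are assumed locals; ios and sectors are fields of struct disk_stats):

int cpu;

cpu = part_stat_lock();		/* pins the CPU and enters an RCU read section */
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, sectors[rw], nr_sectors);
part_dec_in_flight(part);	/* request finished; mirrored into part0 too */
part_stat_unlock();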
/* drivers/block/ll_rw_blk.c */
extern void disk_round_stats(struct gendisk *disk);
extern void part_round_stats(struct hd_struct *part);
extern void part_round_stats(int cpu, struct hd_struct *part);
/* drivers/block/genhd.c */
extern int get_blkdev_list(char *, int);
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *part);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
extern void set_device_ro(struct block_device *bdev, int flag);
extern void set_disk_ro(struct gendisk *disk, int flag);
static inline int get_disk_ro(struct gendisk *disk)
{
return disk->part0.policy;
}
/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk);
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
{
return bdev->bd_contains == bdev ? 0 : bdev->bd_part->start_sect;
return bdev->bd_part->start_sect;
}
static inline sector_t get_capacity(struct gendisk *disk)
{
return disk->capacity;
return disk->part0.nr_sects;
}
static inline void set_capacity(struct gendisk *disk, sector_t size)
{
disk->capacity = size;
disk->part0.nr_sects = size;
}
#ifdef CONFIG_SOLARIS_X86_PARTITION
@ -527,9 +518,12 @@ struct unixware_disklabel {
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
extern dev_t blk_lookup_devt(const char *name, int part);
extern char *disk_name (struct gendisk *hd, int part, char *buf);
extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
extern void blk_free_devt(dev_t devt);
extern dev_t blk_lookup_devt(const char *name, int partno);
extern char *disk_name (struct gendisk *hd, int partno, char *buf);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
extern void delete_partition(struct gendisk *, int);
@ -546,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range,
void *data);
extern void blk_unregister_region(dev_t devt, unsigned long range);
static inline struct block_device *bdget_disk(struct gendisk *disk, int index)
{
return bdget(MKDEV(disk->major, disk->first_minor) + index);
}
extern ssize_t part_size_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf);
#ifdef CONFIG_FAIL_MAKE_REQUEST
extern ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t part_fail_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void) { }
static inline dev_t blk_lookup_devt(const char *name, int part)
static inline dev_t blk_lookup_devt(const char *name, int partno)
{
dev_t devt = MKDEV(0, 0);
return devt;

View file

@ -118,7 +118,11 @@ struct gfs2_sb {
char sb_lockproto[GFS2_LOCKNAME_LEN];
char sb_locktable[GFS2_LOCKNAME_LEN];
/* In gfs1, quota and license dinodes followed */
struct gfs2_inum __pad3; /* Was quota inode in gfs1 */
struct gfs2_inum __pad4; /* Was licence inode in gfs1 */
#define GFS2_HAS_UUID 1
__u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */
};
/*

View file

@ -47,14 +47,22 @@ enum hrtimer_restart {
* HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
* HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
* does not restart the timer
* HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
* Special mode for tick emultation
* HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
* Special mode for tick emulation and
* scheduler timer. Such timers are per
* cpu and not allowed to be migrated on
* cpu unplug.
* HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
* with timer->base lock unlocked;
* used for timers which call wakeup to
* avoid lock order problems with rq->lock
*/
enum hrtimer_cb_mode {
HRTIMER_CB_SOFTIRQ,
HRTIMER_CB_IRQSAFE,
HRTIMER_CB_IRQSAFE_NO_RESTART,
HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
HRTIMER_CB_IRQSAFE_PERCPU,
HRTIMER_CB_IRQSAFE_UNLOCKED,
};
/*
@ -67,9 +75,10 @@ enum hrtimer_cb_mode {
* 0x02 callback function running
* 0x04 callback pending (high resolution mode)
*
* Special case:
* Special cases:
* 0x03 callback function running and enqueued
* (was requeued on another CPU)
* 0x09 timer was migrated on CPU hotunplug
* The "callback function running and enqueued" status is only possible on
* SMP. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
@ -87,6 +96,7 @@ enum hrtimer_cb_mode {
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02
#define HRTIMER_STATE_PENDING 0x04
#define HRTIMER_STATE_MIGRATE 0x08
/**
* struct hrtimer - the basic hrtimer structure

View file

@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/hdreg.h>
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/pm.h>
#ifdef CONFIG_BLK_DEV_IDEACPI
#include <acpi/acpi.h>
#endif
@ -87,12 +88,13 @@ struct ide_io_ports {
};
#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
#define BAD_W_STAT (BAD_R_STAT | WRERR_STAT)
#define BAD_STAT (BAD_R_STAT | DRQ_STAT)
#define DRIVE_READY (READY_STAT | SEEK_STAT)
#define BAD_CRC (ABRT_ERR | ICRC_ERR)
#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
#define DRIVE_READY (ATA_DRDY | ATA_DSC)
#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
#define SATA_NR_PORTS (3) /* 16 possible ?? */
@ -125,24 +127,41 @@ struct ide_io_ports {
#define PARTN_BITS 6 /* number of minor dev bits for partitions */
#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
#define SECTOR_SIZE 512
#define SECTOR_WORDS (SECTOR_SIZE / 4) /* number of 32bit words per sector */
#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
/*
* Timeouts for various operations:
*/
#define WAIT_DRQ (HZ/10) /* 100msec - spec allows up to 20ms */
#define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */
#define WAIT_PIDENTIFY (10*HZ) /* 10sec - should be less than 3ms (?), if all ATAPI CD is closed at boot */
#define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
#define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
#define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */
enum {
/* spec allows up to 20ms */
WAIT_DRQ = HZ / 10, /* 100ms */
/* some laptops are very slow */
WAIT_READY = 5 * HZ, /* 5s */
/* should be less than 3ms (?), if all ATAPI CD is closed at boot */
WAIT_PIDENTIFY = 10 * HZ, /* 10s */
/* worst case when spinning up */
WAIT_WORSTCASE = 30 * HZ, /* 30s */
/* maximum wait for an IRQ to happen */
WAIT_CMD = 10 * HZ, /* 10s */
/* Some drives require a longer IRQ timeout. */
WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
/*
* Some drives (for example, Seagate STT3401A Travan) require a very
* long timeout, because they don't return an interrupt or clear their
* BSY bit until after the command completes (even retension commands).
*/
WAIT_TAPE_CMD = 900 * HZ, /* 900s */
/* minimum sleep time */
WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
};
/*
* Op codes for special requests to be handled by ide_special_rq().
* Values should be in the range of 0x20 to 0x3f.
*/
#define REQ_DRIVE_RESET 0x20
#define REQ_DEVSET_EXEC 0x21
/*
* Check for an interrupt and acknowledge the interrupt status
@ -303,8 +322,8 @@ typedef enum {
ide_started, /* a drive operation was started, handler was set */
} ide_startstop_t;
struct ide_devset;
struct ide_driver_s;
struct ide_settings_s;
#ifdef CONFIG_BLK_DEV_IDEACPI
struct ide_acpi_drive_link;
@ -315,10 +334,10 @@ struct ide_acpi_hwif_link;
enum {
IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
IDE_AFLAG_MEDIA_CHANGED = (1 << 1),
/* ide-cd */
/* Drive cannot lock the door. */
IDE_AFLAG_NO_DOORLOCK = (1 << 2),
/* ide-cd */
/* Drive cannot eject the disc. */
IDE_AFLAG_NO_EJECT = (1 << 3),
/* Drive is a pre ATAPI 1.2 drive. */
@ -354,19 +373,25 @@ enum {
IDE_AFLAG_CLIK_DRIVE = (1 << 19),
/* Requires BH algorithm for packets */
IDE_AFLAG_ZIP_DRIVE = (1 << 20),
/* Write protect */
IDE_AFLAG_WP = (1 << 21),
/* Supports format progress report */
IDE_AFLAG_SRFP = (1 << 22),
/* ide-tape */
IDE_AFLAG_IGNORE_DSC = (1 << 21),
IDE_AFLAG_IGNORE_DSC = (1 << 23),
/* 0 when the tape position is unknown */
IDE_AFLAG_ADDRESS_VALID = (1 << 22),
IDE_AFLAG_ADDRESS_VALID = (1 << 24),
/* Device already opened */
IDE_AFLAG_BUSY = (1 << 23),
IDE_AFLAG_BUSY = (1 << 25),
/* Attempt to auto-detect the current user block size */
IDE_AFLAG_DETECT_BS = (1 << 24),
IDE_AFLAG_DETECT_BS = (1 << 26),
/* Currently on a filemark */
IDE_AFLAG_FILEMARK = (1 << 25),
IDE_AFLAG_FILEMARK = (1 << 27),
/* 0 = no tape is loaded, so we don't rewind after ejecting */
IDE_AFLAG_MEDIUM_PRESENT = (1 << 26)
IDE_AFLAG_MEDIUM_PRESENT = (1 << 28),
IDE_AFLAG_NO_AUTOCLOSE = (1 << 29),
};
struct ide_drive_s {
@ -378,10 +403,10 @@ struct ide_drive_s {
struct request *rq; /* current request */
struct ide_drive_s *next; /* circular list of hwgroup drives */
void *driver_data; /* extra driver data */
struct hd_driveid *id; /* drive model identification info */
u16 *id; /* identification info */
#ifdef CONFIG_IDE_PROC_FS
struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
struct ide_settings_s *settings;/* /proc/ide/ drive settings */
const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
#endif
struct hwif_s *hwif; /* actually (ide_hwif_t *) */
@ -393,16 +418,16 @@ struct ide_drive_s {
special_t special; /* special action flags */
select_t select; /* basic drive/head select reg value */
u8 keep_settings; /* restore settings after drive reset */
u8 using_dma; /* disk is using dma for read/write */
u8 retry_pio; /* retrying dma capable host in pio */
u8 state; /* retry state */
u8 waiting_for_dma; /* dma currently in progress */
u8 unmask; /* okay to unmask other irqs */
u8 noflush; /* don't attempt flushes */
u8 dsc_overlap; /* DSC overlap */
u8 nice1; /* give potential excess bandwidth */
unsigned keep_settings : 1; /* restore settings after drive reset */
unsigned using_dma : 1; /* disk is using dma for read/write */
unsigned unmask : 1; /* okay to unmask other irqs */
unsigned noflush : 1; /* don't attempt flushes */
unsigned dsc_overlap : 1; /* DSC overlap */
unsigned nice1 : 1; /* give potential excess bandwidth */
unsigned present : 1; /* drive is physically present */
unsigned dead : 1; /* device ejected hint */
unsigned id_read : 1; /* 1=id read from disk 0 = synthetic */
@ -412,23 +437,22 @@ struct ide_drive_s {
unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
unsigned no_unmask : 1; /* disallow setting unmask bit */
unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */
unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
unsigned nodma : 1; /* disallow DMA */
unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */
unsigned sleeping : 1; /* 1=sleeping & sleep field valid */
unsigned post_reset : 1;
unsigned udma33_warned : 1;
unsigned addressing : 2; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */
unsigned wcache : 1; /* status of write cache */
unsigned nowerr : 1; /* used for ignoring ATA_DF */
u8 addressing; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */
u8 quirk_list; /* considered quirky, set for a specific host */
u8 init_speed; /* transfer rate set at boot */
u8 current_speed; /* current transfer rate set */
u8 desired_speed; /* desired transfer rate set */
u8 dn; /* now wide spread use */
u8 wcache; /* status of write cache */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
u8 ready_stat; /* min status value for drive ready */
@ -436,9 +460,7 @@ struct ide_drive_s {
u8 mult_req; /* requested multiple sector setting */
u8 tune_req; /* requested drive tuning setting */
u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
u8 bad_wstat; /* used for ignoring WRERR_STAT */
u8 nowerr; /* used for ignoring WRERR_STAT */
u8 sect0; /* offset of first sector for DM6:DDO */
u8 bad_wstat; /* used for ignoring ATA_DF */
u8 head; /* "real" number of heads */
u8 sect; /* "real" sectors per track */
u8 bios_head; /* BIOS/fdisk/LILO number of heads */
@ -472,10 +494,6 @@ typedef struct ide_drive_s ide_drive_t;
#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
#define IDE_CHIPSET_PCI_MASK \
((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
struct ide_task_s;
struct ide_port_info;
@ -565,7 +583,6 @@ typedef struct hwif_s {
u8 major; /* our major number */
u8 index; /* 0 for ide0; 1 for ide1; ... */
u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
u8 bus_state; /* power state of the IDE bus */
u32 host_flags;
@ -643,6 +660,7 @@ struct ide_host {
ide_hwif_t *ports[MAX_HWIFS];
unsigned int n_ports;
struct device *dev[2];
unsigned int (*init_chipset)(struct pci_dev *);
unsigned long host_flags;
void *host_priv;
};
@ -690,9 +708,61 @@ typedef struct ide_driver_s ide_driver_t;
extern struct mutex ide_setting_mtx;
int set_io_32bit(ide_drive_t *, int);
int set_pio_mode(ide_drive_t *, int);
int set_using_dma(ide_drive_t *, int);
/*
* configurable drive settings
*/
#define DS_SYNC (1 << 0)
struct ide_devset {
int (*get)(ide_drive_t *);
int (*set)(ide_drive_t *, int);
unsigned int flags;
};
#define __DEVSET(_flags, _get, _set) { \
.flags = _flags, \
.get = _get, \
.set = _set, \
}
#define ide_devset_get(name, field) \
static int get_##name(ide_drive_t *drive) \
{ \
return drive->field; \
}
#define ide_devset_set(name, field) \
static int set_##name(ide_drive_t *drive, int arg) \
{ \
drive->field = arg; \
return 0; \
}
#define __IDE_DEVSET(_name, _flags, _get, _set) \
const struct ide_devset ide_devset_##_name = \
__DEVSET(_flags, _get, _set)
#define IDE_DEVSET(_name, _flags, _get, _set) \
static __IDE_DEVSET(_name, _flags, _get, _set)
#define ide_devset_rw(_name, _func) \
IDE_DEVSET(_name, 0, get_##_func, set_##_func)
#define ide_devset_w(_name, _func) \
IDE_DEVSET(_name, 0, NULL, set_##_func)
#define ide_devset_rw_sync(_name, _func) \
IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
#define ide_decl_devset(_name) \
extern const struct ide_devset ide_devset_##_name
ide_decl_devset(io_32bit);
ide_decl_devset(keepsettings);
ide_decl_devset(pio_mode);
ide_decl_devset(unmaskirq);
ide_decl_devset(using_dma);
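
For illustration, a hedged sketch of minting a driver-private setting from these macros; mysetting is a hypothetical name, backed here by the existing drive->nice1 field:

ide_devset_get(mysetting, nice1);	/* generates get_mysetting() */
ide_devset_set(mysetting, nice1);	/* generates set_mysetting() */
IDE_DEVSET(mysetting, DS_SYNC, get_mysetting, set_mysetting);

The DS_SYNC flag marks the setting as one whose writes must be synchronized with the request queue, which is what the REQ_DEVSET_EXEC special request and ide_devset_execute() elsewhere in this commit exist for.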
/* ATAPI packet command flags */
enum {
@ -708,6 +778,12 @@ enum {
PC_FLAG_TIMEDOUT = (1 << 7),
};
/*
* With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
* This is used for several packet commands (not for READ/WRITE commands).
*/
#define IDE_PC_BUFFER_SIZE 256
struct ide_atapi_pc {
/* actual packet bytes */
u8 c[12];
@ -737,7 +813,7 @@ struct ide_atapi_pc {
* those are more or less driver-specific and some of them are subject
* to change/removal later.
*/
u8 pc_buf[256];
u8 pc_buf[IDE_PC_BUFFER_SIZE];
/* idetape only */
struct idetape_bh *bh;
@ -754,38 +830,35 @@ struct ide_atapi_pc {
};
#ifdef CONFIG_IDE_PROC_FS
/*
* configurable drive settings
*/
#define TYPE_INT 0
#define TYPE_BYTE 1
#define TYPE_SHORT 2
#define SETTING_READ (1 << 0)
#define SETTING_WRITE (1 << 1)
#define SETTING_RW (SETTING_READ | SETTING_WRITE)
typedef int (ide_procset_t)(ide_drive_t *, int);
typedef struct ide_settings_s {
char *name;
int rw;
int data_type;
int min;
int max;
int mul_factor;
int div_factor;
void *data;
ide_procset_t *set;
int auto_remove;
struct ide_settings_s *next;
} ide_settings_t;
int ide_add_setting(ide_drive_t *, const char *, int, int, int, int, int, int, void *, ide_procset_t *set);
/*
* /proc/ide interface
*/
#define ide_devset_rw_field(_name, _field) \
ide_devset_get(_name, _field); \
ide_devset_set(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
struct ide_proc_devset {
const char *name;
const struct ide_devset *setting;
int min, max;
int (*mulf)(ide_drive_t *);
int (*divf)(ide_drive_t *);
};
#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
.name = __stringify(_name), \
.setting = &ide_devset_##_name, \
.min = _min, \
.max = _max, \
.mulf = _mulf, \
.divf = _divf, \
}
#define IDE_PROC_DEVSET(_name, _min, _max) \
__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
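
And a hedged sketch of the per-driver /proc settings table these helpers build (mydrv naming and the ranges are hypothetical; the zeroed sentinel terminating the table is an assumption about how the table walker stops):

static const struct ide_proc_devset mydrv_settings[] = {
	IDE_PROC_DEVSET(io_32bit, 0, 3),
	IDE_PROC_DEVSET(unmaskirq, 0, 1),
	IDE_PROC_DEVSET(using_dma, 0, 1),
	{ NULL },	/* assumed sentinel */
};

A driver would then point the new ide_driver_s ->settings member (added later in this file) at such a table.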
typedef struct {
const char *name;
mode_t mode;
@ -802,8 +875,6 @@ void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, ide_driver_t *);
void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *);
void ide_add_generic_settings(ide_drive_t *);
read_proc_t proc_ide_read_capacity;
read_proc_t proc_ide_read_geometry;
@ -831,7 +902,6 @@ static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
static inline void ide_add_generic_settings(ide_drive_t *drive) { ; }
#define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0;
#endif
@ -877,7 +947,6 @@ enum {
struct ide_driver_s {
const char *version;
u8 media;
unsigned supports_dsc_overlap : 1;
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
int (*end_request)(ide_drive_t *, int, int);
ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
@ -887,7 +956,8 @@ struct ide_driver_s {
void (*resume)(ide_drive_t *);
void (*shutdown)(ide_drive_t *);
#ifdef CONFIG_IDE_PROC_FS
ide_proc_entry_t *proc;
ide_proc_entry_t *proc;
const struct ide_proc_devset *settings;
#endif
};
@ -896,7 +966,17 @@ struct ide_driver_s {
int ide_device_get(ide_drive_t *);
void ide_device_put(ide_drive_t *);
int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
struct ide_ioctl_devset {
unsigned int get_ioctl;
unsigned int set_ioctl;
const struct ide_devset *setting;
};
int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
unsigned long, const struct ide_ioctl_devset *);
int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *,
unsigned, unsigned long);
extern int ide_vlb_clk;
extern int ide_pci_clk;
@ -918,14 +998,19 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
extern void ide_fix_driveid(struct hd_driveid *);
void ide_fix_driveid(u16 *);
extern void ide_fixstring(u8 *, const int, const int);
int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
extern ide_startstop_t ide_do_reset (ide_drive_t *);
extern int ide_devset_execute(ide_drive_t *drive,
const struct ide_devset *setting, int arg);
extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
@ -1049,6 +1134,8 @@ void ide_tf_read(ide_drive_t *, ide_task_t *);
void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
extern void SELECT_DRIVE(ide_drive_t *);
void SELECT_MASK(ide_drive_t *, int);
@ -1059,11 +1146,36 @@ extern int drive_is_ready(ide_drive_t *);
void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
int ide_check_atapi_device(ide_drive_t *, const char *);
void ide_init_pc(struct ide_atapi_pc *);
/*
* Special requests for ide-tape block device strategy routine.
*
* In order to service a character device command, we add special requests to
* the tail of our block device request queue and wait for their completion.
*/
enum {
REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
REQ_IDETAPE_READ = (1 << 2),
REQ_IDETAPE_WRITE = (1 << 3),
};
void ide_queue_pc_head(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
struct request *);
int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *);
int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
int (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
int));
ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
ide_handler_t *, unsigned int, ide_expiry_t *);
@ -1078,8 +1190,6 @@ int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);
int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);
extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
@ -1090,7 +1200,6 @@ extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern int ide_spin_wait_hwgroup(ide_drive_t *);
extern void ide_timer_expiry(unsigned long);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern void do_ide_request(struct request_queue *);
@ -1227,6 +1336,14 @@ int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
const struct ide_port_info *, void *);
void ide_pci_remove(struct pci_dev *);
#ifdef CONFIG_PM
int ide_pci_suspend(struct pci_dev *, pm_message_t);
int ide_pci_resume(struct pci_dev *);
#else
#define ide_pci_suspend NULL
#define ide_pci_resume NULL
#endif
void ide_map_sg(ide_drive_t *, struct request *);
void ide_init_sg_cmd(ide_drive_t *, struct request *);
@ -1238,7 +1355,7 @@ struct drive_list_entry {
const char *id_firmware;
};
int ide_in_drive_list(struct hd_driveid *, const struct drive_list_entry *);
int ide_in_drive_list(u16 *, const struct drive_list_entry *);
#ifdef CONFIG_BLK_DEV_IDEDMA
int __ide_dma_bad_drive(ide_drive_t *);
@ -1345,24 +1462,6 @@ const char *ide_xfer_verbose(u8 mode);
extern void ide_toggle_bounce(ide_drive_t *drive, int on);
extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
static inline int ide_dev_has_iordy(struct hd_driveid *id)
{
return ((id->field_valid & 2) && (id->capability & 8)) ? 1 : 0;
}
static inline int ide_dev_is_sata(struct hd_driveid *id)
{
/*
* See if word 93 is 0 AND drive is at least ATA-5 compatible
* verifying that word 80 by casting it to a signed type --
* this trick allows us to filter out the reserved values of
* 0x0000 and 0xffff along with the earlier ATA revisions...
*/
if (id->hw_config == 0 && (short)id->major_rev_num >= 0x0020)
return 1;
return 0;
}
u64 ide_get_lba_addr(struct ide_taskfile *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
@ -1434,13 +1533,6 @@ extern struct mutex ide_cfg_mtx;
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;
/* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */
#define ide_id_has_flush_cache(id) ((id)->cfs_enable_2 & 0x3000)
/* some Maxtor disks have bit 13 defined incorrectly so check bit 10 too */
#define ide_id_has_flush_cache_ext(id) \
(((id)->cfs_enable_2 & 0x2400) == 0x2400)
static inline void ide_dump_identify(u8 *id)
{
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
@ -1451,10 +1543,10 @@ static inline int hwif_to_node(ide_hwif_t *hwif)
return hwif->dev ? dev_to_node(hwif->dev) : -1;
}
static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
ide_drive_t *peer = &drive->hwif->drives[(drive->dn ^ 1) & 1];
return &hwif->drives[(drive->dn ^ 1) & 1];
return peer->present ? peer : NULL;
}
#endif /* _IDE_H */

View file

@ -471,6 +471,11 @@ struct ieee80211s_hdr {
u8 eaddr3[6];
} __attribute__ ((packed));
/* Mesh flags */
#define MESH_FLAGS_AE_A4 0x1
#define MESH_FLAGS_AE_A5_A6 0x2
#define MESH_FLAGS_PS_DEEP 0x4
/**
* struct ieee80211_quiet_ie
*
@ -643,6 +648,9 @@ struct ieee80211_mgmt {
} u;
} __attribute__ ((packed));
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
/* Control frames */
struct ieee80211_rts {
@ -708,12 +716,13 @@ struct ieee80211_ht_addt_info {
/* 802.11n HT capabilities masks */
#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
#define IEEE80211_HT_CAP_MIMO_PS 0x000C
#define IEEE80211_HT_CAP_SM_PS 0x000C
#define IEEE80211_HT_CAP_GRN_FLD 0x0010
#define IEEE80211_HT_CAP_SGI_20 0x0020
#define IEEE80211_HT_CAP_SGI_40 0x0040
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
/* 802.11n HT capability AMPDU settings */
#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
@ -736,11 +745,26 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
/* MIMO Power Save Modes */
#define WLAN_HT_CAP_MIMO_PS_STATIC 0
#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
#define WLAN_HT_CAP_MIMO_PS_INVALID 2
#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
/*
* A-MPDU buffer sizes
* According to the IEEE 802.11n spec, size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
#define IEEE80211_MAX_AMPDU_BUF 0x40
/* Spatial Multiplexing Power Save Modes */
#define WLAN_HT_CAP_SM_PS_STATIC 0
#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
#define WLAN_HT_CAP_SM_PS_INVALID 2
#define WLAN_HT_CAP_SM_PS_DISABLED 3
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0

View file

@ -24,6 +24,7 @@
#include <linux/compiler.h> /* for "__user" et al */
#define IFNAMSIZ 16
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
/* Standard interface flags (netdevice->flags). */

View file

@ -56,6 +56,7 @@
#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
#define ETH_P_CUST 0x6006 /* DEC Customer use */
#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */
#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
#define ETH_P_ATALK 0x809B /* Appletalk DDP */
#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
@ -74,8 +75,10 @@
#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
* over Ethernet
*/
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
/*
* Non DIX types. Won't clash for 1500 types.
@ -99,6 +102,9 @@
#define ETH_P_ECONET 0x0018 /* Acorn Econet */
#define ETH_P_HDLC 0x0019 /* HDLC frames */
#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */
#define ETH_P_DSA 0x001B /* Distributed Switch Arch. */
#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */
#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
/*
* This is an Ethernet frame header.

View file

@ -79,6 +79,7 @@ enum
IFLA_LINKINFO,
#define IFLA_LINKINFO IFLA_LINKINFO
IFLA_NET_NS_PID,
IFLA_IFALIAS,
__IFLA_MAX
};

include/linux/if_phonet.h Normal file
View file

@ -0,0 +1,19 @@
/*
* File: if_phonet.h
*
* Phonet interface kernel definitions
*
* Copyright (C) 2008 Nokia Corporation. All rights reserved.
*/
#ifndef LINUX_IF_PHONET_H
#define LINUX_IF_PHONET_H
#define PHONET_MIN_MTU 6 /* pn_length = 0 */
#define PHONET_MAX_MTU 65541 /* pn_length = 0xffff */
#define PHONET_DEV_MTU PHONET_MAX_MTU
#ifdef __KERNEL__
extern struct header_ops phonet_header_ops;
#endif
#endif

View file

@ -2,6 +2,7 @@
#define _IF_TUNNEL_H_
#include <linux/types.h>
#include <linux/ip.h>
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)
@ -47,4 +48,22 @@ struct ip_tunnel_prl {
/* PRL flags */
#define PRL_DEFAULT 0x0001
enum
{
IFLA_GRE_UNSPEC,
IFLA_GRE_LINK,
IFLA_GRE_IFLAGS,
IFLA_GRE_OFLAGS,
IFLA_GRE_IKEY,
IFLA_GRE_OKEY,
IFLA_GRE_LOCAL,
IFLA_GRE_REMOTE,
IFLA_GRE_TTL,
IFLA_GRE_TOS,
IFLA_GRE_PMTUDISC,
__IFLA_GRE_MAX,
};
#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1)
#endif /* _IF_TUNNEL_H_ */

View file

@ -75,6 +75,7 @@ struct in_addr {
#define IP_IPSEC_POLICY 16
#define IP_XFRM_POLICY 17
#define IP_PASSSEC 18
#define IP_TRANSPARENT 19
/* BSD compatibility */
#define IP_RECVRETOPTS IP_RETOPTS

View file

@ -25,6 +25,7 @@ struct in_device
struct in_ifaddr *ifa_list; /* IP ifaddr chain */
rwlock_t mc_list_lock;
struct ip_mc_list *mc_list; /* IP multicast filter chain */
int mc_count; /* Number of installed mcasts */
spinlock_t mc_tomb_lock;
struct ip_mc_list *mc_tomb;
unsigned long mr_v1_seen;

View file

@ -1,6 +1,20 @@
#ifndef _LINUX_IOMMU_HELPER_H
#define _LINUX_IOMMU_HELPER_H
static inline unsigned long iommu_device_max_index(unsigned long size,
unsigned long offset,
u64 dma_mask)
{
if (size + offset > dma_mask)
return dma_mask - offset + 1;
else
return size;
}
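
For example (hedged, illustrative numbers): with size = 0x10000 map entries, offset = 0x1000 and dma_mask = 0xffff, size + offset = 0x11000 exceeds the mask, so the helper clamps to 0xffff - 0x1000 + 1 = 0xf000 usable indexes; with a 32-bit mask the same call would return the full 0x10000.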
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long boundary_size);
extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
@ -8,3 +22,5 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask);
extern void iommu_area_free(unsigned long *map, unsigned long start,
unsigned int nr);
#endif

View file

@ -108,6 +108,9 @@ extern struct resource iomem_resource;
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
@ -159,9 +162,9 @@ extern struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name);
#define devm_release_region(start,n) \
#define devm_release_region(dev, start, n) \
__devm_release_region(dev, &ioport_resource, (start), (n))
#define devm_release_mem_region(start,n) \
#define devm_release_mem_region(dev, start, n) \
__devm_release_region(dev, &iomem_resource, (start), (n))
extern void __devm_release_region(struct device *dev, struct resource *parent,

View file

@ -242,4 +242,164 @@ struct ip_vs_daemon_user {
int syncid;
};
/*
*
* IPVS Generic Netlink interface definitions
*
*/
/* Generic Netlink family info */
#define IPVS_GENL_NAME "IPVS"
#define IPVS_GENL_VERSION 0x1
struct ip_vs_flags {
__be32 flags;
__be32 mask;
};
/* Generic Netlink command attributes */
enum {
IPVS_CMD_UNSPEC = 0,
IPVS_CMD_NEW_SERVICE, /* add service */
IPVS_CMD_SET_SERVICE, /* modify service */
IPVS_CMD_DEL_SERVICE, /* delete service */
IPVS_CMD_GET_SERVICE, /* get service info */
IPVS_CMD_NEW_DEST, /* add destination */
IPVS_CMD_SET_DEST, /* modify destination */
IPVS_CMD_DEL_DEST, /* delete destination */
IPVS_CMD_GET_DEST, /* get destination info */
IPVS_CMD_NEW_DAEMON, /* start sync daemon */
IPVS_CMD_DEL_DAEMON, /* stop sync daemon */
IPVS_CMD_GET_DAEMON, /* get sync daemon status */
IPVS_CMD_SET_CONFIG, /* set config settings */
IPVS_CMD_GET_CONFIG, /* get config settings */
IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */
IPVS_CMD_GET_INFO, /* get general IPVS info */
IPVS_CMD_ZERO, /* zero all counters and stats */
IPVS_CMD_FLUSH, /* flush services and dests */
__IPVS_CMD_MAX,
};
#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1)
/* Attributes used in the first level of commands */
enum {
IPVS_CMD_ATTR_UNSPEC = 0,
IPVS_CMD_ATTR_SERVICE, /* nested service attribute */
IPVS_CMD_ATTR_DEST, /* nested destination attribute */
IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */
IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */
IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */
IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */
__IPVS_CMD_ATTR_MAX,
};
#define IPVS_CMD_ATTR_MAX (__IPVS_CMD_ATTR_MAX - 1)
/*
* Attributes used to describe a service
*
* Used inside nested attribute IPVS_CMD_ATTR_SERVICE
*/
enum {
IPVS_SVC_ATTR_UNSPEC = 0,
IPVS_SVC_ATTR_AF, /* address family */
IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */
IPVS_SVC_ATTR_ADDR, /* virtual service address */
IPVS_SVC_ATTR_PORT, /* virtual service port */
IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */
IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */
IPVS_SVC_ATTR_FLAGS, /* virtual service flags */
IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */
IPVS_SVC_ATTR_NETMASK, /* persistent netmask */
IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */
__IPVS_SVC_ATTR_MAX,
};
#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
/*
* Attributes used to describe a destination (real server)
*
* Used inside nested attribute IPVS_CMD_ATTR_DEST
*/
enum {
IPVS_DEST_ATTR_UNSPEC = 0,
IPVS_DEST_ATTR_ADDR, /* real server address */
IPVS_DEST_ATTR_PORT, /* real server port */
IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */
IPVS_DEST_ATTR_WEIGHT, /* destination weight */
IPVS_DEST_ATTR_U_THRESH, /* upper threshold */
IPVS_DEST_ATTR_L_THRESH, /* lower threshold */
IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */
IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */
IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
__IPVS_DEST_ATTR_MAX,
};
#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1)
/*
* Attributes describing a sync daemon
*
* Used inside nested attribute IPVS_CMD_ATTR_DAEMON
*/
enum {
IPVS_DAEMON_ATTR_UNSPEC = 0,
IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
__IPVS_DAEMON_ATTR_MAX,
};
#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1)
/*
* Attributes used to describe service or destination entry statistics
*
* Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
*/
enum {
IPVS_STATS_ATTR_UNSPEC = 0,
IPVS_STATS_ATTR_CONNS, /* connections scheduled */
IPVS_STATS_ATTR_INPKTS, /* incoming packets */
IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */
IPVS_STATS_ATTR_INBYTES, /* incoming bytes */
IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */
IPVS_STATS_ATTR_CPS, /* current connection rate */
IPVS_STATS_ATTR_INPPS, /* current in packet rate */
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
__IPVS_STATS_ATTR_MAX,
};
#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1)
/* Attributes used in response to IPVS_CMD_GET_INFO command */
enum {
IPVS_INFO_ATTR_UNSPEC = 0,
IPVS_INFO_ATTR_VERSION, /* IPVS version number */
IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */
__IPVS_INFO_ATTR_MAX,
};
#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1)
#endif /* _IP_VS_H */

View file

@ -62,6 +62,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)

View file

@ -157,7 +157,7 @@ typedef struct {
typedef struct {
int mp_mrru; /* unused */
struct sk_buff * frags; /* fragments sl list -- use skb->next */
struct sk_buff_head frags; /* fragments sl list */
long frames; /* number of frames in the frame list */
unsigned int seq; /* last processed packet seq #: any packets
* with smaller seq # will be dropped

View file

@ -850,7 +850,8 @@ struct journal_s
*/
struct block_device *j_dev;
int j_blocksize;
unsigned long long j_blk_offset;
unsigned long long j_blk_offset;
char j_devname[BDEVNAME_SIZE+24];
/*
* Device which holds the client fs. For internal journal this will be
@ -966,6 +967,9 @@ struct journal_s
#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
#define JBD2_BARRIER 0x020 /* Use IDE barriers */
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
/*
* Function declarations for the journaling transaction and buffer
@ -1059,7 +1063,7 @@ extern void jbd2_journal_clear_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern int jbd2_journal_create (journal_t *);
extern int jbd2_journal_load (journal_t *journal);
extern void jbd2_journal_destroy (journal_t *);
extern int jbd2_journal_destroy (journal_t *);
extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);

View file

@ -182,7 +182,7 @@ extern int vsscanf(const char *, const char *, va_list)
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(char *ptr, char **retptr);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);

View file

@ -299,6 +299,7 @@ extern void key_init(void);
#define key_validate(k) 0
#define key_serial(k) 0
#define key_get(k) ({ NULL; })
#define key_revoke(k) do { } while(0)
#define key_put(k) do { } while(0)
#define key_ref_put(k) do { } while(0)
#define make_key_ref(k, p) ({ NULL; })

View file

@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *));
struct klist_node {
struct klist *n_klist;
void *n_klist; /* never access directly */
struct list_head n_node;
struct kref n_ref;
struct completion n_removed;
@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n);
struct klist_iter {
struct klist *i_klist;
struct list_head *i_head;
struct klist_node *i_cur;
};

View file

@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/fb.h>
/* Notes on locking:
*
@ -45,6 +46,8 @@ struct lcd_ops {
int (*get_contrast)(struct lcd_device *);
/* Set LCD panel contrast */
int (*set_contrast)(struct lcd_device *, int contrast);
/* Set LCD panel mode (resolutions ...) */
int (*set_mode)(struct lcd_device *, struct fb_videomode *);
/* Check if given framebuffer device is the one LCD is bound to;
return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
int (*check_fb)(struct lcd_device *, struct fb_info *);

View file

@ -146,6 +146,7 @@ enum {
ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@ -244,6 +245,7 @@ enum {
ATA_TMOUT_BOOT = 30000, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
/* FIXME: GoVault needs 2s but we can't afford that without
* parallel probing. 800ms is enough for iVDR disk
@ -319,8 +321,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_LPM = (1 << 4), /* link power management action */
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK | ATA_EH_LPM,
/* ata_eh_info->flags */
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
@ -452,6 +457,7 @@ enum link_pm {
MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
@ -554,8 +560,8 @@ struct ata_ering {
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int horkage; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
#ifdef CONFIG_ATA_ACPI
acpi_handle acpi_handle;
@ -564,6 +570,7 @@ struct ata_device {
/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
u64 n_sectors; /* size of device, if ATA */
unsigned int class; /* ATA_DEV_xxx */
unsigned long unpark_deadline;
u8 pio_mode;
u8 dma_mode;
@ -621,6 +628,7 @@ struct ata_eh_context {
[ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
unsigned int classes[ATA_MAX_DEVICES];
unsigned int did_probe_mask;
unsigned int unloaded_mask;
unsigned int saved_ncq_enabled;
u8 saved_xfer_mode[ATA_MAX_DEVICES];
/* timestamp for the last reset attempt or success */
@ -688,7 +696,8 @@ struct ata_port {
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
struct ata_link link; /* host default link */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
int nr_pmp_links; /* nr of available PMP links */
struct ata_link *pmp_link; /* array of PMP links */
@ -709,6 +718,7 @@ struct ata_port {
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
struct completion park_req_pending;
pm_message_t pm_mesg;
int *pm_result;
@ -772,8 +782,8 @@ struct ata_port_operations {
/*
* Optional features
*/
int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val);
int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val);
int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
void (*pmp_attach)(struct ata_port *ap);
void (*pmp_detach)(struct ata_port *ap);
int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
@ -895,6 +905,7 @@ extern void ata_port_disable(struct ata_port *);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
@ -920,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int ata_link_online(struct ata_link *link);
extern int ata_link_offline(struct ata_link *link);
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
@ -1098,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
extern struct device_attribute *ata_common_sdev_attrs[];
#define ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
@ -1112,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops;
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param
.bios_param = ata_std_bios_param, \
.sdev_attrs = ata_common_sdev_attrs
#define ATA_NCQ_SHT(drv_name) \
ATA_BASE_SHT(drv_name), \
@ -1134,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline int ata_is_host_link(const struct ata_link *link)
{
return link == &link->ap->link;
return link == &link->ap->link || link == link->ap->slave_link;
}
#else /* CONFIG_SATA_PMP */
static inline bool sata_pmp_supported(struct ata_port *ap)
@ -1167,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link)
printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
#define ata_link_printk(link, lv, fmt, args...) do { \
if (sata_pmp_attached((link)->ap)) \
if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \
printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
(link)->pmp , ##args); \
else \
@ -1265,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link)
return ata_tag_valid(link->active_tag) || link->sactive;
}
static inline struct ata_link *ata_port_first_link(struct ata_port *ap)
{
if (sata_pmp_attached(ap))
return ap->pmp_link;
return &ap->link;
}
extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
struct ata_link *link,
bool dev_only);
static inline struct ata_link *ata_port_next_link(struct ata_link *link)
{
struct ata_port *ap = link->ap;
if (ata_is_host_link(link)) {
if (!sata_pmp_attached(ap))
return NULL;
return ap->pmp_link;
}
if (++link < ap->nr_pmp_links + ap->pmp_link)
return link;
return NULL;
}
#define __ata_port_for_each_link(lk, ap) \
for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk))
#define __ata_port_for_each_link(link, ap) \
for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
(link) = __ata_port_next_link((ap), (link), false))
#define ata_port_for_each_link(link, ap) \
for ((link) = ata_port_first_link(ap); (link); \
(link) = ata_port_next_link(link))
for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
(link) = __ata_port_next_link((ap), (link), true))
#define ata_link_for_each_dev(dev, link) \
for ((dev) = (link)->device; \

View file

@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
next->next->pprev = &next->next;
}
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
static inline void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = NULL;
}
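A minimal usage sketch of the new helper (both list heads here are hypothetical): it transplants an entire chain in O(1), leaves the source head empty, and fixes up the first entry's pprev so later hlist_del() calls stay valid.

	/* given some populated chain head, e.g. one hash bucket */
	extern struct hlist_head bucket;
	HLIST_HEAD(stale);
	/* take over every entry on 'bucket'; 'bucket' ends up empty */
	hlist_move_list(&bucket, &stale);
	/* entries are now reachable only via 'stale' */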
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \

View file

@ -170,4 +170,6 @@
#define VIOTAPE_MAJOR 230
#define BLOCK_EXT_MAJOR 259
#endif

View file

@ -21,30 +21,30 @@
struct ms_status_register {
unsigned char reserved;
unsigned char interrupt;
#define MEMSTICK_INT_CMDNAK 0x0001
#define MEMSTICK_INT_IOREQ 0x0008
#define MEMSTICK_INT_IOBREQ 0x0010
#define MEMSTICK_INT_BREQ 0x0020
#define MEMSTICK_INT_ERR 0x0040
#define MEMSTICK_INT_CED 0x0080
#define MEMSTICK_INT_CMDNAK 0x01
#define MEMSTICK_INT_IOREQ 0x08
#define MEMSTICK_INT_IOBREQ 0x10
#define MEMSTICK_INT_BREQ 0x20
#define MEMSTICK_INT_ERR 0x40
#define MEMSTICK_INT_CED 0x80
unsigned char status0;
#define MEMSTICK_STATUS0_WP 0x0001
#define MEMSTICK_STATUS0_SL 0x0002
#define MEMSTICK_STATUS0_BF 0x0010
#define MEMSTICK_STATUS0_BE 0x0020
#define MEMSTICK_STATUS0_FB0 0x0040
#define MEMSTICK_STATUS0_MB 0x0080
#define MEMSTICK_STATUS0_WP 0x01
#define MEMSTICK_STATUS0_SL 0x02
#define MEMSTICK_STATUS0_BF 0x10
#define MEMSTICK_STATUS0_BE 0x20
#define MEMSTICK_STATUS0_FB0 0x40
#define MEMSTICK_STATUS0_MB 0x80
unsigned char status1;
#define MEMSTICK_STATUS1_UCFG 0x0001
#define MEMSTICK_STATUS1_FGER 0x0002
#define MEMSTICK_STATUS1_UCEX 0x0004
#define MEMSTICK_STATUS1_EXER 0x0008
#define MEMSTICK_STATUS1_UCDT 0x0010
#define MEMSTICK_STATUS1_DTER 0x0020
#define MEMSTICK_STATUS1_FBI 0x0040
#define MEMSTICK_STATUS1_MB 0x0080
#define MEMSTICK_STATUS1_UCFG 0x01
#define MEMSTICK_STATUS1_FGER 0x02
#define MEMSTICK_STATUS1_UCEX 0x04
#define MEMSTICK_STATUS1_EXER 0x08
#define MEMSTICK_STATUS1_UCDT 0x10
#define MEMSTICK_STATUS1_DTER 0x20
#define MEMSTICK_STATUS1_FB1 0x40
#define MEMSTICK_STATUS1_MB 0x80
} __attribute__((packed));
struct ms_id_register {
@ -56,32 +56,32 @@ struct ms_id_register {
struct ms_param_register {
unsigned char system;
#define MEMSTICK_SYS_ATEN 0xc0
#define MEMSTICK_SYS_BAMD 0x80
#define MEMSTICK_SYS_PAM 0x08
#define MEMSTICK_SYS_BAMD 0x80
unsigned char block_address_msb;
unsigned short block_address;
unsigned char cp;
#define MEMSTICK_CP_BLOCK 0x0000
#define MEMSTICK_CP_PAGE 0x0020
#define MEMSTICK_CP_EXTRA 0x0040
#define MEMSTICK_CP_OVERWRITE 0x0080
#define MEMSTICK_CP_BLOCK 0x00
#define MEMSTICK_CP_PAGE 0x20
#define MEMSTICK_CP_EXTRA 0x40
#define MEMSTICK_CP_OVERWRITE 0x80
unsigned char page_address;
} __attribute__((packed));
struct ms_extra_data_register {
unsigned char overwrite_flag;
#define MEMSTICK_OVERWRITE_UPDATA 0x0010
#define MEMSTICK_OVERWRITE_PAGE 0x0060
#define MEMSTICK_OVERWRITE_BLOCK 0x0080
#define MEMSTICK_OVERWRITE_UDST 0x10
#define MEMSTICK_OVERWRITE_PGST1 0x20
#define MEMSTICK_OVERWRITE_PGST0 0x40
#define MEMSTICK_OVERWRITE_BKST 0x80
unsigned char management_flag;
#define MEMSTICK_MANAGEMENT_SYSTEM 0x0004
#define MEMSTICK_MANAGEMENT_TRANS_TABLE 0x0008
#define MEMSTICK_MANAGEMENT_COPY 0x0010
#define MEMSTICK_MANAGEMENT_ACCESS 0x0020
#define MEMSTICK_MANAGEMENT_SYSFLG 0x04
#define MEMSTICK_MANAGEMENT_ATFLG 0x08
#define MEMSTICK_MANAGEMENT_SCMS1 0x10
#define MEMSTICK_MANAGEMENT_SCMS0 0x20
unsigned short logical_address;
} __attribute__((packed));
@ -96,9 +96,9 @@ struct ms_register {
struct mspro_param_register {
unsigned char system;
#define MEMSTICK_SYS_SERIAL 0x80
#define MEMSTICK_SYS_PAR4 0x00
#define MEMSTICK_SYS_PAR8 0x40
#define MEMSTICK_SYS_SERIAL 0x80
unsigned short data_count;
unsigned int data_address;
@ -147,7 +147,7 @@ struct ms_register_addr {
unsigned char w_length;
} __attribute__((packed));
enum {
enum memstick_tpc {
MS_TPC_READ_MG_STATUS = 0x01,
MS_TPC_READ_LONG_DATA = 0x02,
MS_TPC_READ_SHORT_DATA = 0x03,
@ -167,7 +167,7 @@ enum {
MS_TPC_SET_CMD = 0x0e
};
enum {
enum memstick_command {
MS_CMD_BLOCK_END = 0x33,
MS_CMD_RESET = 0x3c,
MS_CMD_BLOCK_WRITE = 0x55,
@ -201,8 +201,6 @@ enum {
/*** Driver structures and functions ***/
#define MEMSTICK_PART_SHIFT 3
enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE };
#define MEMSTICK_POWER_OFF 0
@ -215,24 +213,27 @@ enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE };
struct memstick_host;
struct memstick_driver;
struct memstick_device_id {
unsigned char match_flags;
#define MEMSTICK_MATCH_ALL 0x01
unsigned char type;
#define MEMSTICK_TYPE_LEGACY 0xff
#define MEMSTICK_TYPE_DUO 0x00
#define MEMSTICK_TYPE_PRO 0x01
unsigned char category;
#define MEMSTICK_CATEGORY_STORAGE 0xff
#define MEMSTICK_CATEGORY_STORAGE_DUO 0x00
#define MEMSTICK_CATEGORY_IO 0x01
#define MEMSTICK_CATEGORY_IO_PRO 0x10
#define MEMSTICK_CLASS_GENERIC 0xff
#define MEMSTICK_CLASS_GENERIC_DUO 0x00
struct memstick_device_id {
unsigned char match_flags;
unsigned char type;
unsigned char category;
unsigned char class;
#define MEMSTICK_CLASS_FLASH 0xff
#define MEMSTICK_CLASS_DUO 0x00
#define MEMSTICK_CLASS_ROM 0x01
#define MEMSTICK_CLASS_RO 0x02
#define MEMSTICK_CLASS_WP 0x03
};
struct memstick_request {
@ -319,9 +320,9 @@ void memstick_suspend_host(struct memstick_host *host);
void memstick_resume_host(struct memstick_host *host);
void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
struct scatterlist *sg);
const struct scatterlist *sg);
void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
void *buf, size_t length);
const void *buf, size_t length);
int memstick_next_req(struct memstick_host *host,
struct memstick_request **mrq);
void memstick_new_req(struct memstick_host *host);

View file

@ -141,6 +141,10 @@ enum {
MLX4_STAT_RATE_OFFSET = 5
};
enum {
MLX4_MTT_FLAG_PRESENT = 1
};
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;

View file

@ -7,6 +7,7 @@
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
@ -219,12 +220,6 @@ struct inode;
*/
#include <linux/page-flags.h>
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(condition) do { } while(0)
#endif
/*
* Methods to modify the page usage count.
*
@ -919,7 +914,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
#if USE_SPLIT_PTLOCKS
/*
* We tuck a spinlock to guard each pagetable page into its struct page,
* at page->private, with BUILD_BUG_ON to make sure that this will not
@ -932,14 +927,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
#endif /* USE_SPLIT_PTLOCKS */
static inline void pgtable_page_ctor(struct page *page)
{

View file

@ -21,11 +21,13 @@
struct address_space;
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#if USE_SPLIT_PTLOCKS
typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
#else /* !USE_SPLIT_PTLOCKS */
typedef unsigned long mm_counter_t;
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
#endif /* !USE_SPLIT_PTLOCKS */
/*
* Each physical page in the system has a struct page associated with
@ -65,7 +67,7 @@ struct page {
* see PAGE_MAPPING_ANON below.
*/
};
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
#if USE_SPLIT_PTLOCKS
spinlock_t ptl;
#endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */

View file

@ -65,7 +65,7 @@ struct mmc_host_ops {
* -ENOSYS when not supported (equal to NULL callback)
* or a negative errno value when something bad happened
*
* Return values for the get_ro callback should be:
* Return values for the get_cd callback should be:
* 0 for an absent card
* 1 for a present card
* -ENOSYS when not supported (equal to NULL callback)

include/linux/mmdebug.h Normal file
View file

@ -0,0 +1,18 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
#include <linux/autoconf.h>
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_VIRTUAL
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
#else
#define VIRTUAL_BUG_ON(cond) do { } while (0)
#endif
#endif

View file

@ -751,8 +751,9 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
* search. The zoneref returned is a cursor that is used as the next starting
* point for future calls to next_zones_zonelist().
* search. The zoneref returned is a cursor that represents the current zone
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
*/
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
@ -768,9 +769,8 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist. The cursor should
* not be used by the caller as it does not match the value of the zone
* returned.
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
@ -795,7 +795,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
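Given the new cursor semantics (z points at the zone currently being examined and is pre-incremented on the next call), a caller loops as in this sketch; 'zonelist' and 'nodemask' are assumed to come from the usual allocator context:

	struct zoneref *z;
	struct zone *zone;

	/* visit every allowed zone at or below ZONE_NORMAL */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, ZONE_NORMAL, nodemask) {
		/* 'zone' is valid here; 'z' is the cursor advanced next step */
	}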
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index

View file

@ -6,7 +6,6 @@
#ifdef __KERNEL__
#include <linux/in.h>
#endif
#include <linux/pim.h>
/*
* Based on the MROUTING 3.5 defines primarily to keep
@ -130,6 +129,7 @@ struct igmpmsg
*/
#ifdef __KERNEL__
#include <linux/pim.h>
#include <net/sock.h>
#ifdef CONFIG_IP_MROUTE

View file

@ -115,6 +115,7 @@ struct sioc_mif_req6
#ifdef __KERNEL__
#include <linux/pim.h>
#include <linux/skbuff.h> /* for struct sk_buff_head */
#ifdef CONFIG_IPV6_MROUTE

View file

@ -41,6 +41,8 @@ struct mtd_blktrans_ops {
unsigned long block, char *buffer);
int (*writesect)(struct mtd_blktrans_dev *dev,
unsigned long block, char *buffer);
int (*discard)(struct mtd_blktrans_dev *dev,
unsigned long block, unsigned nr_blocks);
/* Block layer ioctls */
int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);

View file

@ -17,9 +17,14 @@
struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
struct platform_device *shared_smi;
unsigned int t_clk;
};
#define MV643XX_ETH_PHY_ADDR_DEFAULT 0
#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x))
#define MV643XX_ETH_PHY_NONE 0xff
struct mv643xx_eth_platform_data {
/*
* Pointer back to our parent instance, and our port number.
@ -30,8 +35,6 @@ struct mv643xx_eth_platform_data {
/*
* Whether a PHY is present, and if yes, at which address.
*/
struct platform_device *shared_smi;
int force_phy_addr;
int phy_addr;
/*
@ -49,10 +52,10 @@ struct mv643xx_eth_platform_data {
int duplex;
/*
* Which RX/TX queues to use.
* How many RX/TX queues to use.
*/
int rx_queue_mask;
int tx_queue_mask;
int rx_queue_count;
int tx_queue_count;
/*
* Override default RX/TX queue sizes if nonzero.

View file

@ -42,6 +42,7 @@
#include <linux/workqueue.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
struct vlan_group;
struct ethtool_ops;
@ -471,6 +472,8 @@ struct net_device
char name[IFNAMSIZ];
/* device name hash chain */
struct hlist_node name_hlist;
/* snmp alias */
char *ifalias;
/*
* I/O specific fields
@ -605,6 +608,9 @@ struct net_device
/* Protocol specific pointers */
#ifdef CONFIG_NET_DSA
void *dsa_ptr; /* dsa specific data */
#endif
void *atalk_ptr; /* AppleTalk link */
void *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
@ -796,6 +802,26 @@ void dev_net_set(struct net_device *dev, struct net *net)
#endif
}
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
if (dev->dsa_ptr != NULL)
return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
return 0;
}
static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
if (dev->dsa_ptr != NULL)
return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
return 0;
}
/**
* netdev_priv - access network device private data
* @dev: network device
@ -1223,7 +1249,8 @@ extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
extern int dev_change_name(struct net_device *, char *);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
@ -1667,7 +1694,7 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);
extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
extern void linkwatch_run_queue(void);

View file

@ -5,13 +5,11 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#endif
#include <linux/types.h>
#include <linux/compiler.h>
@ -52,6 +50,16 @@ enum nf_inet_hooks {
NF_INET_NUMHOOKS
};
enum {
NFPROTO_UNSPEC = 0,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
NFPROTO_NUMPROTO,
};
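A hedged sketch of how the narrowed pf field is used when registering a hook (hook body and priority invented; registration still goes through nf_register_hook() as before):

	static unsigned int demo_hook(unsigned int hooknum, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
	{
		return NF_ACCEPT;	/* pass everything through */
	}

	static struct nf_hook_ops demo_ops = {
		.hook     = demo_hook,
		.owner    = THIS_MODULE,
		.pf       = NFPROTO_IPV4,	/* u_int8_t now, was int */
		.hooknum  = NF_INET_PRE_ROUTING,
		.priority = 0,
	};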
union nf_inet_addr {
__u32 all[4];
__be32 ip;
@ -92,8 +100,8 @@ struct nf_hook_ops
/* User fills in from here down. */
nf_hookfn *hook;
struct module *owner;
int pf;
int hooknum;
u_int8_t pf;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
};
@ -102,7 +110,7 @@ struct nf_sockopt_ops
{
struct list_head list;
int pf;
u_int8_t pf;
/* Non-inclusive ranges: use 0/0/NULL to never get called. */
int set_optmin;
@ -138,9 +146,9 @@ extern struct ctl_path nf_net_netfilter_sysctl_path[];
extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
#endif /* CONFIG_SYSCTL */
extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh);
@ -151,7 +159,7 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(int pf, unsigned int hook,
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
@ -167,7 +175,7 @@ static inline int nf_hook_thresh(int pf, unsigned int hook,
return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
}
static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb,
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *))
{
@ -212,14 +220,14 @@ __ret;})
NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN)
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt,
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int len);
int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt,
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int *len);
int compat_nf_setsockopt(struct sock *sk, int pf, int optval,
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, int len);
int compat_nf_getsockopt(struct sock *sk, int pf, int optval,
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, int *len);
/* Call this before modifying an existing packet: ensures it is
@ -247,7 +255,7 @@ struct nf_afinfo {
int route_key_size;
};
extern const struct nf_afinfo *nf_afinfo[NPROTO];
extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return rcu_dereference(nf_afinfo[family]);
@ -292,7 +300,7 @@ extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family)
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
void (*decodefn)(struct sk_buff *, struct flowi *);
@ -315,7 +323,7 @@ extern struct proc_dir_entry *proc_net_netfilter;
#else /* !CONFIG_NETFILTER */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
static inline int nf_hook_thresh(int pf, unsigned int hook,
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
@ -324,7 +332,7 @@ static inline int nf_hook_thresh(int pf, unsigned int hook,
{
return okfn(skb);
}
static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb,
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *))
{
@ -332,7 +340,9 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb,
}
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {}
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@ -343,56 +353,5 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *);
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
static inline struct net *nf_pre_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_in_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_forward_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
BUG_ON(in->nd_net != out->nd_net);
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_out_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_post_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
#endif /*__KERNEL__*/
#endif /*__LINUX_NETFILTER_H*/

View file

@ -32,6 +32,7 @@ header-y += xt_owner.h
header-y += xt_pkttype.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
header-y += xt_sctp.h
header-y += xt_state.h
header-y += xt_statistic.h

View file

@ -87,7 +87,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
extern void nf_ct_gre_keymap_flush(void);
extern void nf_ct_gre_keymap_flush(struct net *net);
extern void nf_nat_need_gre(void);
#endif /* __KERNEL__ */

View file

@ -173,6 +173,98 @@ struct xt_counters_info
#include <linux/netdevice.h>
/**
* struct xt_match_param - parameters for match extensions' match functions
*
* @in: input netdevice
* @out: output netdevice
* @match: struct xt_match through which this function was invoked
* @matchinfo: per-match data
* @fragoff: packet is a fragment, this is the data offset
* @thoff: position of transport header relative to skb->data
* @hotdrop: drop packet if we had inspection problems
* @family: Actual NFPROTO_* through which the function is invoked
* (helpful when match->family == NFPROTO_UNSPEC)
*/
struct xt_match_param {
const struct net_device *in, *out;
const struct xt_match *match;
const void *matchinfo;
int fragoff;
unsigned int thoff;
bool *hotdrop;
u_int8_t family;
};
/**
* struct xt_mtchk_param - parameters for match extensions'
* checkentry functions
*
* @table: table the rule is tried to be inserted into
* @entryinfo: the family-specific rule data
* (struct ipt_ip, ip6t_ip, ebt_entry)
* @match: struct xt_match through which this function was invoked
* @matchinfo: per-match data
* @hook_mask: via which hooks the new rule is reachable
*/
struct xt_mtchk_param {
const char *table;
const void *entryinfo;
const struct xt_match *match;
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
};
/* Match destructor parameters */
struct xt_mtdtor_param {
const struct xt_match *match;
void *matchinfo;
u_int8_t family;
};
/**
* struct xt_target_param - parameters for target extensions' target functions
*
* @hooknum: hook through which this target was invoked
* @target: struct xt_target through which this function was invoked
* @targinfo: per-target data
*
* Other fields: see above.
*/
struct xt_target_param {
const struct net_device *in, *out;
unsigned int hooknum;
const struct xt_target *target;
const void *targinfo;
u_int8_t family;
};
/**
* struct xt_tgchk_param - parameters for target extensions'
* checkentry functions
*
* @entryinfo: the family-specific rule data
* (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
*
* Other fields: see above.
*/
struct xt_tgchk_param {
const char *table;
void *entryinfo;
const struct xt_target *target;
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
};
/* Target destructor parameters */
struct xt_tgdtor_param {
const struct xt_target *target;
void *targinfo;
u_int8_t family;
};
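To make the bundled-parameter style concrete, here is a hedged sketch of a match function under the new prototype (the extension and its per-match data are invented):

	/* hypothetical per-match data */
	struct demo_mtinfo {
		u_int8_t invert;
	};

	static bool demo_mt(const struct sk_buff *skb,
			    const struct xt_match_param *par)
	{
		const struct demo_mtinfo *info = par->matchinfo;

		if (par->fragoff != 0)	/* only inspect first fragments */
			return false;
		/* stand-in test; real extensions read payload via par->thoff */
		return !info->invert;
	}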
struct xt_match
{
struct list_head list;
@ -185,24 +277,13 @@ struct xt_match
non-linear skb, using skb_header_pointer and
skb_ip_make_writable. */
bool (*match)(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct xt_match *match,
const void *matchinfo,
int offset,
unsigned int protoff,
bool *hotdrop);
const struct xt_match_param *);
/* Called when user tries to insert an entry of this type. */
/* Should return true or false. */
bool (*checkentry)(const char *tablename,
const void *ip,
const struct xt_match *match,
void *matchinfo,
unsigned int hook_mask);
bool (*checkentry)(const struct xt_mtchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_match *match, void *matchinfo);
void (*destroy)(const struct xt_mtdtor_param *);
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
@ -235,24 +316,16 @@ struct xt_target
must now handle non-linear skbs, using skb_copy_bits and
skb_ip_make_writable. */
unsigned int (*target)(struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
unsigned int hooknum,
const struct xt_target *target,
const void *targinfo);
const struct xt_target_param *);
/* Called when user tries to insert an entry of this type:
hook_mask is a bitmask of hooks from which it can be
called. */
/* Should return true or false. */
bool (*checkentry)(const char *tablename,
const void *entry,
const struct xt_target *target,
void *targinfo,
unsigned int hook_mask);
bool (*checkentry)(const struct xt_tgchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_target *target, void *targinfo);
void (*destroy)(const struct xt_tgdtor_param *);
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
@ -292,7 +365,7 @@ struct xt_table
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
int af; /* address/protocol family */
u_int8_t af; /* address/protocol family */
};
#include <linux/netfilter_ipv4.h>
@ -328,12 +401,10 @@ extern void xt_unregister_match(struct xt_match *target);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
extern int xt_check_match(const struct xt_match *match, unsigned short family,
unsigned int size, const char *table, unsigned int hook,
unsigned short proto, int inv_proto);
extern int xt_check_target(const struct xt_target *target, unsigned short family,
unsigned int size, const char *table, unsigned int hook,
unsigned short proto, int inv_proto);
extern int xt_check_match(struct xt_mtchk_param *,
unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
unsigned int size, u_int8_t proto, bool inv_proto);
extern struct xt_table *xt_register_table(struct net *net,
struct xt_table *table,
@ -346,19 +417,19 @@ extern struct xt_table_info *xt_replace_table(struct xt_table *table,
struct xt_table_info *newinfo,
int *error);
extern struct xt_match *xt_find_match(int af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(int af, const char *name, u8 revision);
extern struct xt_target *xt_request_find_target(int af, const char *name,
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
u8 revision);
extern int xt_find_revision(int af, const char *name, u8 revision, int target,
int *err);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
int target, int *err);
extern struct xt_table *xt_find_table_lock(struct net *net, int af,
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
extern void xt_table_unlock(struct xt_table *t);
extern int xt_proto_init(struct net *net, int af);
extern void xt_proto_fini(struct net *net, int af);
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
@ -423,12 +494,12 @@ struct compat_xt_counters_info
#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
& ~(__alignof__(struct compat_xt_counters)-1))
extern void xt_compat_lock(int af);
extern void xt_compat_unlock(int af);
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
extern int xt_compat_add_offset(int af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(int af);
extern short xt_compat_calc_jump(int af, unsigned int offset);
extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,

View file

@ -0,0 +1,14 @@
#ifndef _XT_TPROXY_H_target
#define _XT_TPROXY_H_target
/* TPROXY target is capable of marking the packet to perform
* redirection. We can get rid of that whenever we get support for
* mutliple targets in the same rule. */
struct xt_tproxy_target_info {
u_int32_t mark_mask;
u_int32_t mark_value;
__be32 laddr;
__be16 lport;
};
#endif /* _XT_TPROXY_H_target */

View file

@ -0,0 +1,26 @@
#ifndef _LINUX_NETFILTER_XT_RECENT_H
#define _LINUX_NETFILTER_XT_RECENT_H 1
enum {
XT_RECENT_CHECK = 1 << 0,
XT_RECENT_SET = 1 << 1,
XT_RECENT_UPDATE = 1 << 2,
XT_RECENT_REMOVE = 1 << 3,
XT_RECENT_TTL = 1 << 4,
XT_RECENT_SOURCE = 0,
XT_RECENT_DEST = 1,
XT_RECENT_NAME_LEN = 200,
};
struct xt_recent_mtinfo {
u_int32_t seconds;
u_int32_t hit_count;
u_int8_t check_set;
u_int8_t invert;
char name[XT_RECENT_NAME_LEN];
u_int8_t side;
};
#endif /* _LINUX_NETFILTER_XT_RECENT_H */

View file

@ -31,6 +31,9 @@
* The 4 lsb are more than enough to store the verdict. */
#define EBT_VERDICT_BITS 0x0000000F
struct xt_match;
struct xt_target;
struct ebt_counter
{
uint64_t pcnt;
@ -121,7 +124,7 @@ struct ebt_entry_match
{
union {
char name[EBT_FUNCTION_MAXNAMELEN];
struct ebt_match *match;
struct xt_match *match;
} u;
/* size of data */
unsigned int match_size;
@ -132,7 +135,7 @@ struct ebt_entry_watcher
{
union {
char name[EBT_FUNCTION_MAXNAMELEN];
struct ebt_watcher *watcher;
struct xt_target *watcher;
} u;
/* size of data */
unsigned int watcher_size;
@ -143,7 +146,7 @@ struct ebt_entry_target
{
union {
char name[EBT_FUNCTION_MAXNAMELEN];
struct ebt_target *target;
struct xt_target *target;
} u;
/* size of data */
unsigned int target_size;
@ -207,14 +210,17 @@ struct ebt_match
{
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
/* 0 == it matches */
int (*match)(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *matchdata,
unsigned int datalen);
/* 0 == let it in */
int (*check)(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *matchdata, unsigned int datalen);
void (*destroy)(void *matchdata, unsigned int datalen);
bool (*match)(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct xt_match *match,
const void *matchinfo, int offset, unsigned int protoff,
bool *hotdrop);
bool (*checkentry)(const char *table, const void *entry,
const struct xt_match *match, void *matchinfo,
unsigned int hook_mask);
void (*destroy)(const struct xt_match *match, void *matchinfo);
unsigned int matchsize;
u_int8_t revision;
u_int8_t family;
struct module *me;
};
@ -222,13 +228,17 @@ struct ebt_watcher
{
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
void (*watcher)(const struct sk_buff *skb, unsigned int hooknr,
const struct net_device *in, const struct net_device *out,
const void *watcherdata, unsigned int datalen);
/* 0 == let it in */
int (*check)(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *watcherdata, unsigned int datalen);
void (*destroy)(void *watcherdata, unsigned int datalen);
unsigned int (*target)(struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
unsigned int hook_num, const struct xt_target *target,
const void *targinfo);
bool (*checkentry)(const char *table, const void *entry,
const struct xt_target *target, void *targinfo,
unsigned int hook_mask);
void (*destroy)(const struct xt_target *target, void *targinfo);
unsigned int targetsize;
u_int8_t revision;
u_int8_t family;
struct module *me;
};
@ -236,14 +246,18 @@ struct ebt_target
{
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
/* returns one of the standard verdicts */
int (*target)(struct sk_buff *skb, unsigned int hooknr,
const struct net_device *in, const struct net_device *out,
const void *targetdata, unsigned int datalen);
/* 0 == let it in */
int (*check)(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *targetdata, unsigned int datalen);
void (*destroy)(void *targetdata, unsigned int datalen);
/* returns one of the standard EBT_* verdicts */
unsigned int (*target)(struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
unsigned int hook_num, const struct xt_target *target,
const void *targinfo);
bool (*checkentry)(const char *table, const void *entry,
const struct xt_target *target, void *targinfo,
unsigned int hook_mask);
void (*destroy)(const struct xt_target *target, void *targinfo);
unsigned int targetsize;
u_int8_t revision;
u_int8_t family;
struct module *me;
};
@ -288,12 +302,6 @@ struct ebt_table
~(__alignof__(struct ebt_replace)-1))
extern int ebt_register_table(struct ebt_table *table);
extern void ebt_unregister_table(struct ebt_table *table);
extern int ebt_register_match(struct ebt_match *match);
extern void ebt_unregister_match(struct ebt_match *match);
extern int ebt_register_watcher(struct ebt_watcher *watcher);
extern void ebt_unregister_watcher(struct ebt_watcher *watcher);
extern int ebt_register_target(struct ebt_target *target);
extern void ebt_unregister_target(struct ebt_target *target);
extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
struct ebt_table *table);
@ -302,9 +310,9 @@ extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
#define BASE_CHAIN (hookmask & (1 << NF_BR_NUMHOOKS))
#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS))
/* Clear the bit in the hook mask that tells if the rule is on a base chain */
#define CLEAR_BASE_CHAIN_BIT (hookmask &= ~(1 << NF_BR_NUMHOOKS))
#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS))
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)

View file

@ -1,27 +1,21 @@
#ifndef _IPT_RECENT_H
#define _IPT_RECENT_H
#define RECENT_NAME "ipt_recent"
#define RECENT_VER "v0.3.1"
#include <linux/netfilter/xt_recent.h>
#define IPT_RECENT_CHECK 1
#define IPT_RECENT_SET 2
#define IPT_RECENT_UPDATE 4
#define IPT_RECENT_REMOVE 8
#define IPT_RECENT_TTL 16
#define ipt_recent_info xt_recent_mtinfo
#define IPT_RECENT_SOURCE 0
#define IPT_RECENT_DEST 1
enum {
IPT_RECENT_CHECK = XT_RECENT_CHECK,
IPT_RECENT_SET = XT_RECENT_SET,
IPT_RECENT_UPDATE = XT_RECENT_UPDATE,
IPT_RECENT_REMOVE = XT_RECENT_REMOVE,
IPT_RECENT_TTL = XT_RECENT_TTL,
#define IPT_RECENT_NAME_LEN 200
IPT_RECENT_SOURCE = XT_RECENT_SOURCE,
IPT_RECENT_DEST = XT_RECENT_DEST,
struct ipt_recent_info {
u_int32_t seconds;
u_int32_t hit_count;
u_int8_t check_set;
u_int8_t invert;
char name[IPT_RECENT_NAME_LEN];
u_int8_t side;
IPT_RECENT_NAME_LEN = XT_RECENT_NAME_LEN,
};
#endif /*_IPT_RECENT_H*/

View file

@ -89,6 +89,22 @@
* @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all mesh paths, on the interface identified
* by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
* %NL80211_ATTR_IFINDEX.
*
* @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
* after being queried by the kernel. CRDA replies by sending a regulatory
* domain structure which consists of %NL80211_ATTR_REG_ALPHA2 set to our
* current alpha2 if it found a match. It also provides
* %NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each
* regulatory rule is a nested set of attributes given by
* %NL80211_ATTR_FREQ_RANGE_[START|END] and
* %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by
* %NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN and
* %NL80211_ATTR_POWER_RULE_MAX_EIRP.
* @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
* to the specified ISO/IEC 3166-1 alpha2 country code. The core will
* store this as a valid request and then query userspace for it.
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
@ -127,13 +143,23 @@ enum nl80211_commands {
NL80211_CMD_NEW_MPATH,
NL80211_CMD_DEL_MPATH,
/* add commands here */
NL80211_CMD_SET_BSS,
NL80211_CMD_SET_REG,
NL80211_CMD_REQ_SET_REG,
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
__NL80211_CMD_AFTER_LAST,
NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
};
/*
* Allow user space programs to use #ifdef on new commands by defining them
* here
*/
#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
/**
* enum nl80211_attrs - nl80211 netlink attributes
@ -188,10 +214,34 @@ enum nl80211_commands {
* info given for %NL80211_CMD_GET_MPATH, nested attribute described at
* &enum nl80211_mpath_info.
*
*
* @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of
* &enum nl80211_mntr_flags.
*
* @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the
* current regulatory domain should be set to or is already set to.
* For example, 'CR', for Costa Rica. This attribute is used by the kernel
* to query the CRDA to retrieve one regulatory domain. This attribute can
* also be used by userspace to query the kernel for the currently set
* regulatory domain. We chose an alpha2 as that is also used by the
* IEEE-802.11d country information element to identify a country.
* Users can also simply ask the wireless core to set regulatory domain
* to a specific alpha2.
* @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory
* rules.
*
* @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1)
* @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled
* (u8, 0 or 1)
* @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
* (u8, 0 or 1)
*
* @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
*
* @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all
* supported interface types, each a flag attribute with the number
* of the interface mode.
*
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@ -235,16 +285,35 @@ enum nl80211_attrs {
NL80211_ATTR_MPATH_NEXT_HOP,
NL80211_ATTR_MPATH_INFO,
NL80211_ATTR_BSS_CTS_PROT,
NL80211_ATTR_BSS_SHORT_PREAMBLE,
NL80211_ATTR_BSS_SHORT_SLOT_TIME,
NL80211_ATTR_HT_CAPABILITY,
NL80211_ATTR_SUPPORTED_IFTYPES,
NL80211_ATTR_REG_ALPHA2,
NL80211_ATTR_REG_RULES,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
};
/*
* Allow user space programs to use #ifdef on new attributes by defining them
* here
*/
#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
#define NL80211_HT_CAPABILITY_LEN 26
/**
* enum nl80211_iftype - (virtual) interface types
@ -435,6 +504,66 @@ enum nl80211_bitrate_attr {
NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_reg_rule_attr - regulatory rule attributes
* @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
* considerations for a given frequency range. These are the
* &enum nl80211_reg_rule_flags.
* @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory
* rule in KHz. This is not a center frequency but an actual regulatory
* band edge.
* @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule
* in KHz. This is not a center frequency but an actual regulatory
* band edge.
* @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
* frequency range, in KHz.
* @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
* for a given frequency range. The value is in mBi (100 * dBi).
* If you don't have one then don't send this.
* @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
* a given frequency range. The value is in mBm (100 * dBm).
*/
enum nl80211_reg_rule_attr {
__NL80211_REG_RULE_ATTR_INVALID,
NL80211_ATTR_REG_RULE_FLAGS,
NL80211_ATTR_FREQ_RANGE_START,
NL80211_ATTR_FREQ_RANGE_END,
NL80211_ATTR_FREQ_RANGE_MAX_BW,
NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
NL80211_ATTR_POWER_RULE_MAX_EIRP,
/* keep last */
__NL80211_REG_RULE_ATTR_AFTER_LAST,
NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_reg_rule_flags - regulatory rule flags
*
* @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
* @NL80211_RRF_NO_CCK: CCK modulation not allowed
* @NL80211_RRF_NO_INDOOR: indoor operation not allowed
* @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed
* @NL80211_RRF_DFS: DFS support is required to be used
* @NL80211_RRF_PTP_ONLY: this is only for Point To Point links
* @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links
* @NL80211_RRF_PASSIVE_SCAN: passive scan is required
* @NL80211_RRF_NO_IBSS: no IBSS is allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
NL80211_RRF_NO_CCK = 1<<1,
NL80211_RRF_NO_INDOOR = 1<<2,
NL80211_RRF_NO_OUTDOOR = 1<<3,
NL80211_RRF_DFS = 1<<4,
NL80211_RRF_PTP_ONLY = 1<<5,
NL80211_RRF_PTMP_ONLY = 1<<6,
NL80211_RRF_PASSIVE_SCAN = 1<<7,
NL80211_RRF_NO_IBSS = 1<<8,
};
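The flags form a plain bitmask; for instance, a rule that demands DFS support and passive scanning would carry:

	__u32 rule_flags = NL80211_RRF_DFS | NL80211_RRF_PASSIVE_SCAN;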
/**
* enum nl80211_mntr_flags - monitor configuration flags
*

View file

@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
* not handling interrupts, soon dead */
* not handling interrupts, soon dead.
* Called on the dying cpu, interrupts
* are already disabled. Must not
* sleep, must not fail */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
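A hedged notifier sketch showing where the new phase lands (callback name invented; note the constraints spelled out above: CPU_STARTING runs on the incoming cpu with interrupts still disabled):

	static int demo_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_STARTING:
		case CPU_STARTING_FROZEN:
			/* on cpu (unsigned long)hcpu, irqs off:
			 * must not sleep, must not fail */
			break;
		}
		return NOTIFY_OK;
	}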
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */

View file

@ -534,7 +534,7 @@ extern void pci_sort_breadthfirst(void);
#ifdef CONFIG_PCI_LEGACY
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
const struct pci_dev *from);
struct pci_dev *from);
struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
@ -550,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
const struct pci_dev *from);
struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
@ -816,7 +816,7 @@ _PCI_NOP_ALL(write,)
static inline struct pci_dev *pci_find_device(unsigned int vendor,
unsigned int device,
const struct pci_dev *from)
struct pci_dev *from)
{
return NULL;
}
@ -838,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
unsigned int device,
unsigned int ss_vendor,
unsigned int ss_device,
const struct pci_dev *from)
struct pci_dev *from)
{
return NULL;
}

View file

@ -497,6 +497,16 @@
#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101
#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102
#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103
#define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200
#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201
#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202
#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203
#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204
#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300
#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
@ -1411,6 +1421,8 @@
#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_VENDOR_ID_ZIATECH 0x1138
#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
@ -2215,6 +2227,7 @@
#define PCI_VENDOR_ID_ATTANSIC 0x1969
#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048
#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
@ -2246,6 +2259,16 @@
#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
#define PCI_VENDOR_ID_NETXEN 0x4040
#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001
#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002
#define PCI_DEVICE_ID_NX2031_4GCU 0x0003
#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004
#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005
#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024
#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025
#define PCI_DEVICE_ID_NX3031 0x0100
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100

View file

@ -23,12 +23,19 @@
__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
____cacheline_aligned_in_smp
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
__attribute__((__section__(".data.percpu.page_aligned"))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
DEFINE_PER_CPU(type, name)
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
DEFINE_PER_CPU(type, name)
#endif
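A hedged example of the new page-aligned variant (type and name invented):

	struct demo_scratch {
		char buf[4096];
	};
	/* one page-aligned instance per cpu, in .data.percpu.page_aligned */
	DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_scratch, demo_scratch);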
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)

View file

@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@ -44,19 +44,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc, 0);
s64 ret = __percpu_counter_sum(fbc);
return ret < 0 ? 0 : ret;
}
static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc, 1);
}
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc, 0);
return __percpu_counter_sum(fbc);
}
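With the set and sum-and-set variants gone, the surviving API reads like this hedged sketch (counter name invented; error handling elided):

	struct percpu_counter nr_demo;

	percpu_counter_init(&nr_demo, 0);
	percpu_counter_add(&nr_demo, 42);
	/* cheap, possibly stale */
	s64 approx = percpu_counter_read(&nr_demo);
	/* exact: folds all per-cpu deviations, never returns < 0 */
	s64 total = percpu_counter_sum_positive(&nr_demo);
	percpu_counter_destroy(&nr_demo);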
static inline s64 percpu_counter_read(struct percpu_counter *fbc)

View file

@ -226,6 +226,15 @@ struct sadb_x_sec_ctx {
} __attribute__((packed));
/* sizeof(struct sadb_sec_ctx) = 8 */
/* Used by MIGRATE to pass addresses IKE will use to perform
* negotiation with the peer */
struct sadb_x_kmaddress {
uint16_t sadb_x_kmaddress_len;
uint16_t sadb_x_kmaddress_exttype;
uint32_t sadb_x_kmaddress_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_x_kmaddress) == 8 */
/* Message types */
#define SADB_RESERVED 0
#define SADB_GETSPI 1
@ -346,7 +355,9 @@ struct sadb_x_sec_ctx {
#define SADB_X_EXT_NAT_T_DPORT 22
#define SADB_X_EXT_NAT_T_OA 23
#define SADB_X_EXT_SEC_CTX 24
#define SADB_EXT_MAX 24
/* Used with MIGRATE to pass addresses to IKE for negotiation */
#define SADB_X_EXT_KMADDRESS 25
#define SADB_EXT_MAX 25
/* Identity Extension values */
#define SADB_IDENTTYPE_RESERVED 0

include/linux/phonet.h Normal file
View file

@ -0,0 +1,170 @@
/**
* file phonet.h
*
* Phonet sockets kernel interface
*
* Copyright (C) 2008 Nokia Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef LINUX_PHONET_H
#define LINUX_PHONET_H
/* Automatic protocol selection */
#define PN_PROTO_TRANSPORT 0
/* Phonet datagram socket */
#define PN_PROTO_PHONET 1
/* Phonet pipe */
#define PN_PROTO_PIPE 2
#define PHONET_NPROTO 3
/* Socket options for SOL_PNPIPE level */
#define PNPIPE_ENCAP 1
#define PNPIPE_IFINDEX 2
#define PNADDR_ANY 0
#define PNPORT_RESOURCE_ROUTING 0
/* Values for PNPIPE_ENCAP option */
#define PNPIPE_ENCAP_NONE 0
#define PNPIPE_ENCAP_IP 1
/* ioctls */
#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
/* Phonet protocol header */
struct phonethdr {
__u8 pn_rdev;
__u8 pn_sdev;
__u8 pn_res;
__be16 pn_length;
__u8 pn_robj;
__u8 pn_sobj;
} __attribute__((packed));
/* Common Phonet payload header */
struct phonetmsg {
__u8 pn_trans_id; /* transaction ID */
__u8 pn_msg_id; /* message type */
union {
struct {
__u8 pn_submsg_id; /* message subtype */
__u8 pn_data[5];
} base;
struct {
__u16 pn_e_res_id; /* extended resource ID */
__u8 pn_e_submsg_id; /* message subtype */
__u8 pn_e_data[3];
} ext;
} pn_msg_u;
};
#define PN_COMMON_MESSAGE 0xF0
#define PN_PREFIX 0xE0 /* resource for extended messages */
#define pn_submsg_id pn_msg_u.base.pn_submsg_id
#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id
#define pn_e_res_id pn_msg_u.ext.pn_e_res_id
#define pn_data pn_msg_u.base.pn_data
#define pn_e_data pn_msg_u.ext.pn_e_data
/* data for unreachable errors */
#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01
#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14
#define pn_orig_msg_id pn_data[0]
#define pn_status pn_data[1]
#define pn_e_orig_msg_id pn_e_data[0]
#define pn_e_status pn_e_data[1]
/* Phonet socket address structure */
struct sockaddr_pn {
sa_family_t spn_family;
__u8 spn_obj;
__u8 spn_dev;
__u8 spn_resource;
__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
} __attribute__ ((packed));
static inline __u16 pn_object(__u8 addr, __u16 port)
{
return (addr << 8) | (port & 0x3ff);
}
static inline __u8 pn_obj(__u16 handle)
{
return handle & 0xff;
}
static inline __u8 pn_dev(__u16 handle)
{
return handle >> 8;
}
static inline __u16 pn_port(__u16 handle)
{
return handle & 0x3ff;
}
static inline __u8 pn_addr(__u16 handle)
{
return (handle >> 8) & 0xfc;
}
static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr)
{
spn->spn_dev &= 0x03;
spn->spn_dev |= addr & 0xfc;
}
static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port)
{
spn->spn_dev &= 0xfc;
spn->spn_dev |= (port >> 8) & 0x03;
spn->spn_obj = port & 0xff;
}
static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn,
__u16 handle)
{
spn->spn_dev = pn_dev(handle);
spn->spn_obj = pn_obj(handle);
}
static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn,
__u8 resource)
{
spn->spn_resource = resource;
}
static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn)
{
return spn->spn_dev & 0xfc;
}
static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn)
{
return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj;
}
static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn)
{
return pn_object(spn->spn_dev, spn->spn_obj);
}
static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn)
{
return spn->spn_resource;
}
#endif
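Since the header is new, a short sketch of how its address helpers compose may help: a 16-bit Phonet object handle packs a device address in the top six bits of the high byte (mask 0xfc) together with a 10-bit port. Values below are arbitrary examples:

#include <linux/types.h>
#include <linux/socket.h>	/* AF_PHONET, added later in this diff */
#include <linux/phonet.h>

static void phonet_addr_demo(void)
{
	struct sockaddr_pn spn = { .spn_family = AF_PHONET };
	__u16 handle = pn_object(0x6c, 0x23);	/* device 0x6c, port 0x23 */

	pn_sockaddr_set_object(&spn, handle);
	/* The two low bits of spn_dev carry the top of the 10-bit port,
	 * so address and port unpack independently:
	 * pn_sockaddr_get_addr(&spn) == 0x6c
	 * pn_sockaddr_get_port(&spn) == 0x23
	 */
}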

View file

@ -99,7 +99,14 @@ struct mii_bus {
*/
struct mutex mdio_lock;
struct device *dev;
struct device *parent;
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
struct device dev;
/* list of all PHYs on bus */
struct phy_device *phy_map[PHY_MAX_ADDR];
@ -113,6 +120,16 @@ struct mii_bus {
*/
int *irq;
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
struct mii_bus *mdiobus_alloc(void);
int mdiobus_register(struct mii_bus *bus);
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val);
#define PHY_INTERRUPT_DISABLED 0x0
#define PHY_INTERRUPT_ENABLED 0x80000000
@ -391,8 +408,35 @@ struct phy_fixup {
int (*run)(struct phy_device *phydev);
};
int phy_read(struct phy_device *phydev, u16 regnum);
int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
/**
* phy_read - Convenience function for reading a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to read
*
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
static inline int phy_read(struct phy_device *phydev, u16 regnum)
{
return mdiobus_read(phydev->bus, phydev->addr, regnum);
}
/**
* phy_write - Convenience function for writing a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to write
* @val: value to write to @regnum
*
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
}
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_clear_interrupt(struct phy_device *phydev);
@ -408,8 +452,6 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int mdiobus_register(struct mii_bus *bus);
void mdiobus_unregister(struct mii_bus *bus);
void phy_sanitize_settings(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_enable_interrupts(struct phy_device *phydev);
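The hunk above embeds a struct device in mii_bus, renames the old pointer to parent, adds an explicit allocation state machine, and exports raw mdiobus_read()/mdiobus_write() accessors that the new inline phy_read()/phy_write() wrap. A hedged sketch of the resulting lifecycle; the "myhw" names and stub accessors are hypothetical, and the op signatures are assumed from the mii_bus read/write members:

#include <linux/phy.h>

/* Hypothetical register accessors; real ones would talk to hardware. */
static int myhw_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	return 0xffff;			/* pretend: empty bus, no PHYs */
}

static int myhw_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
			   u16 val)
{
	return 0;
}

static int myhw_attach_mdio(struct device *parent)
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();			/* state = MDIOBUS_ALLOCATED */
	if (!bus)
		return -ENOMEM;

	bus->name = "myhw-mdio";
	bus->read = myhw_mdio_read;
	bus->write = myhw_mdio_write;
	bus->parent = parent;			/* 'dev' is now embedded */
	/* other required fields (bus id, irq table) omitted */

	err = mdiobus_register(bus);		/* state = MDIOBUS_REGISTERED */
	if (err) {
		mdiobus_free(bus);		/* legal: never registered */
		return err;
	}

	mdiobus_unregister(bus);		/* state = MDIOBUS_UNREGISTERED */
	mdiobus_free(bus);			/* state = MDIOBUS_RELEASED */
	return 0;
}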

View file

@ -3,22 +3,6 @@
#include <asm/byteorder.h>
#ifndef __KERNEL__
struct pim {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 pim_type:4, /* PIM message type */
pim_ver:4; /* PIM version */
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 pim_ver:4; /* PIM version */
pim_type:4; /* PIM message type */
#endif
__u8 pim_rsv; /* Reserved */
__be16 pim_cksum; /* Checksum */
};
#define PIM_MINLEN 8
#endif
/* Message types - V1 */
#define PIM_V1_VERSION __constant_htonl(0x10000000)
#define PIM_V1_REGISTER 1
@ -27,7 +11,6 @@ struct pim {
#define PIM_VERSION 2
#define PIM_REGISTER 1
#if defined(__KERNEL__)
#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
@ -42,4 +25,3 @@ struct pimreghdr
struct sk_buff;
extern int pim_rcv_v1(struct sk_buff *);
#endif
#endif

View file

@ -123,6 +123,13 @@ struct tc_prio_qopt
__u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
};
/* MULTIQ section */
struct tc_multiq_qopt {
__u16 bands; /* Number of bands */
__u16 max_bands; /* Maximum number of queues */
};
/* TBF section */
struct tc_tbf_qopt

View file

@ -21,7 +21,14 @@ struct pnp_dev;
/*
* Resource Management
*/
#ifdef CONFIG_PNP
struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
#else
static inline struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num)
{
return NULL;
}
#endif
static inline int pnp_resource_valid(struct resource *res)
{

View file

@ -104,8 +104,8 @@ struct prop_local_single {
* snapshot of the last seen global state
* and a lock protecting this state
*/
int shift;
unsigned long period;
int shift;
spinlock_t lock; /* protect the snapshot state */
};

View file

@ -80,6 +80,13 @@ void quicklist_trim(int nr, void (*dtor)(void *),
unsigned long quicklist_total_size(void);
#else
static inline unsigned long quicklist_total_size(void)
{
return 0;
}
#endif
#endif /* LINUX_QUICKLIST_H */

View file

@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt);
#ifndef CONFIG_MMU
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,

View file

@ -40,12 +40,21 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
long cur; /* Current batch number. */
long completed; /* Number of the last completed batch */
int next_pending; /* Is the next batch already waiting? */
long pending; /* Number of the last pending batch */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
unsigned long gp_start; /* Time at which GP started in jiffies. */
unsigned long jiffies_stall;
/* Time at which to check for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
int signaled;
@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
return (a - b) > 0;
}
/*
* Per-CPU data for Read-Copy UPdate.
* nxtlist - new callbacks are added here
* curlist - current batch for which quiescent cycle started if any
*/
/* Per-CPU data for Read-Copy Update. */
struct rcu_data {
/* 1) quiescent state handling : */
long quiescbatch; /* Batch # for grace period */
@ -78,12 +83,24 @@ struct rcu_data {
int qs_pending; /* core waits for quiesc state */
/* 2) batch handling */
long batch; /* Batch # for current RCU batch */
/*
* if nxtlist is not NULL, then:
* batch:
* The batch # for the last entry of nxtlist
* [*nxttail[1], NULL = *nxttail[2]):
* Entries whose batch # <= batch
* [*nxttail[0], *nxttail[1]):
* Entries whose batch # <= batch - 1
* [nxtlist, *nxttail[0]):
* Entries whose batch # <= batch - 2
* The grace period for these entries has completed, and
* the other grace-period-completed entries may be moved
* here temporarily in rcu_process_callbacks().
*/
long batch;
struct rcu_head *nxtlist;
struct rcu_head **nxttail;
struct rcu_head **nxttail[3];
long qlen; /* # of queued callbacks */
struct rcu_head *curlist;
struct rcu_head **curtail;
struct rcu_head *donelist;
struct rcu_head **donetail;
long blimit; /* Upper limit on a processed batch */

View file

@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
at->prev = last;
}
/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
prefetch(pos->next), pos != (head); \
pos = rcu_dereference(pos->next))
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \

View file

@ -132,6 +132,26 @@ struct rcu_head {
*/
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
/**
* rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
*
* Should be used with either
* - synchronize_sched()
* or
* - call_rcu_sched() and rcu_barrier_sched()
* on the write-side to ensure proper synchronization.
*/
#define rcu_read_lock_sched() preempt_disable()
/*
* rcu_read_unlock_sched - marks the end of an RCU-classic critical section
*
* See rcu_read_lock_sched for more information.
*/
#define rcu_read_unlock_sched() preempt_enable()
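The new pair maps directly onto preempt_disable()/preempt_enable(), so readers are anything that runs non-preemptibly. A minimal reader/updater sketch pairing it with synchronize_sched(); the structure and global pointer are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int datum; };		/* hypothetical shared object */
static struct foo *global_foo;

static int reader(void)
{
	struct foo *f;
	int val = -1;

	rcu_read_lock_sched();		/* disables preemption */
	f = rcu_dereference(global_foo);
	if (f)
		val = f->datum;
	rcu_read_unlock_sched();
	return val;
}

static void updater(struct foo *newf)
{
	struct foo *old = global_foo;

	rcu_assign_pointer(global_foo, newf);
	synchronize_sched();		/* all sched readers are done */
	kfree(old);
}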
/**
* rcu_dereference - fetch an RCU-protected pointer in an
* RCU read-side critical section. This pointer may later

View file

@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
rdssp->sched_qs++;
}
#define rcu_bh_qsctr_inc(cpu)
#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
/*
* Someone might want to pass call_rcu_bh as a function pointer.
* So this needs to just be a rename and not a macro function.
* (no parentheses)
*/
#define call_rcu_bh call_rcu
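To see why the comment insists on a plain rename: an object-like macro still allows the name to be taken by address, which a function-like macro would not. A tiny hedged illustration (the typedef and names are invented for the example):

#include <linux/rcupdate.h>

typedef void (*rcu_queue_fn)(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));

/* compiles only because call_rcu_bh is an object-like macro */
static rcu_queue_fn queue_cb = call_rcu_bh;

static void queue_via_pointer(struct rcu_head *head,
			      void (*done)(struct rcu_head *head))
{
	queue_cb(head, done);	/* ends up in call_rcu() on rcupreempt */
}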
/**
* call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;
#ifdef CONFIG_NO_HZ
DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
static inline void rcu_enter_nohz(void)
{
@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
{
static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
__get_cpu_var(rcu_dyntick_sched).dynticks++;
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
&rs);
}

View file

@ -166,7 +166,7 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
int ret = -EBUSY;
spin_lock_irqsave(&cnt->lock, flags);
if (cnt->usage < limit) {
if (cnt->usage <= limit) {
cnt->limit = limit;
ret = 0;
}

View file

@ -49,6 +49,7 @@ enum rfkill_state {
RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */
RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */
RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */
RFKILL_STATE_MAX, /* marker for last valid state */
};
/*
@ -110,12 +111,14 @@ struct rfkill {
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type);
struct rfkill * __must_check rfkill_allocate(struct device *parent,
enum rfkill_type type);
void rfkill_free(struct rfkill *rfkill);
int rfkill_register(struct rfkill *rfkill);
int __must_check rfkill_register(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
int rfkill_set_default(enum rfkill_type type, enum rfkill_state state);
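With rfkill_allocate() and rfkill_register() now __must_check, callers have to handle both failure paths explicitly. A hedged sketch of the pattern; the "my_wlan" naming is hypothetical and the mandatory state callbacks are elided:

#include <linux/err.h>
#include <linux/rfkill.h>

static struct rfkill *my_wlan_rfkill_setup(struct device *parent)
{
	struct rfkill *rf;
	int err;

	rf = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
	if (!rf)
		return ERR_PTR(-ENOMEM);

	rf->name = "my_wlan";
	/* rf->toggle_radio etc. would be set here */

	err = rfkill_register(rf);	/* __must_check: don't ignore */
	if (err) {
		rfkill_free(rf);	/* never registered: plain free */
		return ERR_PTR(err);
	}
	return rf;
}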
/**
* rfkill_state_complement - return complementary state

View file

@ -18,40 +18,47 @@
/*
* M48T59 Register Offset
*/
#define M48T59_YEAR 0x1fff
#define M48T59_MONTH 0x1ffe
#define M48T59_MDAY 0x1ffd /* Day of Month */
#define M48T59_WDAY 0x1ffc /* Day of Week */
#define M48T59_YEAR 0xf
#define M48T59_MONTH 0xe
#define M48T59_MDAY 0xd /* Day of Month */
#define M48T59_WDAY 0xc /* Day of Week */
#define M48T59_WDAY_CB 0x20 /* Century Bit */
#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */
#define M48T59_HOUR 0x1ffb
#define M48T59_MIN 0x1ffa
#define M48T59_SEC 0x1ff9
#define M48T59_CNTL 0x1ff8
#define M48T59_HOUR 0xb
#define M48T59_MIN 0xa
#define M48T59_SEC 0x9
#define M48T59_CNTL 0x8
#define M48T59_CNTL_READ 0x40
#define M48T59_CNTL_WRITE 0x80
#define M48T59_WATCHDOG 0x1ff7
#define M48T59_INTR 0x1ff6
#define M48T59_WATCHDOG 0x7
#define M48T59_INTR 0x6
#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */
#define M48T59_INTR_ABE 0x20
#define M48T59_ALARM_DATE 0x1ff5
#define M48T59_ALARM_HOUR 0x1ff4
#define M48T59_ALARM_MIN 0x1ff3
#define M48T59_ALARM_SEC 0x1ff2
#define M48T59_UNUSED 0x1ff1
#define M48T59_FLAGS 0x1ff0
#define M48T59_ALARM_DATE 0x5
#define M48T59_ALARM_HOUR 0x4
#define M48T59_ALARM_MIN 0x3
#define M48T59_ALARM_SEC 0x2
#define M48T59_UNUSED 0x1
#define M48T59_FLAGS 0x0
#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */
#define M48T59_FLAGS_AF 0x40 /* alarm */
#define M48T59_FLAGS_BF 0x10 /* low battery */
#define M48T59_NVRAM_SIZE 0x1ff0
#define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */
#define M48T59RTC_TYPE_M48T02 1
#define M48T59RTC_TYPE_M48T08 2
struct m48t59_plat_data {
/* The method to access M48T59 registers,
* NOTE: The 'ofs' should be 0x00~0x1fff
*/
/* The method to access M48T59 registers */
void (*write_byte)(struct device *dev, u32 ofs, u8 val);
unsigned char (*read_byte)(struct device *dev, u32 ofs);
int type; /* RTC model */
/* ioaddr mapped externally */
void __iomem *ioaddr;
/* offset to RTC registers, automatically set according to the type */
unsigned int offset;
};
#endif /* _LINUX_RTC_M48T59_H_ */
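The register offsets above are now chip-relative (seconds at 0x9 instead of 0x1ff9); the driver locates the register block by adding the type-derived pdata->offset, so board accessors can stay flat. A hedged sketch of one such accessor, assuming the driver passes offsets already adjusted by pdata->offset:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/rtc/m48t59.h>

static unsigned char board_m48t59_read_byte(struct device *dev, u32 ofs)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;

	/* 'ofs' is assumed pre-adjusted by pdata->offset, so a flat
	 * readb() from the externally mapped window suffices. */
	return readb(pdata->ioaddr + ofs);
}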

View file

@ -582,6 +582,10 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE
RTNLGRP_ND_USEROPT,
#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT
RTNLGRP_PHONET_IFADDR,
#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR
RTNLGRP_PHONET_ROUTE,
#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)

View file

@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
#if USE_SPLIT_PTLOCKS
/*
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
#else /* !USE_SPLIT_PTLOCKS */
/*
* The mm counters are protected by its page_table_lock,
* so can be incremented directly.
@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
#endif /* !USE_SPLIT_PTLOCKS */
#define get_mm_rss(mm) \
(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
@ -451,8 +451,8 @@ struct signal_struct {
* - everyone except group_exit_task is stopped during signal delivery
* of fatal signals, group_exit_task processes the signal.
*/
struct task_struct *group_exit_task;
int notify_count;
struct task_struct *group_exit_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
@ -824,6 +824,9 @@ struct sched_domain {
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
};
extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
@ -897,7 +900,7 @@ struct sched_class {
void (*yield_task) (struct rq *rq);
int (*select_task_rq)(struct task_struct *p, int sync);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@ -1010,8 +1013,8 @@ struct sched_entity {
struct sched_rt_entity {
struct list_head run_list;
unsigned int time_slice;
unsigned long timeout;
unsigned int time_slice;
int nr_cpus_allowed;
struct sched_rt_entity *back;
@ -1475,6 +1478,10 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
extern cputime_t task_gtime(struct task_struct *p);
/*
* Per process flags
*/

View file

@ -1560,11 +1560,6 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
extern void securityfs_remove(struct dentry *dentry);
/* Security operations */
int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
@ -2424,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap)
return cap_netlink_recv(skb, cap);
}
static inline struct dentry *securityfs_create_dir(const char *name,
struct dentry *parent)
{
return ERR_PTR(-ENODEV);
}
static inline struct dentry *securityfs_create_file(const char *name,
mode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
return ERR_PTR(-ENODEV);
}
static inline void securityfs_remove(struct dentry *dentry)
{
}
static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -EOPNOTSUPP;
@ -2806,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_SECURITYFS
extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
extern void securityfs_remove(struct dentry *dentry);
#else /* CONFIG_SECURITYFS */
static inline struct dentry *securityfs_create_dir(const char *name,
struct dentry *parent)
{
return ERR_PTR(-ENODEV);
}
static inline struct dentry *securityfs_create_file(const char *name,
mode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
return ERR_PTR(-ENODEV);
}
static inline void securityfs_remove(struct dentry *dentry)
{}
#endif
#endif /* ! __LINUX_SECURITY_H */
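Moving the securityfs API under CONFIG_SECURITYFS with ERR_PTR stubs means callers can use it unconditionally and only check the result. A hedged sketch; the "mylsm" names, mode, and fops are hypothetical:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/security.h>

static struct dentry *mylsm_dir, *mylsm_file;

static int mylsm_fs_init(const struct file_operations *fops, void *data)
{
	mylsm_dir = securityfs_create_dir("mylsm", NULL);
	if (IS_ERR(mylsm_dir))
		return PTR_ERR(mylsm_dir);	/* -ENODEV when stubbed out */

	mylsm_file = securityfs_create_file("policy", 0600, mylsm_dir,
					    data, fops);
	if (IS_ERR(mylsm_file)) {
		securityfs_remove(mylsm_dir);
		return PTR_ERR(mylsm_file);
	}
	return 0;
}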

View file

@ -146,8 +146,14 @@ struct skb_shared_info {
unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
#ifdef CONFIG_HAS_DMA
unsigned int num_dma_maps;
#endif
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
#endif
};
/* We divide dataref into two halves. The higher 16 bits hold references
@ -353,6 +359,14 @@ struct sk_buff {
#include <asm/system.h>
#ifdef CONFIG_HAS_DMA
#include <linux/dma-mapping.h>
extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
enum dma_data_direction dir);
extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
enum dma_data_direction dir);
#endif
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
@ -369,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, -1);
}
extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
@ -458,6 +474,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
return list->next == (struct sk_buff *)list;
}
/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
*
* Returns true if @skb is the last buffer on the list.
*/
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return (skb->next == (struct sk_buff *) list);
}
/**
* skb_queue_next - return the next packet in the queue
* @list: queue head
* @skb: current buffer
*
* Return the next packet in @list after @skb. It is only valid to
* call this if skb_queue_is_last() evaluates to false.
*/
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
/* This BUG_ON may seem severe, but if we just return then we
* are going to dereference garbage.
*/
BUG_ON(skb_queue_is_last(list, skb));
return skb->next;
}
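Together, skb_queue_is_last() and skb_queue_next() let code walk a queue without open-coding the circular-list termination test. A hedged sketch (locking is left to the caller):

#include <linux/skbuff.h>

static unsigned int queue_total_len(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;

	if (skb_queue_empty(list))
		return 0;

	skb = skb_peek(list);
	for (;;) {
		bytes += skb->len;
		if (skb_queue_is_last(list, skb))
			break;
		skb = skb_queue_next(list, skb);	/* safe: not last */
	}
	return bytes;
}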
/**
* skb_get - reference buffer
* @skb: buffer to reference
@ -646,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
return list_->qlen;
}
/**
* __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
* @list: queue to initialize
*
* This initializes only the list and queue length aspects of
* an sk_buff_head object. This allows the list aspects of an
* sk_buff_head to be initialized without reinitializing things like
* the spinlock. It can also be used for on-stack sk_buff_head
* objects where the spinlock is known to not be used.
*/
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
/*
* This function creates a split out lock class for each invocation;
* this is needed for now since a whole lot of users of the skb-queue
@ -657,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
__skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
@ -685,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk,
list->qlen++;
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *next)
{
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
/**
* skb_queue_splice - join two skb lists; this is designed for stacks
* @list: the new list to add
* @head: the place to add it in the first list
*/
static inline void skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
}
}
/**
* skb_queue_splice_init - join two skb lists and reinitialise the emptied list
* @list: the new list to add
* @head: the place to add it in the first list
*
* The list at @list is reinitialised
*/
static inline void skb_queue_splice_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
/**
* skb_queue_splice_tail - join two skb lists, each list being a queue
* @list: the new list to add
* @head: the place to add it in the first list
*/
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
}
}
/**
* skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
* @list: the new list to add
* @head: the place to add it in the first list
*
* Each of the lists is a queue.
* The list at @list is reinitialised
*/
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
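The splice helpers and __skb_queue_head_init() combine naturally: drain a shared queue onto an on-stack list under the lock, then work through it lock-free. A hedged sketch of that pattern; the processing step is a stand-in:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_and_process(struct sk_buff_head *queue)
{
	struct sk_buff_head work;
	struct sk_buff *skb;

	__skb_queue_head_init(&work);	/* on-stack: spinlock unused */

	spin_lock_bh(&queue->lock);
	skb_queue_splice_init(queue, &work);	/* queue is now empty */
	spin_unlock_bh(&queue->lock);

	while ((skb = __skb_dequeue(&work)) != NULL)
		kfree_skb(skb);		/* stand-in for real processing */
}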
/**
* __skb_queue_after - queue a buffer at the list head
* @list: list to use
@ -829,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
int off, int size);
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
@ -1243,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
/**
* netdev_alloc_page - allocate a page for ps-rx on a specific device
* @dev: network device to receive on
*
* Allocate a new page, node-local to the specified device.
*
* %NULL is returned if there is no free memory.
*/
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
return __netdev_alloc_page(dev, GFP_ATOMIC);
}
static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
__free_page(page);
}
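netdev_alloc_page() pairs with the skb_add_rx_frag() helper declared in the previous hunk to build paged receive buffers. A hedged sketch; the refill function is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_rx_attach_page(struct net_device *dev, struct sk_buff *skb)
{
	struct page *page = netdev_alloc_page(dev);	/* GFP_ATOMIC */

	if (!page)
		return -ENOMEM;
	/* hand the whole page to the skb as fragment 0 */
	skb_add_rx_frag(skb, 0, page, 0, PAGE_SIZE);
	return 0;
}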
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
@ -1434,6 +1596,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
#define skb_queue_walk_from(queue, skb) \
for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
skb = skb->next)
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \

View file

@ -11,7 +11,9 @@
#include <linux/types.h>
#include <linux/magic.h>
#ifdef __KERNEL__
#include <linux/time.h>
#endif
enum smb_protocol {
SMB_PROTOCOL_NONE,

View file

@ -16,8 +16,19 @@
#define SMC91X_USE_DMA (1 << 6)
#define RPC_LED_100_10 (0x00) /* LED = 100Mbps OR'd with 10Mbps link detect */
#define RPC_LED_RES (0x01) /* LED = Reserved */
#define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */
#define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */
#define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */
#define RPC_LED_100 (0x05) /* LED = 100Mbps link detect */
#define RPC_LED_TX (0x06) /* LED = TX packet occurred */
#define RPC_LED_RX (0x07) /* LED = RX packet occurred */
struct smc91x_platdata {
unsigned long flags;
unsigned char leda;
unsigned char ledb;
};
#endif /* __SMC91X_H__ */

View file

@ -190,7 +190,8 @@ struct ucred {
#define AF_IUCV 32 /* IUCV sockets */
#define AF_RXRPC 33 /* RxRPC sockets */
#define AF_ISDN 34 /* mISDN sockets */
#define AF_MAX 35 /* For now.. */
#define AF_PHONET 35 /* Phonet sockets */
#define AF_MAX 36 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@ -227,6 +228,7 @@ struct ucred {
#define PF_IUCV AF_IUCV
#define PF_RXRPC AF_RXRPC
#define PF_ISDN AF_ISDN
#define PF_PHONET AF_PHONET
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@ -295,6 +297,7 @@ struct ucred {
#define SOL_RXRPC 272
#define SOL_PPPOL2TP 273
#define SOL_BLUETOOTH 274
#define SOL_PNPIPE 275
/* IPX options */
#define IPX_TYPE 1
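With AF_PHONET/PF_PHONET defined, a datagram socket for the new family is created the usual way. A hedged user-space sketch; older libcs may not know the constant yet, hence the fallback define:

#include <sys/socket.h>
#include <linux/types.h>
#include <linux/phonet.h>
#include <stdio.h>
#include <unistd.h>

#ifndef PF_PHONET
#define PF_PHONET 35	/* value from the hunk above */
#endif

int main(void)
{
	int fd = socket(PF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0) {
		perror("socket(PF_PHONET)");	/* EAFNOSUPPORT on old kernels */
		return 1;
	}
	close(fd);
	return 0;
}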

View file

@ -43,6 +43,9 @@ struct ads7846_platform_data {
u16 debounce_tol; /* tolerance used for filtering */
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
int gpio_pendown; /* the GPIO used to decide the pendown
* state if get_pendown_state == NULL
*/
int (*get_pendown_state)(void);
int (*filter_init) (struct ads7846_platform_data *pdata,
void **filter_data);
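With the new gpio_pendown field, a board can leave get_pendown_state NULL and let the driver sample the GPIO directly. A hedged board-file sketch; the GPIO number and resistance value are hypothetical:

#include <linux/spi/ads7846.h>

static struct ads7846_platform_data my_ts_info = {
	.model		= 7846,
	.x_plate_ohms	= 180,
	.gpio_pendown	= 132,	/* wired to the nPENIRQ line */
	/* .get_pendown_state left NULL: driver uses gpio_pendown */
};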

Some files were not shown because too many files have changed in this diff Show more