/spare/repo/libata-dev branch 'v2.6.13'
This commit is contained in:
commit
2fca877b68
4486 changed files with 332650 additions and 143058 deletions
|
@ -138,7 +138,7 @@ enum machine_type {
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#define _N_SEGMENT_ROUND(x) (((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1))
|
||||
#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
|
||||
|
||||
#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text)
|
||||
|
||||
|
|
|
@ -206,7 +206,10 @@ struct acpi_table_plat_int_src {
|
|||
u8 eid;
|
||||
u8 iosapic_vector;
|
||||
u32 global_irq;
|
||||
u32 reserved;
|
||||
struct {
|
||||
u32 cpei_override_flag:1;
|
||||
u32 reserved:31;
|
||||
} plint_flags;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum acpi_interrupt_id {
|
||||
|
@ -342,11 +345,19 @@ struct acpi_table_ecdt {
|
|||
|
||||
/* PCI MMCONFIG */
|
||||
|
||||
/* Defined in PCI Firmware Specification 3.0 */
|
||||
struct acpi_table_mcfg_config {
|
||||
u32 base_address;
|
||||
u32 base_reserved;
|
||||
u16 pci_segment_group_number;
|
||||
u8 start_bus_number;
|
||||
u8 end_bus_number;
|
||||
u8 reserved[4];
|
||||
} __attribute__ ((packed));
|
||||
struct acpi_table_mcfg {
|
||||
struct acpi_table_header header;
|
||||
u8 reserved[8];
|
||||
u32 base_address;
|
||||
u32 base_reserved;
|
||||
struct acpi_table_mcfg_config config[0];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* Table Handlers */
|
||||
|
@ -391,6 +402,7 @@ int acpi_table_parse (enum acpi_table_id id, acpi_table_handler handler);
|
|||
int acpi_get_table_header_early (enum acpi_table_id id, struct acpi_table_header **header);
|
||||
int acpi_table_parse_madt (enum acpi_madt_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries);
|
||||
int acpi_table_parse_srat (enum acpi_srat_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries);
|
||||
int acpi_parse_mcfg (unsigned long phys_addr, unsigned long size);
|
||||
void acpi_table_print (struct acpi_table_header *header, unsigned long phys_addr);
|
||||
void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
|
||||
void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
|
||||
|
@ -407,9 +419,13 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu);
|
|||
int acpi_unmap_lsapic(int cpu);
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
|
||||
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
|
||||
|
||||
extern int acpi_mp_config;
|
||||
|
||||
extern u32 pci_mmcfg_base_addr;
|
||||
extern struct acpi_table_mcfg_config *pci_mmcfg_config;
|
||||
extern int pci_mmcfg_config_num;
|
||||
|
||||
extern int sbf_port ;
|
||||
|
||||
|
@ -437,9 +453,7 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
|
|||
* If this matches the last registration, any IRQ resources for gsi
|
||||
* are freed.
|
||||
*/
|
||||
#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
|
||||
void acpi_unregister_gsi (u32 gsi);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI_PCI
|
||||
|
||||
|
@ -462,11 +476,9 @@ struct acpi_prt_list {
|
|||
struct pci_dev;
|
||||
|
||||
int acpi_pci_irq_enable (struct pci_dev *dev);
|
||||
void acpi_penalize_isa_irq(int irq);
|
||||
void acpi_penalize_isa_irq(int irq, int active);
|
||||
|
||||
#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
|
||||
void acpi_pci_irq_disable (struct pci_dev *dev);
|
||||
#endif
|
||||
|
||||
struct acpi_pci_driver {
|
||||
struct acpi_pci_driver *next;
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef __LINUX_ATALK_H__
|
||||
#define __LINUX_ATALK_H__
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
/*
|
||||
* AppleTalk networking structures
|
||||
*
|
||||
|
|
|
@ -165,7 +165,7 @@
|
|||
#define AUDIT_ARCH_SH64 (EM_SH|__AUDIT_ARCH_64BIT)
|
||||
#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
|
||||
#define AUDIT_ARCH_SPARC (EM_SPARC)
|
||||
#define AUDIT_ARCH_SPARC64 (EM_SPARC64|__AUDIT_ARCH_64BIT)
|
||||
#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
|
||||
#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE)
|
||||
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
|
||||
|
||||
|
|
|
@ -69,6 +69,11 @@ extern void remove_arg_zero(struct linux_binprm *);
|
|||
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
|
||||
extern int flush_old_exec(struct linux_binprm * bprm);
|
||||
|
||||
extern int suid_dumpable;
|
||||
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
|
||||
#define SUID_DUMP_USER 1 /* Dump as user of process */
|
||||
#define SUID_DUMP_ROOT 2 /* Dump as root */
|
||||
|
||||
/* Stack area protections */
|
||||
#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
|
||||
#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/ioprio.h>
|
||||
|
||||
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
|
||||
#include <asm/io.h>
|
||||
|
@ -149,6 +150,19 @@ struct bio {
|
|||
#define BIO_RW_FAILFAST 3
|
||||
#define BIO_RW_SYNC 4
|
||||
|
||||
/*
|
||||
* upper 16 bits of bi_rw define the io priority of this bio
|
||||
*/
|
||||
#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
|
||||
#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
|
||||
#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
|
||||
|
||||
#define bio_set_prio(bio, prio) do { \
|
||||
WARN_ON(prio >= (1 << IOPRIO_BITS)); \
|
||||
(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
|
||||
(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* various member access, note that bio_data should of course not be used
|
||||
* on highmem page vectors
|
||||
|
|
|
@ -54,16 +54,23 @@ struct as_io_context {
|
|||
|
||||
struct cfq_queue;
|
||||
struct cfq_io_context {
|
||||
void (*dtor)(struct cfq_io_context *);
|
||||
void (*exit)(struct cfq_io_context *);
|
||||
|
||||
struct io_context *ioc;
|
||||
|
||||
/*
|
||||
* circular list of cfq_io_contexts belonging to a process io context
|
||||
*/
|
||||
struct list_head list;
|
||||
struct cfq_queue *cfqq;
|
||||
void *key;
|
||||
|
||||
struct io_context *ioc;
|
||||
|
||||
unsigned long last_end_request;
|
||||
unsigned long last_queue;
|
||||
unsigned long ttime_total;
|
||||
unsigned long ttime_samples;
|
||||
unsigned long ttime_mean;
|
||||
|
||||
void (*dtor)(struct cfq_io_context *);
|
||||
void (*exit)(struct cfq_io_context *);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -73,7 +80,9 @@ struct cfq_io_context {
|
|||
*/
|
||||
struct io_context {
|
||||
atomic_t refcount;
|
||||
pid_t pid;
|
||||
struct task_struct *task;
|
||||
|
||||
int (*set_ioprio)(struct io_context *, unsigned int);
|
||||
|
||||
/*
|
||||
* For request batching
|
||||
|
@ -81,14 +90,13 @@ struct io_context {
|
|||
unsigned long last_waited; /* Time last woken after wait for request */
|
||||
int nr_batch_requests; /* Number of requests left in the batch */
|
||||
|
||||
spinlock_t lock;
|
||||
|
||||
struct as_io_context *aic;
|
||||
struct cfq_io_context *cic;
|
||||
};
|
||||
|
||||
void put_io_context(struct io_context *ioc);
|
||||
void exit_io_context(void);
|
||||
struct io_context *current_io_context(int gfp_flags);
|
||||
struct io_context *get_io_context(int gfp_flags);
|
||||
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
|
||||
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
|
||||
|
@ -134,6 +142,8 @@ struct request {
|
|||
|
||||
void *elevator_private;
|
||||
|
||||
unsigned short ioprio;
|
||||
|
||||
int rq_status; /* should split this into a few status bits */
|
||||
struct gendisk *rq_disk;
|
||||
int errors;
|
||||
|
@ -285,9 +295,6 @@ enum blk_queue_state {
|
|||
Queue_up,
|
||||
};
|
||||
|
||||
#define BLK_TAGS_PER_LONG (sizeof(unsigned long) * 8)
|
||||
#define BLK_TAGS_MASK (BLK_TAGS_PER_LONG - 1)
|
||||
|
||||
struct blk_queue_tag {
|
||||
struct request **tag_index; /* map of busy tags */
|
||||
unsigned long *tag_map; /* bit map of free/busy tags */
|
||||
|
@ -396,6 +403,7 @@ struct request_queue
|
|||
*/
|
||||
unsigned int sg_timeout;
|
||||
unsigned int sg_reserved_size;
|
||||
int node;
|
||||
|
||||
struct list_head drain_list;
|
||||
|
||||
|
@ -542,15 +550,12 @@ extern void generic_make_request(struct bio *bio);
|
|||
extern void blk_put_request(struct request *);
|
||||
extern void blk_end_sync_rq(struct request *rq);
|
||||
extern void blk_attempt_remerge(request_queue_t *, struct request *);
|
||||
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
|
||||
extern struct request *blk_get_request(request_queue_t *, int, int);
|
||||
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
|
||||
extern void blk_requeue_request(request_queue_t *, struct request *);
|
||||
extern void blk_plug_device(request_queue_t *);
|
||||
extern int blk_remove_plug(request_queue_t *);
|
||||
extern void blk_recount_segments(request_queue_t *, struct bio *);
|
||||
extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
|
||||
extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
|
||||
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
|
||||
extern void blk_start_queue(request_queue_t *q);
|
||||
extern void blk_stop_queue(request_queue_t *q);
|
||||
|
@ -615,6 +620,8 @@ static inline void blkdev_dequeue_request(struct request *req)
|
|||
/*
|
||||
* Access functions for manipulating queue properties
|
||||
*/
|
||||
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
|
||||
spinlock_t *lock, int node_id);
|
||||
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
|
||||
extern void blk_cleanup_queue(request_queue_t *);
|
||||
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
|
||||
|
@ -632,7 +639,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
|
|||
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
|
||||
extern void blk_queue_ordered(request_queue_t *, int);
|
||||
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
|
||||
extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
|
||||
extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
|
||||
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
|
||||
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
|
||||
|
@ -646,7 +652,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int);
|
|||
extern void blk_finish_queue_drain(request_queue_t *);
|
||||
|
||||
int blk_get_queue(request_queue_t *);
|
||||
request_queue_t *blk_alloc_queue(int);
|
||||
request_queue_t *blk_alloc_queue(int gfp_mask);
|
||||
request_queue_t *blk_alloc_queue_node(int,int);
|
||||
#define blk_put_queue(q) blk_cleanup_queue((q))
|
||||
|
||||
/*
|
||||
|
@ -675,8 +682,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
|
|||
|
||||
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
|
||||
|
||||
extern void drive_stat_acct(struct request *, int, int);
|
||||
|
||||
static inline int queue_hardsect_size(request_queue_t *q)
|
||||
{
|
||||
int retval = 512;
|
||||
|
|
|
@ -22,6 +22,10 @@ extern unsigned long min_low_pfn;
|
|||
*/
|
||||
extern unsigned long max_pfn;
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
extern unsigned long saved_max_pfn;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* node_bootmem_map is a map pointer - the bits represent all physical
|
||||
* memory pages (including holes) on the node.
|
||||
|
@ -67,6 +71,15 @@ extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size,
|
|||
__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
|
||||
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
|
||||
extern void *alloc_remap(int nid, unsigned long size);
|
||||
#else
|
||||
static inline void *alloc_remap(int nid, unsigned long size)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern unsigned long __initdata nr_kernel_pages;
|
||||
extern unsigned long __initdata nr_all_pages;
|
||||
|
||||
|
|
|
@ -19,6 +19,9 @@ enum bh_state_bits {
|
|||
BH_Dirty, /* Is dirty */
|
||||
BH_Lock, /* Is locked */
|
||||
BH_Req, /* Has been submitted for I/O */
|
||||
BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
|
||||
* IO completion of other buffers in the page
|
||||
*/
|
||||
|
||||
BH_Mapped, /* Has a disk mapping */
|
||||
BH_New, /* Disk mapping was newly created by get_block */
|
||||
|
|
|
@ -92,29 +92,32 @@
|
|||
#endif /* OPTIMIZE */
|
||||
|
||||
|
||||
static __inline__ __const__ __u32 __fswahw32(__u32 x)
|
||||
static inline __u32 __fswahw32(__u32 x)
|
||||
{
|
||||
return __arch__swahw32(x);
|
||||
}
|
||||
static __inline__ __u32 __swahw32p(__u32 *x)
|
||||
|
||||
static inline __u32 __swahw32p(__u32 *x)
|
||||
{
|
||||
return __arch__swahw32p(x);
|
||||
}
|
||||
static __inline__ void __swahw32s(__u32 *addr)
|
||||
|
||||
static inline void __swahw32s(__u32 *addr)
|
||||
{
|
||||
__arch__swahw32s(addr);
|
||||
}
|
||||
|
||||
|
||||
static __inline__ __const__ __u32 __fswahb32(__u32 x)
|
||||
static inline __u32 __fswahb32(__u32 x)
|
||||
{
|
||||
return __arch__swahb32(x);
|
||||
}
|
||||
static __inline__ __u32 __swahb32p(__u32 *x)
|
||||
|
||||
static inline __u32 __swahb32p(__u32 *x)
|
||||
{
|
||||
return __arch__swahb32p(x);
|
||||
}
|
||||
static __inline__ void __swahb32s(__u32 *addr)
|
||||
|
||||
static inline void __swahb32s(__u32 *addr)
|
||||
{
|
||||
__arch__swahb32s(addr);
|
||||
}
|
||||
|
|
|
@ -13,6 +13,12 @@
|
|||
#define SMP_CACHE_BYTES L1_CACHE_BYTES
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_SPARC64)
|
||||
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
|
||||
#else
|
||||
#define __read_mostly
|
||||
#endif
|
||||
|
||||
#ifndef ____cacheline_aligned
|
||||
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
|
||||
#endif
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
typedef struct _cciss_pci_info_struct
|
||||
{
|
||||
unsigned char bus;
|
||||
unsigned short domain;
|
||||
unsigned char dev_fn;
|
||||
__u32 board_id;
|
||||
} cciss_pci_info_struct;
|
||||
|
|
|
@ -346,10 +346,27 @@ COMPATIBLE_IOCTL(PPPOEIOCDFWD)
|
|||
/* LP */
|
||||
COMPATIBLE_IOCTL(LPGETSTATUS)
|
||||
/* ppdev */
|
||||
COMPATIBLE_IOCTL(PPSETMODE)
|
||||
COMPATIBLE_IOCTL(PPRSTATUS)
|
||||
COMPATIBLE_IOCTL(PPRCONTROL)
|
||||
COMPATIBLE_IOCTL(PPWCONTROL)
|
||||
COMPATIBLE_IOCTL(PPFCONTROL)
|
||||
COMPATIBLE_IOCTL(PPRDATA)
|
||||
COMPATIBLE_IOCTL(PPWDATA)
|
||||
COMPATIBLE_IOCTL(PPCLAIM)
|
||||
COMPATIBLE_IOCTL(PPRELEASE)
|
||||
COMPATIBLE_IOCTL(PPEXCL)
|
||||
COMPATIBLE_IOCTL(PPYIELD)
|
||||
COMPATIBLE_IOCTL(PPEXCL)
|
||||
COMPATIBLE_IOCTL(PPDATADIR)
|
||||
COMPATIBLE_IOCTL(PPNEGOT)
|
||||
COMPATIBLE_IOCTL(PPWCTLONIRQ)
|
||||
COMPATIBLE_IOCTL(PPCLRIRQ)
|
||||
COMPATIBLE_IOCTL(PPSETPHASE)
|
||||
COMPATIBLE_IOCTL(PPGETMODES)
|
||||
COMPATIBLE_IOCTL(PPGETMODE)
|
||||
COMPATIBLE_IOCTL(PPGETPHASE)
|
||||
COMPATIBLE_IOCTL(PPGETFLAGS)
|
||||
COMPATIBLE_IOCTL(PPSETFLAGS)
|
||||
/* CDROM stuff */
|
||||
COMPATIBLE_IOCTL(CDROMPAUSE)
|
||||
COMPATIBLE_IOCTL(CDROMRESUME)
|
||||
|
|
|
@ -69,6 +69,7 @@ extern struct semaphore cpucontrol;
|
|||
register_cpu_notifier(&fn##_nb); \
|
||||
}
|
||||
int cpu_down(unsigned int cpu);
|
||||
extern int __attribute__((weak)) smp_prepare_cpu(int cpu);
|
||||
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
|
||||
#else
|
||||
#define lock_cpu_hotplug() do { } while (0)
|
||||
|
|
|
@ -201,7 +201,7 @@ struct cpufreq_driver {
|
|||
|
||||
/* optional */
|
||||
int (*exit) (struct cpufreq_policy *policy);
|
||||
int (*suspend) (struct cpufreq_policy *policy, u32 state);
|
||||
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
|
||||
int (*resume) (struct cpufreq_policy *policy);
|
||||
struct freq_attr **attr;
|
||||
};
|
||||
|
|
18
include/linux/crash_dump.h
Normal file
18
include/linux/crash_dump.h
Normal file
|
@ -0,0 +1,18 @@
|
|||
#ifndef LINUX_CRASH_DUMP_H
|
||||
#define LINUX_CRASH_DUMP_H
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/proc_fs.h>
|
||||
|
||||
#define ELFCORE_ADDR_MAX (-1ULL)
|
||||
extern unsigned long long elfcorehdr_addr;
|
||||
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
|
||||
unsigned long, int);
|
||||
extern struct file_operations proc_vmcore_operations;
|
||||
extern struct proc_dir_entry *proc_vmcore;
|
||||
|
||||
#endif /* CONFIG_CRASH_DUMP */
|
||||
#endif /* LINUX_CRASHDUMP_H */
|
|
@ -61,6 +61,15 @@
|
|||
#define CRYPTO_DIR_DECRYPT 0
|
||||
|
||||
struct scatterlist;
|
||||
struct crypto_tfm;
|
||||
|
||||
struct cipher_desc {
|
||||
struct crypto_tfm *tfm;
|
||||
void (*crfn)(void *ctx, u8 *dst, const u8 *src);
|
||||
unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
|
||||
const u8 *src, unsigned int nbytes);
|
||||
void *info;
|
||||
};
|
||||
|
||||
/*
|
||||
* Algorithms: modular crypto algorithm implementations, managed
|
||||
|
@ -73,6 +82,19 @@ struct cipher_alg {
|
|||
unsigned int keylen, u32 *flags);
|
||||
void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
|
||||
void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
|
||||
|
||||
unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
|
||||
u8 *dst, const u8 *src,
|
||||
unsigned int nbytes);
|
||||
unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
|
||||
u8 *dst, const u8 *src,
|
||||
unsigned int nbytes);
|
||||
unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
|
||||
u8 *dst, const u8 *src,
|
||||
unsigned int nbytes);
|
||||
unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
|
||||
u8 *dst, const u8 *src,
|
||||
unsigned int nbytes);
|
||||
};
|
||||
|
||||
struct digest_alg {
|
||||
|
@ -102,6 +124,7 @@ struct crypto_alg {
|
|||
u32 cra_flags;
|
||||
unsigned int cra_blocksize;
|
||||
unsigned int cra_ctxsize;
|
||||
unsigned int cra_alignmask;
|
||||
const char cra_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
union {
|
||||
|
@ -136,7 +159,6 @@ static inline int crypto_alg_available(const char *name, u32 flags)
|
|||
* and core processing logic. Managed via crypto_alloc_tfm() and
|
||||
* crypto_free_tfm(), as well as the various helpers below.
|
||||
*/
|
||||
struct crypto_tfm;
|
||||
|
||||
struct cipher_tfm {
|
||||
void *cit_iv;
|
||||
|
@ -266,6 +288,16 @@ static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
|
|||
return tfm->__crt_alg->cra_digest.dia_digestsize;
|
||||
}
|
||||
|
||||
static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
|
||||
{
|
||||
return tfm->__crt_alg->cra_alignmask;
|
||||
}
|
||||
|
||||
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
|
||||
{
|
||||
return (void *)&tfm[1];
|
||||
}
|
||||
|
||||
/*
|
||||
* API wrappers.
|
||||
*/
|
||||
|
|
|
@ -48,12 +48,12 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
|
|||
|
||||
#else
|
||||
|
||||
struct dcookie_user * dcookie_register(void)
|
||||
static inline struct dcookie_user * dcookie_register(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void dcookie_unregister(struct dcookie_user * user)
|
||||
static inline void dcookie_unregister(struct dcookie_user * user)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ struct bus_type {
|
|||
extern int bus_register(struct bus_type * bus);
|
||||
extern void bus_unregister(struct bus_type * bus);
|
||||
|
||||
extern int bus_rescan_devices(struct bus_type * bus);
|
||||
extern void bus_rescan_devices(struct bus_type * bus);
|
||||
|
||||
extern struct bus_type * get_bus(struct bus_type * bus);
|
||||
extern void put_bus(struct bus_type * bus);
|
||||
|
@ -80,6 +80,8 @@ extern struct bus_type * find_bus(char * name);
|
|||
|
||||
int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
|
||||
int (*fn)(struct device *, void *));
|
||||
struct device * bus_find_device(struct bus_type *bus, struct device *start,
|
||||
void *data, int (*match)(struct device *, void *));
|
||||
|
||||
int bus_for_each_drv(struct bus_type * bus, struct device_driver * start,
|
||||
void * data, int (*fn)(struct device_driver *, void *));
|
||||
|
@ -142,6 +144,9 @@ extern void driver_remove_file(struct device_driver *, struct driver_attribute *
|
|||
|
||||
extern int driver_for_each_device(struct device_driver * drv, struct device * start,
|
||||
void * data, int (*fn)(struct device *, void *));
|
||||
struct device * driver_find_device(struct device_driver *drv,
|
||||
struct device *start, void *data,
|
||||
int (*match)(struct device *, void *));
|
||||
|
||||
|
||||
/*
|
||||
|
@ -279,8 +284,10 @@ struct device {
|
|||
struct device_driver *driver; /* which driver has allocated this
|
||||
device */
|
||||
void *driver_data; /* data private to the driver */
|
||||
void *platform_data; /* Platform specific data (e.g. ACPI,
|
||||
BIOS data relevant to device) */
|
||||
void *platform_data; /* Platform specific data, device
|
||||
core doesn't touch it */
|
||||
void *firmware_data; /* Firmware specific data (e.g. ACPI,
|
||||
BIOS data),reserved for device core*/
|
||||
struct dev_pm_info power;
|
||||
|
||||
u64 *dma_mask; /* dma mask (if dma'able device) */
|
||||
|
|
|
@ -9,6 +9,7 @@ enum dmi_field {
|
|||
DMI_SYS_VENDOR,
|
||||
DMI_PRODUCT_NAME,
|
||||
DMI_PRODUCT_VERSION,
|
||||
DMI_PRODUCT_SERIAL,
|
||||
DMI_BOARD_VENDOR,
|
||||
DMI_BOARD_NAME,
|
||||
DMI_BOARD_VERSION,
|
||||
|
|
|
@ -11,6 +11,12 @@
|
|||
/* Root squash turned on */
|
||||
#define V1_DQF_RSQUASH 1
|
||||
|
||||
/* Numbers of blocks needed for updates */
|
||||
#define V1_INIT_ALLOC 1
|
||||
#define V1_INIT_REWRITE 1
|
||||
#define V1_DEL_ALLOC 0
|
||||
#define V1_DEL_REWRITE 2
|
||||
|
||||
/* Special information about quotafile */
|
||||
struct v1_mem_dqinfo {
|
||||
};
|
||||
|
|
|
@ -10,6 +10,12 @@
|
|||
/* id numbers of quota format */
|
||||
#define QFMT_VFS_V0 2
|
||||
|
||||
/* Numbers of blocks needed for updates */
|
||||
#define V2_INIT_ALLOC 4
|
||||
#define V2_INIT_REWRITE 2
|
||||
#define V2_DEL_ALLOC 0
|
||||
#define V2_DEL_REWRITE 6
|
||||
|
||||
/* Inmemory copy of version specific information */
|
||||
struct v2_mem_dqinfo {
|
||||
unsigned int dqi_blocks;
|
||||
|
|
|
@ -315,7 +315,7 @@ extern struct efi_memory_map memmap;
|
|||
*/
|
||||
static inline int efi_range_is_wc(unsigned long start, unsigned long len)
|
||||
{
|
||||
int i;
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
|
||||
unsigned long paddr = __pa(start + i);
|
||||
|
|
|
@ -16,9 +16,9 @@ typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
|
|||
typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
|
||||
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
|
||||
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
|
||||
typedef int (elevator_may_queue_fn) (request_queue_t *, int);
|
||||
typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
|
||||
|
||||
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
|
||||
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
|
||||
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
|
||||
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
|
||||
|
||||
|
@ -96,9 +96,9 @@ extern struct request *elv_former_request(request_queue_t *, struct request *);
|
|||
extern struct request *elv_latter_request(request_queue_t *, struct request *);
|
||||
extern int elv_register_queue(request_queue_t *q);
|
||||
extern void elv_unregister_queue(request_queue_t *q);
|
||||
extern int elv_may_queue(request_queue_t *, int);
|
||||
extern int elv_may_queue(request_queue_t *, int, struct bio *);
|
||||
extern void elv_completed_request(request_queue_t *, struct request *);
|
||||
extern int elv_set_request(request_queue_t *, struct request *, int);
|
||||
extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
|
||||
extern void elv_put_request(request_queue_t *, struct request *);
|
||||
|
||||
/*
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#define _LINUX_ETHERDEVICE_H
|
||||
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/random.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
@ -32,7 +33,7 @@ extern int eth_header(struct sk_buff *skb, struct net_device *dev,
|
|||
unsigned short type, void *daddr,
|
||||
void *saddr, unsigned len);
|
||||
extern int eth_rebuild_header(struct sk_buff *skb);
|
||||
extern unsigned short eth_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
extern void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev,
|
||||
unsigned char * haddr);
|
||||
extern int eth_header_cache(struct neighbour *neigh,
|
||||
|
@ -65,7 +66,7 @@ static inline int is_zero_ether_addr(const u8 *addr)
|
|||
*/
|
||||
static inline int is_multicast_ether_addr(const u8 *addr)
|
||||
{
|
||||
return addr[0] & 0x01;
|
||||
return ((addr[0] != 0xff) && (0x01 & addr[0]));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -300,18 +300,19 @@ struct ext2_inode {
|
|||
/*
|
||||
* Mount flags
|
||||
*/
|
||||
#define EXT2_MOUNT_CHECK 0x0001 /* Do mount-time checks */
|
||||
#define EXT2_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */
|
||||
#define EXT2_MOUNT_GRPID 0x0004 /* Create files with directory's group */
|
||||
#define EXT2_MOUNT_DEBUG 0x0008 /* Some debugging messages */
|
||||
#define EXT2_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
|
||||
#define EXT2_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
|
||||
#define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
|
||||
#define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */
|
||||
#define EXT2_MOUNT_NOBH 0x0100 /* No buffer_heads */
|
||||
#define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */
|
||||
#define EXT2_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */
|
||||
#define EXT2_MOUNT_POSIX_ACL 0x8000 /* POSIX Access Control Lists */
|
||||
#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
|
||||
#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
|
||||
#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
|
||||
#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
|
||||
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
|
||||
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
|
||||
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
|
||||
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
|
||||
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
|
||||
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
|
||||
#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
|
||||
#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
|
||||
#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
|
||||
|
||||
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
|
||||
#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
|
||||
|
|
|
@ -238,6 +238,20 @@ struct ext3_new_group_data {
|
|||
#define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
|
||||
#define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
|
||||
|
||||
/*
|
||||
* Mount options
|
||||
*/
|
||||
struct ext3_mount_options {
|
||||
unsigned long s_mount_opt;
|
||||
uid_t s_resuid;
|
||||
gid_t s_resgid;
|
||||
unsigned long s_commit_interval;
|
||||
#ifdef CONFIG_QUOTA
|
||||
int s_jquota_fmt;
|
||||
char *s_qf_names[MAXQUOTAS];
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Structure of an inode on the disk
|
||||
*/
|
||||
|
@ -358,6 +372,7 @@ struct ext3_inode {
|
|||
#define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
|
||||
#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
|
||||
#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
|
||||
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
|
||||
|
||||
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
|
||||
#ifndef _LINUX_EXT2_FS_H
|
||||
|
|
|
@ -42,15 +42,15 @@
|
|||
* superblock only gets updated once, of course, so don't bother
|
||||
* counting that again for the quota updates. */
|
||||
|
||||
#define EXT3_DATA_TRANS_BLOCKS (EXT3_SINGLEDATA_TRANS_BLOCKS + \
|
||||
#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
|
||||
EXT3_XATTR_TRANS_BLOCKS - 2 + \
|
||||
2*EXT3_QUOTA_TRANS_BLOCKS)
|
||||
2*EXT3_QUOTA_TRANS_BLOCKS(sb))
|
||||
|
||||
/* Delete operations potentially hit one directory's namespace plus an
|
||||
* entire inode, plus arbitrary amounts of bitmap/indirection data. Be
|
||||
* generous. We can grow the delete transaction later if necessary. */
|
||||
|
||||
#define EXT3_DELETE_TRANS_BLOCKS (2 * EXT3_DATA_TRANS_BLOCKS + 64)
|
||||
#define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64)
|
||||
|
||||
/* Define an arbitrary limit for the amount of data we will anticipate
|
||||
* writing to any given transaction. For unbounded transactions such as
|
||||
|
@ -74,14 +74,17 @@
|
|||
#ifdef CONFIG_QUOTA
|
||||
/* Amount of blocks needed for quota update - we know that the structure was
|
||||
* allocated so we need to update only inode+data */
|
||||
#define EXT3_QUOTA_TRANS_BLOCKS 2
|
||||
#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
|
||||
/* Amount of blocks needed for quota insert/delete - we do some block writes
|
||||
* but inode, sb and group updates are done only once */
|
||||
#define EXT3_QUOTA_INIT_BLOCKS (DQUOT_MAX_WRITES*\
|
||||
(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3)
|
||||
#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
|
||||
(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
|
||||
#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
|
||||
(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
|
||||
#else
|
||||
#define EXT3_QUOTA_TRANS_BLOCKS 0
|
||||
#define EXT3_QUOTA_INIT_BLOCKS 0
|
||||
#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
|
||||
#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
|
||||
#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
|
||||
#endif
|
||||
|
||||
int
|
||||
|
|
|
@ -5,7 +5,17 @@
|
|||
#define POSIX_FADV_RANDOM 1 /* Expect random page references. */
|
||||
#define POSIX_FADV_SEQUENTIAL 2 /* Expect sequential page references. */
|
||||
#define POSIX_FADV_WILLNEED 3 /* Will need these pages. */
|
||||
|
||||
/*
|
||||
* The advise values for POSIX_FADV_DONTNEED and POSIX_ADV_NOREUSE
|
||||
* for s390-64 differ from the values for the rest of the world.
|
||||
*/
|
||||
#if defined(__s390x__)
|
||||
#define POSIX_FADV_DONTNEED 6 /* Don't need these pages. */
|
||||
#define POSIX_FADV_NOREUSE 7 /* Data will be accessed once. */
|
||||
#else
|
||||
#define POSIX_FADV_DONTNEED 4 /* Don't need these pages. */
|
||||
#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */
|
||||
#endif
|
||||
|
||||
#endif /* FADVISE_H_INCLUDED */
|
||||
|
|
|
@ -25,6 +25,10 @@
|
|||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#ifndef force_o_largefile
|
||||
#define force_o_largefile() (BITS_PER_LONG != 32)
|
||||
#endif
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
#define IS_GETLK32(cmd) ((cmd) == F_GETLK)
|
||||
#define IS_SETLK32(cmd) ((cmd) == F_SETLK)
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
#include <linux/if_fddi.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
extern unsigned short fddi_type_trans(struct sk_buff *skb,
|
||||
extern __be16 fddi_type_trans(struct sk_buff *skb,
|
||||
struct net_device *dev);
|
||||
extern struct net_device *alloc_fddidev(int sizeof_priv);
|
||||
#endif
|
||||
|
|
|
@ -213,6 +213,7 @@ extern int dir_notify_enable;
|
|||
#include <linux/radix-tree.h>
|
||||
#include <linux/prio_tree.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
@ -220,6 +221,7 @@ extern int dir_notify_enable;
|
|||
|
||||
struct iovec;
|
||||
struct nameidata;
|
||||
struct kiocb;
|
||||
struct pipe_inode_info;
|
||||
struct poll_table_struct;
|
||||
struct kstatfs;
|
||||
|
@ -240,7 +242,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
|
|||
typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
|
||||
unsigned long max_blocks,
|
||||
struct buffer_head *bh_result, int create);
|
||||
typedef void (dio_iodone_t)(struct inode *inode, loff_t offset,
|
||||
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
|
||||
ssize_t bytes, void *private);
|
||||
|
||||
/*
|
||||
|
@ -302,7 +304,6 @@ struct iattr {
|
|||
struct page;
|
||||
struct address_space;
|
||||
struct writeback_control;
|
||||
struct kiocb;
|
||||
|
||||
struct address_space_operations {
|
||||
int (*writepage)(struct page *page, struct writeback_control *wbc);
|
||||
|
@ -330,6 +331,8 @@ struct address_space_operations {
|
|||
int (*releasepage) (struct page *, int);
|
||||
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
|
||||
loff_t offset, unsigned long nr_segs);
|
||||
struct page* (*get_xip_page)(struct address_space *, sector_t,
|
||||
int);
|
||||
};
|
||||
|
||||
struct backing_dev_info;
|
||||
|
@ -471,6 +474,11 @@ struct inode {
|
|||
struct dnotify_struct *i_dnotify; /* for directory notifications */
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INOTIFY
|
||||
struct list_head inotify_watches; /* watches on this inode */
|
||||
struct semaphore inotify_sem; /* protects the watches list */
|
||||
#endif
|
||||
|
||||
unsigned long i_state;
|
||||
unsigned long dirtied_when; /* jiffies of first dirtying */
|
||||
|
||||
|
@ -581,7 +589,6 @@ struct file {
|
|||
atomic_t f_count;
|
||||
unsigned int f_flags;
|
||||
mode_t f_mode;
|
||||
int f_error;
|
||||
loff_t f_pos;
|
||||
struct fown_struct f_owner;
|
||||
unsigned int f_uid, f_gid;
|
||||
|
@ -674,6 +681,7 @@ struct file_lock {
|
|||
struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
|
||||
union {
|
||||
struct nfs_lock_info nfs_fl;
|
||||
struct nfs4_lock_info nfs4_fl;
|
||||
} fl_u;
|
||||
};
|
||||
|
||||
|
@ -689,11 +697,13 @@ extern struct list_head file_lock_list;
|
|||
#include <linux/fcntl.h>
|
||||
|
||||
extern int fcntl_getlk(struct file *, struct flock __user *);
|
||||
extern int fcntl_setlk(struct file *, unsigned int, struct flock __user *);
|
||||
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
|
||||
struct flock __user *);
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
extern int fcntl_getlk64(struct file *, struct flock64 __user *);
|
||||
extern int fcntl_setlk64(struct file *, unsigned int, struct flock64 __user *);
|
||||
extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
|
||||
struct flock64 __user *);
|
||||
#endif
|
||||
|
||||
extern void send_sigio(struct fown_struct *fown, int fd, int band);
|
||||
|
@ -820,16 +830,34 @@ enum {
|
|||
#define vfs_check_frozen(sb, level) \
|
||||
wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
|
||||
|
||||
static inline void get_fs_excl(void)
|
||||
{
|
||||
atomic_inc(¤t->fs_excl);
|
||||
}
|
||||
|
||||
static inline void put_fs_excl(void)
|
||||
{
|
||||
atomic_dec(¤t->fs_excl);
|
||||
}
|
||||
|
||||
static inline int has_fs_excl(void)
|
||||
{
|
||||
return atomic_read(¤t->fs_excl);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Superblock locking.
|
||||
*/
|
||||
static inline void lock_super(struct super_block * sb)
|
||||
{
|
||||
get_fs_excl();
|
||||
down(&sb->s_lock);
|
||||
}
|
||||
|
||||
static inline void unlock_super(struct super_block * sb)
|
||||
{
|
||||
put_fs_excl();
|
||||
up(&sb->s_lock);
|
||||
}
|
||||
|
||||
|
@ -883,7 +911,9 @@ struct block_device_operations {
|
|||
int (*open) (struct inode *, struct file *);
|
||||
int (*release) (struct inode *, struct file *);
|
||||
int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
|
||||
long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
|
||||
long (*compat_ioctl) (struct file *, unsigned, unsigned long);
|
||||
int (*direct_access) (struct block_device *, sector_t, unsigned long *);
|
||||
int (*media_changed) (struct gendisk *);
|
||||
int (*revalidate_disk) (struct gendisk *);
|
||||
struct module *owner;
|
||||
|
@ -963,8 +993,8 @@ struct inode_operations {
|
|||
int (*rename) (struct inode *, struct dentry *,
|
||||
struct inode *, struct dentry *);
|
||||
int (*readlink) (struct dentry *, char __user *,int);
|
||||
int (*follow_link) (struct dentry *, struct nameidata *);
|
||||
void (*put_link) (struct dentry *, struct nameidata *);
|
||||
void * (*follow_link) (struct dentry *, struct nameidata *);
|
||||
void (*put_link) (struct dentry *, struct nameidata *, void *);
|
||||
void (*truncate) (struct inode *);
|
||||
int (*permission) (struct inode *, int, struct nameidata *);
|
||||
int (*setattr) (struct dentry *, struct iattr *);
|
||||
|
@ -1024,6 +1054,7 @@ struct super_operations {
|
|||
#define I_FREEING 16
|
||||
#define I_CLEAR 32
|
||||
#define I_NEW 64
|
||||
#define I_WILL_FREE 128
|
||||
|
||||
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
|
||||
|
||||
|
@ -1369,7 +1400,6 @@ extern void emergency_remount(void);
|
|||
extern int do_remount_sb(struct super_block *sb, int flags,
|
||||
void *data, int force);
|
||||
extern sector_t bmap(struct inode *, sector_t);
|
||||
extern int setattr_mask(unsigned int);
|
||||
extern int notify_change(struct dentry *, struct iattr *);
|
||||
extern int permission(struct inode *, int, struct nameidata *);
|
||||
extern int generic_permission(struct inode *, int,
|
||||
|
@ -1411,7 +1441,11 @@ extern struct inode * igrab(struct inode *);
|
|||
extern ino_t iunique(struct super_block *, ino_t);
|
||||
extern int inode_needs_sync(struct inode *inode);
|
||||
extern void generic_delete_inode(struct inode *inode);
|
||||
extern void generic_drop_inode(struct inode *inode);
|
||||
|
||||
extern struct inode *ilookup5_nowait(struct super_block *sb,
|
||||
unsigned long hashval, int (*test)(struct inode *, void *),
|
||||
void *data);
|
||||
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
|
||||
int (*test)(struct inode *, void *), void *data);
|
||||
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
|
||||
|
@ -1494,6 +1528,23 @@ extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
|
|||
extern int generic_file_open(struct inode * inode, struct file * filp);
|
||||
extern int nonseekable_open(struct inode * inode, struct file * filp);
|
||||
|
||||
#ifdef CONFIG_FS_XIP
|
||||
extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
|
||||
loff_t *ppos);
|
||||
extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
|
||||
size_t count, read_actor_t actor,
|
||||
void *target);
|
||||
extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
|
||||
extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
|
||||
size_t len, loff_t *ppos);
|
||||
extern int xip_truncate_page(struct address_space *mapping, loff_t from);
|
||||
#else
|
||||
static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
|
||||
read_descriptor_t * desc,
|
||||
read_actor_t actor)
|
||||
|
@ -1551,8 +1602,8 @@ extern struct file_operations generic_ro_fops;
|
|||
extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
|
||||
extern int vfs_follow_link(struct nameidata *, const char *);
|
||||
extern int page_readlink(struct dentry *, char __user *, int);
|
||||
extern int page_follow_link_light(struct dentry *, struct nameidata *);
|
||||
extern void page_put_link(struct dentry *, struct nameidata *);
|
||||
extern void *page_follow_link_light(struct dentry *, struct nameidata *);
|
||||
extern void page_put_link(struct dentry *, struct nameidata *, void *);
|
||||
extern int page_symlink(struct inode *inode, const char *symname, int len);
|
||||
extern struct inode_operations page_symlink_inode_operations;
|
||||
extern int generic_readlink(struct dentry *, char __user *, int);
|
||||
|
|
251
include/linux/fsnotify.h
Normal file
251
include/linux/fsnotify.h
Normal file
|
@ -0,0 +1,251 @@
|
|||
#ifndef _LINUX_FS_NOTIFY_H
|
||||
#define _LINUX_FS_NOTIFY_H
|
||||
|
||||
/*
|
||||
* include/linux/fsnotify.h - generic hooks for filesystem notification, to
|
||||
* reduce in-source duplication from both dnotify and inotify.
|
||||
*
|
||||
* We don't compile any of this away in some complicated menagerie of ifdefs.
|
||||
* Instead, we rely on the code inside to optimize away as needed.
|
||||
*
|
||||
* (C) Copyright 2005 Robert Love
|
||||
*/
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/dnotify.h>
|
||||
#include <linux/inotify.h>
|
||||
|
||||
/*
|
||||
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
|
||||
*/
|
||||
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
|
||||
const char *old_name, const char *new_name,
|
||||
int isdir, struct inode *target, struct inode *source)
|
||||
{
|
||||
u32 cookie = inotify_get_cookie();
|
||||
|
||||
if (old_dir == new_dir)
|
||||
inode_dir_notify(old_dir, DN_RENAME);
|
||||
else {
|
||||
inode_dir_notify(old_dir, DN_DELETE);
|
||||
inode_dir_notify(new_dir, DN_CREATE);
|
||||
}
|
||||
|
||||
if (isdir)
|
||||
isdir = IN_ISDIR;
|
||||
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name);
|
||||
inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name);
|
||||
|
||||
if (target) {
|
||||
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL);
|
||||
inotify_inode_is_dead(target);
|
||||
}
|
||||
|
||||
if (source) {
|
||||
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_nameremove - a filename was removed from a directory
|
||||
*/
|
||||
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
|
||||
{
|
||||
if (isdir)
|
||||
isdir = IN_ISDIR;
|
||||
dnotify_parent(dentry, DN_DELETE);
|
||||
inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_inoderemove - an inode is going away
|
||||
*/
|
||||
static inline void fsnotify_inoderemove(struct inode *inode)
|
||||
{
|
||||
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL);
|
||||
inotify_inode_is_dead(inode);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_create - 'name' was linked in
|
||||
*/
|
||||
static inline void fsnotify_create(struct inode *inode, const char *name)
|
||||
{
|
||||
inode_dir_notify(inode, DN_CREATE);
|
||||
inotify_inode_queue_event(inode, IN_CREATE, 0, name);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_mkdir - directory 'name' was created
|
||||
*/
|
||||
static inline void fsnotify_mkdir(struct inode *inode, const char *name)
|
||||
{
|
||||
inode_dir_notify(inode, DN_CREATE);
|
||||
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, name);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_access - file was read
|
||||
*/
|
||||
static inline void fsnotify_access(struct dentry *dentry)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
u32 mask = IN_ACCESS;
|
||||
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
mask |= IN_ISDIR;
|
||||
|
||||
dnotify_parent(dentry, DN_ACCESS);
|
||||
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
|
||||
inotify_inode_queue_event(inode, mask, 0, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_modify - file was modified
|
||||
*/
|
||||
static inline void fsnotify_modify(struct dentry *dentry)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
u32 mask = IN_MODIFY;
|
||||
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
mask |= IN_ISDIR;
|
||||
|
||||
dnotify_parent(dentry, DN_MODIFY);
|
||||
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
|
||||
inotify_inode_queue_event(inode, mask, 0, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_open - file was opened
|
||||
*/
|
||||
static inline void fsnotify_open(struct dentry *dentry)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
u32 mask = IN_OPEN;
|
||||
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
mask |= IN_ISDIR;
|
||||
|
||||
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
|
||||
inotify_inode_queue_event(inode, mask, 0, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_close - file was closed
|
||||
*/
|
||||
static inline void fsnotify_close(struct file *file)
|
||||
{
|
||||
struct dentry *dentry = file->f_dentry;
|
||||
struct inode *inode = dentry->d_inode;
|
||||
const char *name = dentry->d_name.name;
|
||||
mode_t mode = file->f_mode;
|
||||
u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
|
||||
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
mask |= IN_ISDIR;
|
||||
|
||||
inotify_dentry_parent_queue_event(dentry, mask, 0, name);
|
||||
inotify_inode_queue_event(inode, mask, 0, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_xattr - extended attributes were changed
|
||||
*/
|
||||
static inline void fsnotify_xattr(struct dentry *dentry)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
u32 mask = IN_ATTRIB;
|
||||
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
mask |= IN_ISDIR;
|
||||
|
||||
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
|
||||
inotify_inode_queue_event(inode, mask, 0, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_change - notify_change event. file was modified and/or metadata
|
||||
* was changed.
|
||||
*/
|
||||
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
int dn_mask = 0;
|
||||
u32 in_mask = 0;
|
||||
|
||||
if (ia_valid & ATTR_UID) {
|
||||
in_mask |= IN_ATTRIB;
|
||||
dn_mask |= DN_ATTRIB;
|
||||
}
|
||||
if (ia_valid & ATTR_GID) {
|
||||
in_mask |= IN_ATTRIB;
|
||||
dn_mask |= DN_ATTRIB;
|
||||
}
|
||||
if (ia_valid & ATTR_SIZE) {
|
||||
in_mask |= IN_MODIFY;
|
||||
dn_mask |= DN_MODIFY;
|
||||
}
|
||||
/* both times implies a utime(s) call */
|
||||
if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
|
||||
{
|
||||
in_mask |= IN_ATTRIB;
|
||||
dn_mask |= DN_ATTRIB;
|
||||
} else if (ia_valid & ATTR_ATIME) {
|
||||
in_mask |= IN_ACCESS;
|
||||
dn_mask |= DN_ACCESS;
|
||||
} else if (ia_valid & ATTR_MTIME) {
|
||||
in_mask |= IN_MODIFY;
|
||||
dn_mask |= DN_MODIFY;
|
||||
}
|
||||
if (ia_valid & ATTR_MODE) {
|
||||
in_mask |= IN_ATTRIB;
|
||||
dn_mask |= DN_ATTRIB;
|
||||
}
|
||||
|
||||
if (dn_mask)
|
||||
dnotify_parent(dentry, dn_mask);
|
||||
if (in_mask) {
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
in_mask |= IN_ISDIR;
|
||||
inotify_inode_queue_event(inode, in_mask, 0, NULL);
|
||||
inotify_dentry_parent_queue_event(dentry, in_mask, 0,
|
||||
dentry->d_name.name);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_INOTIFY /* inotify helpers */
|
||||
|
||||
/*
|
||||
* fsnotify_oldname_init - save off the old filename before we change it
|
||||
*/
|
||||
static inline const char *fsnotify_oldname_init(const char *name)
|
||||
{
|
||||
return kstrdup(name, GFP_KERNEL);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
|
||||
*/
|
||||
static inline void fsnotify_oldname_free(const char *old_name)
|
||||
{
|
||||
kfree(old_name);
|
||||
}
|
||||
|
||||
#else /* CONFIG_INOTIFY */
|
||||
|
||||
static inline const char *fsnotify_oldname_init(const char *name)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void fsnotify_oldname_free(const char *old_name)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* ! CONFIG_INOTIFY */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _LINUX_FS_NOTIFY_H */
|
|
@ -165,7 +165,7 @@ typedef union {
|
|||
# undef CONFIG_FT_FDC_DMA
|
||||
# define CONFIG_FT_FDC_DMA 2
|
||||
# endif
|
||||
#elif CONFIG_FT_ALT_FDC == 1 /* CONFIG_FT_MACH2 */
|
||||
#elif defined(CONFIG_FT_ALT_FDC) /* CONFIG_FT_MACH2 */
|
||||
# if CONFIG_FT_FDC_BASE == 0
|
||||
# undef CONFIG_FT_FDC_BASE
|
||||
# define CONFIG_FT_FDC_BASE 0x370
|
||||
|
|
|
@ -224,7 +224,7 @@ static inline void free_disk_stats(struct gendisk *disk)
|
|||
extern void disk_round_stats(struct gendisk *disk);
|
||||
|
||||
/* drivers/block/genhd.c */
|
||||
extern int get_blkdev_list(char *);
|
||||
extern int get_blkdev_list(char *, int);
|
||||
extern void add_disk(struct gendisk *disk);
|
||||
extern void del_gendisk(struct gendisk *gp);
|
||||
extern void unlink_gendisk(struct gendisk *gp);
|
||||
|
@ -403,6 +403,7 @@ extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
|
|||
extern void add_partition(struct gendisk *, int, sector_t, sector_t);
|
||||
extern void delete_partition(struct gendisk *, int);
|
||||
|
||||
extern struct gendisk *alloc_disk_node(int minors, int node_id);
|
||||
extern struct gendisk *alloc_disk(int minors);
|
||||
extern struct kobject *get_disk(struct gendisk *disk);
|
||||
extern void put_disk(struct gendisk *disk);
|
||||
|
|
|
@ -12,8 +12,8 @@ struct vm_area_struct;
|
|||
* GFP bitmasks..
|
||||
*/
|
||||
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
|
||||
#define __GFP_DMA 0x01
|
||||
#define __GFP_HIGHMEM 0x02
|
||||
#define __GFP_DMA 0x01u
|
||||
#define __GFP_HIGHMEM 0x02u
|
||||
|
||||
/*
|
||||
* Action modifiers - doesn't change the zoning
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
#define LINUX_HARDIRQ_H
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/system.h>
|
||||
|
|
|
@ -242,8 +242,8 @@ static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
|
|||
}
|
||||
|
||||
|
||||
static __inline__ unsigned short hdlc_type_trans(struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
{
|
||||
hdlc_device *hdlc = dev_to_hdlc(dev);
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ static inline void *kmap(struct page *page)
|
|||
|
||||
#define kmap_atomic(page, idx) page_address(page)
|
||||
#define kunmap_atomic(addr, idx) do { } while (0)
|
||||
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
|
||||
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
|
||||
|
||||
#endif /* CONFIG_HIGHMEM */
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* i2c-sysfs.h - i2c chip driver sysfs defines
|
||||
* hwmon-sysfs.h - hardware monitoring chip driver sysfs defines
|
||||
*
|
||||
* Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com>
|
||||
*
|
||||
|
@ -17,8 +17,8 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
#ifndef _LINUX_I2C_SYSFS_H
|
||||
#define _LINUX_I2C_SYSFS_H
|
||||
#ifndef _LINUX_HWMON_SYSFS_H
|
||||
#define _LINUX_HWMON_SYSFS_H
|
||||
|
||||
struct sensor_device_attribute{
|
||||
struct device_attribute dev_attr;
|
||||
|
@ -33,4 +33,4 @@ struct sensor_device_attribute sensor_dev_attr_##_name = { \
|
|||
.index = _index, \
|
||||
}
|
||||
|
||||
#endif /* _LINUX_I2C_SYSFS_H */
|
||||
#endif /* _LINUX_HWMON_SYSFS_H */
|
|
@ -25,6 +25,7 @@
|
|||
#define _LINUX_I2C_DEV_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Some IOCTL commands are defined in <linux/i2c.h> */
|
||||
/* Note: 10-bit addresses are NOT supported! */
|
||||
|
|
|
@ -108,6 +108,7 @@
|
|||
#define I2C_DRIVERID_TDA7313 62 /* TDA7313 audio processor */
|
||||
#define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */
|
||||
#define I2C_DRIVERID_SAA7114H 64 /* video decoder */
|
||||
#define I2C_DRIVERID_DS1374 65 /* DS1374 real time clock */
|
||||
|
||||
|
||||
#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
|
||||
|
|
|
@ -97,3 +97,15 @@ static inline int vid_from_reg(int val, int vrm)
|
|||
2050 - (val) * 50);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int vid_to_reg(int val, int vrm)
|
||||
{
|
||||
switch (vrm) {
|
||||
case 91: /* VRM 9.1 */
|
||||
case 90: /* VRM 9.0 */
|
||||
return ((val >= 1100) && (val <= 1850) ?
|
||||
((18499 - val * 10) / 25 + 5) / 10 : -1);
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -290,11 +290,8 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
|
|||
*/
|
||||
struct i2c_client_address_data {
|
||||
unsigned short *normal_i2c;
|
||||
unsigned short *normal_i2c_range;
|
||||
unsigned short *probe;
|
||||
unsigned short *probe_range;
|
||||
unsigned short *ignore;
|
||||
unsigned short *ignore_range;
|
||||
unsigned short *force;
|
||||
};
|
||||
|
||||
|
@ -563,24 +560,15 @@ union i2c_smbus_data {
|
|||
#define I2C_CLIENT_INSMOD \
|
||||
I2C_CLIENT_MODULE_PARM(probe, \
|
||||
"List of adapter,address pairs to scan additionally"); \
|
||||
I2C_CLIENT_MODULE_PARM(probe_range, \
|
||||
"List of adapter,start-addr,end-addr triples to scan " \
|
||||
"additionally"); \
|
||||
I2C_CLIENT_MODULE_PARM(ignore, \
|
||||
"List of adapter,address pairs not to scan"); \
|
||||
I2C_CLIENT_MODULE_PARM(ignore_range, \
|
||||
"List of adapter,start-addr,end-addr triples not to " \
|
||||
"scan"); \
|
||||
I2C_CLIENT_MODULE_PARM(force, \
|
||||
"List of adapter,address pairs to boldly assume " \
|
||||
"to be present"); \
|
||||
static struct i2c_client_address_data addr_data = { \
|
||||
.normal_i2c = normal_i2c, \
|
||||
.normal_i2c_range = normal_i2c_range, \
|
||||
.probe = probe, \
|
||||
.probe_range = probe_range, \
|
||||
.ignore = ignore, \
|
||||
.ignore_range = ignore_range, \
|
||||
.force = force, \
|
||||
}
|
||||
|
||||
|
|
|
@ -24,6 +24,13 @@
|
|||
#define MAX_I2O_CONTROLLERS 32
|
||||
|
||||
//#include <linux/ioctl.h>
|
||||
#ifndef __KERNEL__
|
||||
|
||||
typedef unsigned char u8;
|
||||
typedef unsigned short u16;
|
||||
typedef unsigned int u32;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/*
|
||||
* I2O Control IOCTLs and structures
|
||||
|
@ -113,6 +120,10 @@ struct i2o_evt_get {
|
|||
int lost;
|
||||
};
|
||||
|
||||
typedef struct i2o_sg_io_hdr {
|
||||
unsigned int flags; /* see I2O_DPT_SG_IO_FLAGS */
|
||||
} i2o_sg_io_hdr_t;
|
||||
|
||||
/**************************************************************************
|
||||
* HRT related constants and structures
|
||||
**************************************************************************/
|
||||
|
@ -126,14 +137,6 @@ struct i2o_evt_get {
|
|||
#define I2O_BUS_CARDBUS 7
|
||||
#define I2O_BUS_UNKNOWN 0x80
|
||||
|
||||
#ifndef __KERNEL__
|
||||
|
||||
typedef unsigned char u8;
|
||||
typedef unsigned short u16;
|
||||
typedef unsigned int u32;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
typedef struct _i2o_pci_bus {
|
||||
u8 PciFunctionNumber;
|
||||
u8 PciDeviceNumber;
|
||||
|
@ -333,7 +336,7 @@ typedef struct _i2o_status_block {
|
|||
#define I2O_CLASS_ATE_PERIPHERAL 0x061
|
||||
#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
|
||||
#define I2O_CLASS_FLOPPY_DEVICE 0x071
|
||||
#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
|
||||
#define I2O_CLASS_BUS_ADAPTER 0x080
|
||||
#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
|
||||
#define I2O_CLASS_PEER_TRANSPORT 0x091
|
||||
#define I2O_CLASS_END 0xfff
|
||||
|
@ -399,4 +402,26 @@ typedef struct _i2o_status_block {
|
|||
#define ADAPTER_STATE_FAILED 0x10
|
||||
#define ADAPTER_STATE_FAULTED 0x11
|
||||
|
||||
/*
|
||||
* Software module types
|
||||
*/
|
||||
#define I2O_SOFTWARE_MODULE_IRTOS 0x11
|
||||
#define I2O_SOFTWARE_MODULE_IOP_PRIVATE 0x22
|
||||
#define I2O_SOFTWARE_MODULE_IOP_CONFIG 0x23
|
||||
|
||||
/*
|
||||
* Vendors
|
||||
*/
|
||||
#define I2O_VENDOR_DPT 0x001b
|
||||
|
||||
/*
|
||||
* DPT / Adaptec specific values for i2o_sg_io_hdr flags.
|
||||
*/
|
||||
#define I2O_DPT_SG_FLAG_INTERPRET 0x00010000
|
||||
#define I2O_DPT_SG_FLAG_PHYSICAL 0x00020000
|
||||
|
||||
#define I2O_DPT_FLASH_FRAG_SIZE 0x10000
|
||||
#define I2O_DPT_FLASH_READ 0x0101
|
||||
#define I2O_DPT_FLASH_WRITE 0x0102
|
||||
|
||||
#endif /* _I2O_DEV_H */
|
||||
|
|
|
@ -119,12 +119,21 @@ struct i2o_driver {
|
|||
};
|
||||
|
||||
/*
|
||||
* Contains all information which are necessary for DMA operations
|
||||
* Contains DMA mapped address information
|
||||
*/
|
||||
struct i2o_dma {
|
||||
void *virt;
|
||||
dma_addr_t phys;
|
||||
u32 len;
|
||||
size_t len;
|
||||
};
|
||||
|
||||
/*
|
||||
* Contains IO mapped address information
|
||||
*/
|
||||
struct i2o_io {
|
||||
void __iomem *virt;
|
||||
unsigned long phys;
|
||||
unsigned long len;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -147,28 +156,25 @@ struct i2o_controller {
|
|||
|
||||
struct pci_dev *pdev; /* PCI device */
|
||||
|
||||
unsigned int short_req:1; /* use small block sizes */
|
||||
unsigned int promise:1; /* Promise controller */
|
||||
unsigned int adaptec:1; /* DPT / Adaptec controller */
|
||||
unsigned int raptor:1; /* split bar */
|
||||
unsigned int no_quiesce:1; /* dont quiesce before reset */
|
||||
unsigned int raptor:1; /* split bar */
|
||||
unsigned int promise:1; /* Promise controller */
|
||||
|
||||
#ifdef CONFIG_MTRR
|
||||
int mtrr_reg0;
|
||||
int mtrr_reg1;
|
||||
#endif
|
||||
unsigned int short_req:1; /* use small block sizes */
|
||||
unsigned int limit_sectors:1; /* limit number of sectors / request */
|
||||
unsigned int pae_support:1; /* controller has 64-bit SGL support */
|
||||
|
||||
struct list_head devices; /* list of I2O devices */
|
||||
|
||||
struct notifier_block *event_notifer; /* Events */
|
||||
atomic_t users;
|
||||
struct list_head list; /* Controller list */
|
||||
void __iomem *post_port; /* Inbout port address */
|
||||
void __iomem *reply_port; /* Outbound port address */
|
||||
void __iomem *irq_mask; /* Interrupt register address */
|
||||
|
||||
void __iomem *in_port; /* Inbout port address */
|
||||
void __iomem *out_port; /* Outbound port address */
|
||||
void __iomem *irq_status; /* Interrupt status register address */
|
||||
void __iomem *irq_mask; /* Interrupt mask register address */
|
||||
|
||||
/* Dynamic LCT related data */
|
||||
|
||||
struct i2o_dma status; /* status of IOP */
|
||||
struct i2o_dma status; /* IOP status block */
|
||||
|
||||
struct i2o_dma hrt; /* HW Resource Table */
|
||||
i2o_lct *lct; /* Logical Config Table */
|
||||
|
@ -176,21 +182,19 @@ struct i2o_controller {
|
|||
struct semaphore lct_lock; /* Lock for LCT updates */
|
||||
struct i2o_dma status_block; /* IOP status block */
|
||||
|
||||
struct i2o_dma base; /* controller messaging unit */
|
||||
struct i2o_dma in_queue; /* inbound message queue Host->IOP */
|
||||
struct i2o_io base; /* controller messaging unit */
|
||||
struct i2o_io in_queue; /* inbound message queue Host->IOP */
|
||||
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
|
||||
|
||||
unsigned int battery:1; /* Has a battery backup */
|
||||
unsigned int battery:1; /* Has a battery backup */
|
||||
unsigned int io_alloc:1; /* An I/O resource was allocated */
|
||||
unsigned int mem_alloc:1; /* A memory resource was allocated */
|
||||
|
||||
struct resource io_resource; /* I/O resource allocated to the IOP */
|
||||
struct resource mem_resource; /* Mem resource allocated to the IOP */
|
||||
|
||||
struct proc_dir_entry *proc_entry; /* /proc dir */
|
||||
|
||||
struct list_head bus_list; /* list of busses on IOP */
|
||||
struct device device;
|
||||
struct class_device classdev; /* I2O controller class */
|
||||
struct i2o_device *exec; /* Executive */
|
||||
#if BITS_PER_LONG == 64
|
||||
spinlock_t context_list_lock; /* lock for context_list */
|
||||
|
@ -241,9 +245,10 @@ struct i2o_sys_tbl {
|
|||
extern struct list_head i2o_controllers;
|
||||
|
||||
/* Message functions */
|
||||
static inline u32 i2o_msg_get(struct i2o_controller *, struct i2o_message __iomem **);
|
||||
extern u32 i2o_msg_get_wait(struct i2o_controller *, struct i2o_message __iomem **,
|
||||
int);
|
||||
static inline u32 i2o_msg_get(struct i2o_controller *,
|
||||
struct i2o_message __iomem **);
|
||||
extern u32 i2o_msg_get_wait(struct i2o_controller *,
|
||||
struct i2o_message __iomem **, int);
|
||||
static inline void i2o_msg_post(struct i2o_controller *, u32);
|
||||
static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
|
||||
unsigned long);
|
||||
|
@ -252,15 +257,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
|
|||
extern void i2o_msg_nop(struct i2o_controller *, u32);
|
||||
static inline void i2o_flush_reply(struct i2o_controller *, u32);
|
||||
|
||||
/* DMA handling functions */
|
||||
static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
|
||||
unsigned int);
|
||||
static inline void i2o_dma_free(struct device *, struct i2o_dma *);
|
||||
int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
|
||||
|
||||
static inline int i2o_dma_map(struct device *, struct i2o_dma *);
|
||||
static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
|
||||
|
||||
/* IOP functions */
|
||||
extern int i2o_status_get(struct i2o_controller *);
|
||||
|
||||
|
@ -285,6 +281,16 @@ static inline u32 i2o_ptr_high(void *ptr)
|
|||
{
|
||||
return (u32) ((u64) ptr >> 32);
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) (u64) dma_addr;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) ((u64) dma_addr >> 32);
|
||||
};
|
||||
#else
|
||||
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
|
||||
{
|
||||
|
@ -315,8 +321,246 @@ static inline u32 i2o_ptr_high(void *ptr)
|
|||
{
|
||||
return 0;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) dma_addr;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 * @c: I2O controller for which the calculation should be done
 * @body_size: maximum body size used for message in 32-bit words.
 *
 * Return the maximum number of SG elements in a SG list.
 */
static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	u16 sg_count =
	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * for 64-bit a SG attribute element must be added and each
		 * SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
};
|
||||
|
||||
/**
|
||||
* i2o_dma_map_single - Map pointer to controller and fill in I2O message.
|
||||
* @c: I2O controller
|
||||
* @ptr: pointer to the data which should be mapped
|
||||
* @size: size of data in bytes
|
||||
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
|
||||
* @sg_ptr: pointer to the SG list inside the I2O message
|
||||
*
|
||||
* This function does all necessary DMA handling and also writes the I2O
|
||||
* SGL elements into the I2O message. For details on DMA handling see also
|
||||
* dma_map_single(). The pointer sg_ptr will only be set to the end of the
|
||||
* SG list if the allocation was successful.
|
||||
*
|
||||
* Returns DMA address which must be checked for failures using
|
||||
* dma_mapping_error().
|
||||
*/
|
||||
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
u32 __iomem ** sg_ptr)
|
||||
{
|
||||
u32 sg_flags;
|
||||
u32 __iomem *mptr = *sg_ptr;
|
||||
dma_addr_t dma_addr;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
sg_flags = 0xd4000000;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
sg_flags = 0xd0000000;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
|
||||
if (!dma_mapping_error(dma_addr)) {
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
||||
writel(0x7C020002, mptr++);
|
||||
writel(PAGE_SIZE, mptr++);
|
||||
}
|
||||
#endif
|
||||
|
||||
writel(sg_flags | size, mptr++);
|
||||
writel(i2o_dma_low(dma_addr), mptr++);
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
|
||||
writel(i2o_dma_high(dma_addr), mptr++);
|
||||
#endif
|
||||
*sg_ptr = mptr;
|
||||
}
|
||||
return dma_addr;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
|
||||
* @c: I2O controller
|
||||
* @sg: SG list to be mapped
|
||||
* @sg_count: number of elements in the SG list
|
||||
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
|
||||
* @sg_ptr: pointer to the SG list inside the I2O message
|
||||
*
|
||||
* This function does all necessary DMA handling and also writes the I2O
|
||||
* SGL elements into the I2O message. For details on DMA handling see also
|
||||
* dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
|
||||
* list if the allocation was successful.
|
||||
*
|
||||
* Returns 0 on failure or 1 on success.
|
||||
*/
|
||||
static inline int i2o_dma_map_sg(struct i2o_controller *c,
|
||||
struct scatterlist *sg, int sg_count,
|
||||
enum dma_data_direction direction,
|
||||
u32 __iomem ** sg_ptr)
|
||||
{
|
||||
u32 sg_flags;
|
||||
u32 __iomem *mptr = *sg_ptr;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
sg_flags = 0x14000000;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
sg_flags = 0x10000000;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
|
||||
if (!sg_count)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
||||
writel(0x7C020002, mptr++);
|
||||
writel(PAGE_SIZE, mptr++);
|
||||
}
|
||||
#endif
|
||||
|
||||
while (sg_count-- > 0) {
|
||||
if (!sg_count)
|
||||
sg_flags |= 0xC0000000;
|
||||
writel(sg_flags | sg_dma_len(sg), mptr++);
|
||||
writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
|
||||
writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
|
||||
#endif
|
||||
sg++;
|
||||
}
|
||||
*sg_ptr = mptr;
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_alloc - Allocate DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should get the DMA buffer
|
||||
* @len: length of the new DMA memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* Allocate a coherent DMA memory and write the pointers into addr.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
int dma_64 = 0;
|
||||
|
||||
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
|
||||
dma_64 = 1;
|
||||
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
|
||||
|
||||
if ((sizeof(dma_addr_t) > 4) && dma_64)
|
||||
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
|
||||
printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
|
||||
|
||||
if (!addr->virt)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(addr->virt, 0, len);
|
||||
addr->len = len;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_free - Free DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which contains the DMA buffer
|
||||
*
|
||||
* Free a coherent DMA memory and set virtual address of addr to NULL.
|
||||
*/
|
||||
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (addr->virt) {
|
||||
if (addr->phys)
|
||||
dma_free_coherent(dev, addr->len, addr->virt,
|
||||
addr->phys);
|
||||
else
|
||||
kfree(addr->virt);
|
||||
addr->virt = NULL;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_realloc - Realloc DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: pointer to a i2o_dma struct DMA buffer
|
||||
* @len: new length of memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* If there was something allocated in the addr, free it first. If len > 0
|
||||
* than try to allocate it and write the addresses back to the addr
|
||||
* structure. If len == 0 set the virtual address to NULL.
|
||||
*
|
||||
* Returns the 0 on success or negative error code on failure.
|
||||
*/
|
||||
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
i2o_dma_free(dev, addr);
|
||||
|
||||
if (len)
|
||||
return i2o_dma_alloc(dev, addr, len, gfp_mask);
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/* I2O driver (OSM) functions */
|
||||
extern int i2o_driver_register(struct i2o_driver *);
|
||||
extern void i2o_driver_unregister(struct i2o_driver *);
|
||||
|
@ -385,49 +629,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
|
|||
/* Exec OSM functions */
|
||||
extern int i2o_exec_lct_get(struct i2o_controller *);
|
||||
|
||||
/* device to i2o_device and driver to i2o_driver convertion functions */
|
||||
/* device / driver / kobject conversion functions */
|
||||
#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
|
||||
#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
|
||||
|
||||
/*
|
||||
* Messenger inlines
|
||||
*/
|
||||
static inline u32 I2O_POST_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->post_port);
|
||||
};
|
||||
|
||||
static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->post_port);
|
||||
};
|
||||
|
||||
static inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->reply_port);
|
||||
};
|
||||
|
||||
static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->reply_port);
|
||||
};
|
||||
|
||||
static inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->irq_mask);
|
||||
};
|
||||
|
||||
static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->irq_mask);
|
||||
wmb();
|
||||
};
|
||||
#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
|
||||
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
|
||||
|
||||
/**
|
||||
* i2o_msg_get - obtain an I2O message from the IOP
|
||||
|
@ -443,11 +649,11 @@ static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
|
|||
* available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
|
||||
*/
|
||||
static inline u32 i2o_msg_get(struct i2o_controller *c,
|
||||
struct i2o_message __iomem **msg)
|
||||
struct i2o_message __iomem ** msg)
|
||||
{
|
||||
u32 m;
|
||||
u32 m = readl(c->in_port);
|
||||
|
||||
if ((m = I2O_POST_READ32(c)) != I2O_QUEUE_EMPTY)
|
||||
if (m != I2O_QUEUE_EMPTY)
|
||||
*msg = c->in_queue.virt + m;
|
||||
|
||||
return m;
|
||||
|
@ -462,7 +668,7 @@ static inline u32 i2o_msg_get(struct i2o_controller *c,
|
|||
*/
|
||||
static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
|
||||
{
|
||||
I2O_POST_WRITE32(c, m);
|
||||
writel(m, c->in_port);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -491,12 +697,10 @@ static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
|
|||
* The I2O controller must be informed that the reply message is not needed
|
||||
* anymore. If you forget to flush the reply, the message frame can't be
|
||||
* used by the controller anymore and is therefore lost.
|
||||
*
|
||||
* FIXME: is there a timeout after which the controller reuse the message?
|
||||
*/
|
||||
static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
|
||||
{
|
||||
I2O_REPLY_WRITE32(c, m);
|
||||
writel(m, c->out_port);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -530,97 +734,13 @@ static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
|
|||
* work for receive side messages as they are kmalloc objects
|
||||
* in a different pool.
|
||||
*/
|
||||
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct i2o_controller *c,
|
||||
u32 m)
|
||||
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
|
||||
i2o_controller *c,
|
||||
u32 m)
|
||||
{
|
||||
return c->in_queue.virt + m;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_alloc - Allocate DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should get the DMA buffer
|
||||
* @len: length of the new DMA memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* Allocate a coherent DMA memory and write the pointers into addr.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
|
||||
if (!addr->virt)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(addr->virt, 0, len);
|
||||
addr->len = len;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_free - Free DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which contains the DMA buffer
|
||||
*
|
||||
* Free a coherent DMA memory and set virtual address of addr to NULL.
|
||||
*/
|
||||
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (addr->virt) {
|
||||
if (addr->phys)
|
||||
dma_free_coherent(dev, addr->len, addr->virt,
|
||||
addr->phys);
|
||||
else
|
||||
kfree(addr->virt);
|
||||
addr->virt = NULL;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_map - Map the memory to DMA
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should be mapped
|
||||
*
|
||||
* Map the memory in addr->virt to coherent DMA memory and write the
|
||||
* physical address into addr->phys.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (!addr->virt)
|
||||
return -EFAULT;
|
||||
|
||||
if (!addr->phys)
|
||||
addr->phys = dma_map_single(dev, addr->virt, addr->len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!addr->phys)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_unmap - Unmap the DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should be unmapped
|
||||
*
|
||||
* Unmap the memory in addr->virt from DMA memory.
|
||||
*/
|
||||
static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (!addr->virt)
|
||||
return;
|
||||
|
||||
if (addr->phys) {
|
||||
dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
|
||||
addr->phys = 0;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Endian handling wrapped into the macro - keeps the core code
|
||||
* cleaner.
|
||||
|
@ -772,6 +892,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_CMD_SCSI_ABORT 0x83
|
||||
#define I2O_CMD_SCSI_BUSRESET 0x27
|
||||
|
||||
/*
|
||||
* Bus Adapter Class
|
||||
*/
|
||||
#define I2O_CMD_BUS_ADAPTER_RESET 0x85
|
||||
#define I2O_CMD_BUS_RESET 0x87
|
||||
#define I2O_CMD_BUS_SCAN 0x89
|
||||
#define I2O_CMD_BUS_QUIESCE 0x8b
|
||||
|
||||
/*
|
||||
* Random Block Storage Class
|
||||
*/
|
||||
|
@ -784,7 +912,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_CMD_BLOCK_MEJECT 0x43
|
||||
#define I2O_CMD_BLOCK_POWER 0x70
|
||||
|
||||
#define I2O_PRIVATE_MSG 0xFF
|
||||
#define I2O_CMD_PRIVATE 0xFF
|
||||
|
||||
/* Command status values */
|
||||
|
||||
|
@ -922,7 +1050,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2OVER15 0x0001
|
||||
#define I2OVER20 0x0002
|
||||
|
||||
/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
|
||||
/* Default is 1.5 */
|
||||
#define I2OVERSION I2OVER15
|
||||
|
||||
#define SGL_OFFSET_0 I2OVERSION
|
||||
|
@ -933,9 +1061,9 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
|
||||
#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
|
||||
#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
|
||||
|
||||
#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
|
||||
#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
|
||||
#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
|
||||
#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
|
||||
#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
|
||||
|
||||
/* Transaction Reply Lists (TRL) Control Word structure */
|
||||
#define TRL_SINGLE_FIXED_LENGTH 0x00
|
||||
|
@ -962,17 +1090,13 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define ELEVEN_WORD_MSG_SIZE 0x000B0000
|
||||
#define I2O_MESSAGE_SIZE(x) ((x)<<16)
|
||||
|
||||
/* Special TID Assignments */
|
||||
|
||||
/* special TID assignments */
|
||||
#define ADAPTER_TID 0
|
||||
#define HOST_TID 1
|
||||
|
||||
#define MSG_FRAME_SIZE 128 /* i2o_scsi assumes >= 32 */
|
||||
#define REPLY_FRAME_SIZE 17
|
||||
#define SG_TABLESIZE 30
|
||||
#define NMBR_MSG_FRAMES 128
|
||||
|
||||
#define MSG_POOL_SIZE (MSG_FRAME_SIZE*NMBR_MSG_FRAMES*sizeof(u32))
|
||||
/* outbound queue defines */
|
||||
#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
|
||||
#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
|
||||
|
||||
#define I2O_POST_WAIT_OK 0
|
||||
#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
|
||||
|
@ -993,11 +1117,10 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_HRT_GET_TRIES 3
|
||||
#define I2O_LCT_GET_TRIES 3
|
||||
|
||||
/* request queue sizes */
|
||||
/* defines for max_sectors and max_phys_segments */
|
||||
#define I2O_MAX_SECTORS 1024
|
||||
#define I2O_MAX_SEGMENTS 128
|
||||
|
||||
#define I2O_REQ_MEMPOOL_SIZE 32
|
||||
#define I2O_MAX_SECTORS_LIMITED 256
|
||||
#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _I2O_H */
|
||||
|
|
|
@ -917,7 +917,7 @@ typedef struct hwif_s {
|
|||
unsigned dma;
|
||||
|
||||
void (*led_act)(void *data, int rw);
|
||||
} ide_hwif_t;
|
||||
} ____cacheline_maxaligned_in_smp ide_hwif_t;
|
||||
|
||||
/*
|
||||
* internal ide interrupt handler type
|
||||
|
@@ -1501,4 +1501,10 @@ extern struct bus_type ide_bus_type;
#define ide_id_has_flush_cache_ext(id)	\
	(((id)->cfs_enable_2 & 0x2400) == 0x2400)

static inline int hwif_to_node(ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;
	return dev ? pcibus_to_node(dev->bus) : -1;
}

#endif /* _IDE_H */
|
||||
|
|
|
@ -35,6 +35,9 @@
|
|||
*
|
||||
* 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
|
||||
* - Code cleanup and style changes
|
||||
*
|
||||
* 2005/05/05 - Jason Gabler <jygabler at lbl dot gov>
|
||||
* - added definitions for various XOR hashing policies
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IF_BONDING_H
|
||||
|
@ -80,6 +83,10 @@
|
|||
|
||||
#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */
|
||||
|
||||
/* hashing types */
|
||||
#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */
|
||||
#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ MAC) */
|
||||
|
||||
typedef struct ifbond {
|
||||
__s32 bond_mode;
|
||||
__s32 num_slaves;
|
||||
|
|
|
@ -23,7 +23,7 @@ struct shaper
|
|||
__u32 shapeclock;
|
||||
unsigned long recovery; /* Time we can next clock a packet out on
|
||||
an empty queue */
|
||||
struct semaphore sem;
|
||||
spinlock_t lock;
|
||||
struct net_device_stats stats;
|
||||
struct net_device *dev;
|
||||
int (*hard_start_xmit) (struct sk_buff *skb,
|
||||
|
|
|
@ -148,7 +148,6 @@ struct ip_sf_socklist
|
|||
struct ip_mc_socklist
|
||||
{
|
||||
struct ip_mc_socklist *next;
|
||||
int count;
|
||||
struct ip_mreqn multi;
|
||||
unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
|
||||
struct ip_sf_socklist *sflist;
|
||||
|
|
|
@@ -156,7 +156,7 @@ struct in6_flowlabel_req
#define IPV6_CHECKSUM		7
#define IPV6_HOPLIMIT		8
#define IPV6_NEXTHOP		9
#define IPV6_AUTHHDR		10
#define IPV6_AUTHHDR		10	/* obsolete */
#define IPV6_FLOWINFO		11

#define IPV6_UNICAST_HOPS	16
|
||||
|
|
|
@@ -229,6 +229,18 @@ void __init parse_early_param(void);
#define __devexitdata __exitdata
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit
#define __cpuinitdata
#define __cpuexit
#define __cpuexitdata
#else
#define __cpuinit	__init
#define __cpuinitdata	__initdata
#define __cpuexit	__exit
#define __cpuexitdata	__exitdata
#endif

/* Functions marked as __devexit may be discarded at kernel link time, depending
   on config options.  Newer versions of binutils detect references from
   retained sections to discarded sections and flag an error.  Pointers to
|
||||
|
|
|
@ -81,6 +81,7 @@ extern struct group_info init_groups;
|
|||
.mm = NULL, \
|
||||
.active_mm = &init_mm, \
|
||||
.run_list = LIST_HEAD_INIT(tsk.run_list), \
|
||||
.ioprio = 0, \
|
||||
.time_slice = HZ, \
|
||||
.tasks = LIST_HEAD_INIT(tsk.tasks), \
|
||||
.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
|
||||
|
@ -108,9 +109,9 @@ extern struct group_info init_groups;
|
|||
.blocked = {{0}}, \
|
||||
.alloc_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.proc_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.switch_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.journal_info = NULL, \
|
||||
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
|
||||
.fs_excl = ATOMIC_INIT(0), \
|
||||
}
|
||||
|
||||
|
||||
|
|
110
include/linux/inotify.h
Normal file
|
@ -0,0 +1,110 @@
|
|||
/*
|
||||
* Inode based directory notification for Linux
|
||||
*
|
||||
* Copyright (C) 2005 John McCutchan
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_INOTIFY_H
|
||||
#define _LINUX_INOTIFY_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* struct inotify_event - structure read from the inotify device for each event
|
||||
*
|
||||
* When you are watching a directory, you will receive the filename for events
|
||||
* such as IN_CREATE, IN_DELETE, IN_OPEN, IN_CLOSE, ..., relative to the wd.
|
||||
*/
|
||||
struct inotify_event {
|
||||
__s32 wd; /* watch descriptor */
|
||||
__u32 mask; /* watch mask */
|
||||
__u32 cookie; /* cookie to synchronize two events */
|
||||
__u32 len; /* length (including nulls) of name */
|
||||
char name[0]; /* stub for possible name */
|
||||
};
|
||||
|
||||
/* the following are legal, implemented events that user-space can watch for */
|
||||
#define IN_ACCESS 0x00000001 /* File was accessed */
|
||||
#define IN_MODIFY 0x00000002 /* File was modified */
|
||||
#define IN_ATTRIB 0x00000004 /* Metadata changed */
|
||||
#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
|
||||
#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
|
||||
#define IN_OPEN 0x00000020 /* File was opened */
|
||||
#define IN_MOVED_FROM 0x00000040 /* File was moved from X */
|
||||
#define IN_MOVED_TO 0x00000080 /* File was moved to Y */
|
||||
#define IN_CREATE 0x00000100 /* Subfile was created */
|
||||
#define IN_DELETE 0x00000200 /* Subfile was deleted */
|
||||
#define IN_DELETE_SELF 0x00000400 /* Self was deleted */
|
||||
#define IN_MOVE_SELF 0x00000800 /* Self was moved */
|
||||
|
||||
/* the following are legal events. they are sent as needed to any watch */
|
||||
#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */
|
||||
#define IN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
|
||||
#define IN_IGNORED 0x00008000 /* File was ignored */
|
||||
|
||||
/* helper events */
|
||||
#define IN_CLOSE (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) /* close */
|
||||
#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */
|
||||
|
||||
/* special flags */
|
||||
#define IN_ISDIR 0x40000000 /* event occurred against dir */
|
||||
#define IN_ONESHOT 0x80000000 /* only send event once */
|
||||
|
||||
/*
|
||||
* All of the events - we build the list by hand so that we can add flags in
|
||||
* the future and not break backward compatibility. Apps will get only the
|
||||
* events that they originally wanted. Be sure to add new events here!
|
||||
*/
|
||||
#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
|
||||
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
|
||||
IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
|
||||
IN_MOVE_SELF)
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/dcache.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/config.h>
|
||||
|
||||
#ifdef CONFIG_INOTIFY
|
||||
|
||||
extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
|
||||
const char *);
|
||||
extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
|
||||
const char *);
|
||||
extern void inotify_unmount_inodes(struct list_head *);
|
||||
extern void inotify_inode_is_dead(struct inode *);
|
||||
extern u32 inotify_get_cookie(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline void inotify_inode_queue_event(struct inode *inode,
|
||||
__u32 mask, __u32 cookie,
|
||||
const char *filename)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
|
||||
__u32 mask, __u32 cookie,
|
||||
const char *filename)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void inotify_unmount_inodes(struct list_head *list)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void inotify_inode_is_dead(struct inode *inode)
|
||||
{
|
||||
}
|
||||
|
||||
static inline u32 inotify_get_cookie(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_INOTIFY */
|
||||
|
||||
#endif /* __KERNEL __ */
|
||||
|
||||
#endif /* _LINUX_INOTIFY_H */
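The header above only defines the event record and mask bits; how an inotify instance is opened is outside this hunk. As a hedged sketch, assuming fd already refers to an open inotify instance, user space would drain events like this:

#include <stdio.h>
#include <unistd.h>
#include <linux/inotify.h>

/* Read one buffer of events from an already-open inotify fd; how the fd was
 * obtained is an assumption, since this header does not define that part. */
static void drain_inotify(int fd)
{
	char buf[4096];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t pos = 0;

	while (pos < len) {
		struct inotify_event *ev = (struct inotify_event *)(buf + pos);

		printf("wd=%d mask=%#x cookie=%u name=%s\n",
		       ev->wd, ev->mask, ev->cookie, ev->len ? ev->name : "");
		pos += sizeof(*ev) + ev->len;
	}
}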
|
|
@ -811,9 +811,9 @@ struct input_dev {
|
|||
|
||||
void *private;
|
||||
|
||||
char *name;
|
||||
char *phys;
|
||||
char *uniq;
|
||||
const char *name;
|
||||
const char *phys;
|
||||
const char *uniq;
|
||||
struct input_id id;
|
||||
|
||||
unsigned long evbit[NBITS(EV_MAX)];
|
||||
|
@ -859,6 +859,10 @@ struct input_dev {
|
|||
int (*erase_effect)(struct input_dev *dev, int effect_id);
|
||||
|
||||
struct input_handle *grab;
|
||||
|
||||
struct semaphore sem; /* serializes open and close operations */
|
||||
unsigned int users;
|
||||
|
||||
struct device *dev;
|
||||
|
||||
struct list_head h_list;
|
||||
|
|
85
include/linux/ioprio.h
Normal file
@@ -0,0 +1,85 @@
#ifndef IOPRIO_H
#define IOPRIO_H

#include <linux/sched.h>

/*
 * Gives us 8 prio classes with 13-bits of data for each class
 */
#define IOPRIO_BITS		(16)
#define IOPRIO_CLASS_SHIFT	(13)
#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)

#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)	((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | data)

#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)

/*
 * These are the io priority groups as implemented by CFQ. RT is the realtime
 * class, it always gets premium service. BE is the best-effort scheduling
 * class, the default for any process. IDLE is the idle scheduling class, it
 * is only served when no one else is using the disk.
 */
enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

/*
 * 8 best effort priority levels are supported
 */
#define IOPRIO_BE_NR	(8)

enum {
	IOPRIO_WHO_PROCESS = 1,
	IOPRIO_WHO_PGRP,
	IOPRIO_WHO_USER,
};

/*
 * if process has set io priority explicitly, use that. if not, convert
 * the cpu scheduler nice value to an io priority
 */
#define IOPRIO_NORM	(4)
static inline int task_ioprio(struct task_struct *task)
{
	WARN_ON(!ioprio_valid(task->ioprio));
	return IOPRIO_PRIO_DATA(task->ioprio);
}

static inline int task_nice_ioprio(struct task_struct *task)
{
	return (task_nice(task) + 20) / 5;
}

/*
 * For inheritance, return the highest of the two given priorities
 */
static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (!ioprio_valid(aprio))
		return bprio;
	if (!ioprio_valid(bprio))
		return aprio;

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

#endif
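As a quick illustration of the packing scheme defined above (not part of the commit itself), an io priority word is built and taken apart like this:

#include <linux/ioprio.h>

/* Illustrative only: pack a best-effort, level-4 priority and split it again. */
static void ioprio_pack_example(void)
{
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);	/* (2 << 13) | 4 */
	int class  = IOPRIO_PRIO_CLASS(ioprio);			/* IOPRIO_CLASS_BE */
	int data   = IOPRIO_PRIO_DATA(ioprio);			/* 4 */

	(void)class;
	(void)data;
}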
|
|
@ -209,6 +209,11 @@ struct kernel_ipmi_msg
|
|||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
#include <linux/proc_fs.h>
|
||||
extern struct proc_dir_entry *proc_ipmi_root;
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
/* Opaque type for a IPMI message user. One of these is needed to
|
||||
send and receive messages. */
|
||||
typedef struct ipmi_user *ipmi_user_t;
|
||||
|
|
|
@ -85,10 +85,10 @@ extern int no_irq_affinity;
|
|||
extern int noirqdebug_setup(char *str);
|
||||
|
||||
extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
|
||||
struct irqaction *action);
|
||||
struct irqaction *action);
|
||||
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
|
||||
extern void note_interrupt(unsigned int irq, irq_desc_t *desc, int action_ret);
|
||||
extern void report_bad_irq(unsigned int irq, irq_desc_t *desc, int action_ret);
|
||||
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
|
||||
int action_ret, struct pt_regs *regs);
|
||||
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
|
||||
|
||||
extern void init_irq_proc(void);
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $Id: jffs2_fs_sb.h,v 1.48 2004/11/20 10:41:12 dwmw2 Exp $ */
|
||||
/* $Id: jffs2_fs_sb.h,v 1.52 2005/05/19 16:12:17 gleixner Exp $ */
|
||||
|
||||
#ifndef _JFFS2_FS_SB
|
||||
#define _JFFS2_FS_SB
|
||||
|
@ -14,7 +14,8 @@
|
|||
#include <linux/rwsem.h>
|
||||
|
||||
#define JFFS2_SB_FLAG_RO 1
|
||||
#define JFFS2_SB_FLAG_MOUNTING 2
|
||||
#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */
|
||||
#define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */
|
||||
|
||||
struct jffs2_inodirty;
|
||||
|
||||
|
@ -31,7 +32,7 @@ struct jffs2_sb_info {
|
|||
unsigned int flags;
|
||||
|
||||
struct task_struct *gc_task; /* GC task struct */
|
||||
struct semaphore gc_thread_start; /* GC thread start mutex */
|
||||
struct completion gc_thread_start; /* GC thread start completion */
|
||||
struct completion gc_thread_exit; /* GC thread exit completion port */
|
||||
|
||||
struct semaphore alloc_sem; /* Used to protect all the following
|
||||
|
@ -94,7 +95,7 @@ struct jffs2_sb_info {
|
|||
to an obsoleted node. I don't like this. Alternatives welcomed. */
|
||||
struct semaphore erase_free_sem;
|
||||
|
||||
#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
|
||||
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
|
||||
/* Write-behind buffer for NAND flash */
|
||||
unsigned char *wbuf;
|
||||
uint32_t wbuf_ofs;
|
||||
|
|
|
@@ -111,18 +111,35 @@ struct js_corr {
#define JS_SET_ALL		8

struct JS_DATA_TYPE {
	int buttons;
	int x;
	int y;
	__s32 buttons;
	__s32 x;
	__s32 y;
};

struct JS_DATA_SAVE_TYPE {
	int JS_TIMEOUT;
	int BUSY;
	long JS_EXPIRETIME;
	long JS_TIMELIMIT;
struct JS_DATA_SAVE_TYPE_32 {
	__s32 JS_TIMEOUT;
	__s32 BUSY;
	__s32 JS_EXPIRETIME;
	__s32 JS_TIMELIMIT;
	struct JS_DATA_TYPE JS_SAVE;
	struct JS_DATA_TYPE JS_CORR;
};

struct JS_DATA_SAVE_TYPE_64 {
	__s32 JS_TIMEOUT;
	__s32 BUSY;
	__s64 JS_EXPIRETIME;
	__s64 JS_TIMELIMIT;
	struct JS_DATA_TYPE JS_SAVE;
	struct JS_DATA_TYPE JS_CORR;
};

#if BITS_PER_LONG == 64
#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64
#elif BITS_PER_LONG == 32
#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32
#else
#error Unexpected BITS_PER_LONG
#endif

#endif /* _LINUX_JOYSTICK_H */
|
||||
|
|
|
@@ -58,15 +58,23 @@ struct completion;
 * be biten later when the calling function happens to sleep when it is not
 * supposed to.
 */
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
#define might_sleep() __might_sleep(__FILE__, __LINE__)
#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
void __might_sleep(char *file, int line);
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int cond_resched(void);
# define might_resched() cond_resched()
#else
#define might_sleep() do {} while(0)
#define might_sleep_if(cond) do {} while (0)
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
#else
# define might_sleep() do { might_resched(); } while (0)
#endif

#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)

#define abs(x) ({		\
	int __x = (x);		\
	(__x < 0) ? -__x : __x;	\
|
||||
|
|
135
include/linux/kexec.h
Normal file
|
@ -0,0 +1,135 @@
|
|||
#ifndef LINUX_KEXEC_H
|
||||
#define LINUX_KEXEC_H
|
||||
|
||||
#ifdef CONFIG_KEXEC
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/compat.h>
|
||||
#include <asm/kexec.h>
|
||||
|
||||
/* Verify architecture specific macros are defined */
|
||||
|
||||
#ifndef KEXEC_SOURCE_MEMORY_LIMIT
|
||||
#error KEXEC_SOURCE_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_DESTINATION_MEMORY_LIMIT
|
||||
#error KEXEC_DESTINATION_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_CONTROL_MEMORY_LIMIT
|
||||
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_CONTROL_CODE_SIZE
|
||||
#error KEXEC_CONTROL_CODE_SIZE not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_ARCH
|
||||
#error KEXEC_ARCH not defined
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This structure is used to hold the arguments that are used when loading
|
||||
* kernel binaries.
|
||||
*/
|
||||
|
||||
typedef unsigned long kimage_entry_t;
|
||||
#define IND_DESTINATION 0x1
|
||||
#define IND_INDIRECTION 0x2
|
||||
#define IND_DONE 0x4
|
||||
#define IND_SOURCE 0x8
|
||||
|
||||
#define KEXEC_SEGMENT_MAX 8
|
||||
struct kexec_segment {
|
||||
void __user *buf;
|
||||
size_t bufsz;
|
||||
unsigned long mem; /* User space sees this as a (void *) ... */
|
||||
size_t memsz;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct compat_kexec_segment {
|
||||
compat_uptr_t buf;
|
||||
compat_size_t bufsz;
|
||||
compat_ulong_t mem; /* User space sees this as a (void *) ... */
|
||||
compat_size_t memsz;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct kimage {
|
||||
kimage_entry_t head;
|
||||
kimage_entry_t *entry;
|
||||
kimage_entry_t *last_entry;
|
||||
|
||||
unsigned long destination;
|
||||
|
||||
unsigned long start;
|
||||
struct page *control_code_page;
|
||||
|
||||
unsigned long nr_segments;
|
||||
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
|
||||
|
||||
struct list_head control_pages;
|
||||
struct list_head dest_pages;
|
||||
struct list_head unuseable_pages;
|
||||
|
||||
/* Address of next control page to allocate for crash kernels. */
|
||||
unsigned long control_page;
|
||||
|
||||
/* Flags to indicate special processing */
|
||||
unsigned int type : 1;
|
||||
#define KEXEC_TYPE_DEFAULT 0
|
||||
#define KEXEC_TYPE_CRASH 1
|
||||
};
|
||||
|
||||
|
||||
|
||||
/* kexec interface functions */
|
||||
extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
|
||||
extern int machine_kexec_prepare(struct kimage *image);
|
||||
extern void machine_kexec_cleanup(struct kimage *image);
|
||||
extern asmlinkage long sys_kexec_load(unsigned long entry,
|
||||
unsigned long nr_segments,
|
||||
struct kexec_segment __user *segments,
|
||||
unsigned long flags);
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
|
||||
unsigned long nr_segments,
|
||||
struct compat_kexec_segment __user *segments,
|
||||
unsigned long flags);
|
||||
#endif
|
||||
extern struct page *kimage_alloc_control_pages(struct kimage *image,
|
||||
unsigned int order);
|
||||
extern void crash_kexec(struct pt_regs *);
|
||||
int kexec_should_crash(struct task_struct *);
|
||||
extern struct kimage *kexec_image;
|
||||
|
||||
#define KEXEC_ON_CRASH 0x00000001
|
||||
#define KEXEC_ARCH_MASK 0xffff0000
|
||||
|
||||
/* These values match the ELF architecture values.
|
||||
* Unless there is a good reason that should continue to be the case.
|
||||
*/
|
||||
#define KEXEC_ARCH_DEFAULT ( 0 << 16)
|
||||
#define KEXEC_ARCH_386 ( 3 << 16)
|
||||
#define KEXEC_ARCH_X86_64 (62 << 16)
|
||||
#define KEXEC_ARCH_PPC (20 << 16)
|
||||
#define KEXEC_ARCH_PPC64 (21 << 16)
|
||||
#define KEXEC_ARCH_IA_64 (50 << 16)
|
||||
#define KEXEC_ARCH_S390 (22 << 16)
|
||||
|
||||
#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */
|
||||
|
||||
/* Location of a reserved region to hold the crash kernel.
|
||||
*/
|
||||
extern struct resource crashk_res;
|
||||
|
||||
#else /* !CONFIG_KEXEC */
|
||||
struct pt_regs;
|
||||
struct task_struct;
|
||||
static inline void crash_kexec(struct pt_regs *regs) { }
|
||||
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
|
||||
#endif /* CONFIG_KEXEC */
|
||||
#endif /* LINUX_KEXEC_H */
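The header gives the user-visible segment layout and flag bits. Purely as a hedged sketch (the syscall number, entry point and destination address are assumptions, and user space normally carries its own copy of the segment struct since the kernel header keeps it under CONFIG_KEXEC), loading a single segment might look like:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

/* User-space mirror of struct kexec_segment as laid out above. */
struct kexec_segment {
	void *buf;
	size_t bufsz;
	unsigned long mem;	/* destination physical address */
	size_t memsz;
};

/* Illustrative only: load one in-memory blob at an assumed address. */
static int load_one_segment(void *blob, size_t size)
{
	struct kexec_segment seg = {
		.buf = blob, .bufsz = size,
		.mem = 0x100000, .memsz = size,
	};

	return syscall(__NR_kexec_load, 0x100000UL /* entry */, 1UL, &seg,
		       0UL /* KEXEC_ARCH_DEFAULT, no KEXEC_ON_CRASH */);
}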
|
|
@ -1,4 +1,4 @@
|
|||
/* key-ui.h: key userspace interface stuff for use by keyfs
|
||||
/* key-ui.h: key userspace interface stuff
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
|
@ -31,8 +31,10 @@ extern spinlock_t key_serial_lock;
|
|||
* subscribed
|
||||
*/
|
||||
struct keyring_list {
|
||||
unsigned maxkeys; /* max keys this list can hold */
|
||||
unsigned nkeys; /* number of keys currently held */
|
||||
struct rcu_head rcu; /* RCU deletion hook */
|
||||
unsigned short maxkeys; /* max keys this list can hold */
|
||||
unsigned short nkeys; /* number of keys currently held */
|
||||
unsigned short delkey; /* key to be unlinked by RCU */
|
||||
struct key *keys[0];
|
||||
};
|
||||
|
||||
|
@ -82,8 +84,45 @@ static inline int key_any_permission(const struct key *key, key_perm_t perm)
|
|||
return kperm != 0;
|
||||
}
|
||||
|
||||
static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
extern struct key *lookup_user_key(key_serial_t id, int create, int part,
|
||||
task_lock(tsk);
|
||||
ret = groups_search(tsk->group_info, gid);
|
||||
task_unlock(tsk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int key_task_permission(const struct key *key,
|
||||
struct task_struct *context,
|
||||
key_perm_t perm)
|
||||
{
|
||||
key_perm_t kperm;
|
||||
|
||||
if (key->uid == context->fsuid) {
|
||||
kperm = key->perm >> 16;
|
||||
}
|
||||
else if (key->gid != -1 &&
|
||||
key->perm & KEY_GRP_ALL && (
|
||||
key->gid == context->fsgid ||
|
||||
key_task_groups_search(context, key->gid)
|
||||
)
|
||||
) {
|
||||
kperm = key->perm >> 8;
|
||||
}
|
||||
else {
|
||||
kperm = key->perm;
|
||||
}
|
||||
|
||||
kperm = kperm & perm & KEY_ALL;
|
||||
|
||||
return kperm == perm;
|
||||
|
||||
}
|
||||
|
||||
extern struct key *lookup_user_key(struct task_struct *context,
|
||||
key_serial_t id, int create, int partial,
|
||||
key_perm_t perm);
|
||||
|
||||
extern long join_session_keyring(const char *name);
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
@ -78,7 +78,6 @@ struct key {
|
|||
key_serial_t serial; /* key serial number */
|
||||
struct rb_node serial_node;
|
||||
struct key_type *type; /* type of key */
|
||||
rwlock_t lock; /* examination vs change lock */
|
||||
struct rw_semaphore sem; /* change vs change sem */
|
||||
struct key_user *user; /* owner of this key */
|
||||
time_t expiry; /* time at which key expires (or 0) */
|
||||
|
@ -86,14 +85,10 @@ struct key {
|
|||
gid_t gid;
|
||||
key_perm_t perm; /* access permissions */
|
||||
unsigned short quotalen; /* length added to quota */
|
||||
unsigned short datalen; /* payload data length */
|
||||
unsigned short flags; /* status flags (change with lock writelocked) */
|
||||
#define KEY_FLAG_INSTANTIATED 0x00000001 /* set if key has been instantiated */
|
||||
#define KEY_FLAG_DEAD 0x00000002 /* set if key type has been deleted */
|
||||
#define KEY_FLAG_REVOKED 0x00000004 /* set if key had been revoked */
|
||||
#define KEY_FLAG_IN_QUOTA 0x00000008 /* set if key consumes quota */
|
||||
#define KEY_FLAG_USER_CONSTRUCT 0x00000010 /* set if key is being constructed in userspace */
|
||||
#define KEY_FLAG_NEGATIVE 0x00000020 /* set if key is negative */
|
||||
unsigned short datalen; /* payload data length
|
||||
* - may not match RCU dereferenced payload
|
||||
* - payload should contain own length
|
||||
*/
|
||||
|
||||
#ifdef KEY_DEBUGGING
|
||||
unsigned magic;
|
||||
|
@ -101,6 +96,14 @@ struct key {
|
|||
#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
|
||||
#endif
|
||||
|
||||
unsigned long flags; /* status flags (change with bitops) */
|
||||
#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
|
||||
#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
|
||||
#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
|
||||
#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
|
||||
#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
|
||||
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
|
||||
|
||||
/* the description string
|
||||
* - this is used to match a key against search criteria
|
||||
* - this should be a printable string
|
||||
|
@ -196,10 +199,12 @@ extern int key_payload_reserve(struct key *key, size_t datalen);
|
|||
extern int key_instantiate_and_link(struct key *key,
|
||||
const void *data,
|
||||
size_t datalen,
|
||||
struct key *keyring);
|
||||
struct key *keyring,
|
||||
struct key *instkey);
|
||||
extern int key_negate_and_link(struct key *key,
|
||||
unsigned timeout,
|
||||
struct key *keyring);
|
||||
struct key *keyring,
|
||||
struct key *instkey);
|
||||
extern void key_revoke(struct key *key);
|
||||
extern void key_put(struct key *key);
|
||||
|
||||
|
@ -242,14 +247,13 @@ extern struct key *keyring_search(struct key *keyring,
|
|||
struct key_type *type,
|
||||
const char *description);
|
||||
|
||||
extern struct key *search_process_keyrings(struct key_type *type,
|
||||
const char *description);
|
||||
|
||||
extern int keyring_add_key(struct key *keyring,
|
||||
struct key *key);
|
||||
|
||||
extern struct key *key_lookup(key_serial_t id);
|
||||
|
||||
extern void keyring_replace_payload(struct key *key, void *replacement);
|
||||
|
||||
#define key_serial(key) ((key) ? (key)->serial : 0)
|
||||
|
||||
/*
|
||||
|
@ -268,14 +272,22 @@ extern void key_fsuid_changed(struct task_struct *tsk);
|
|||
extern void key_fsgid_changed(struct task_struct *tsk);
|
||||
extern void key_init(void);
|
||||
|
||||
#define __install_session_keyring(tsk, keyring) \
|
||||
({ \
|
||||
struct key *old_session = tsk->signal->session_keyring; \
|
||||
tsk->signal->session_keyring = keyring; \
|
||||
old_session; \
|
||||
})
|
||||
|
||||
#else /* CONFIG_KEYS */
|
||||
|
||||
#define key_validate(k) 0
|
||||
#define key_serial(k) 0
|
||||
#define key_get(k) NULL
|
||||
#define key_get(k) ({ NULL; })
|
||||
#define key_put(k) do { } while(0)
|
||||
#define alloc_uid_keyring(u) 0
|
||||
#define switch_uid_keyring(u) do { } while(0)
|
||||
#define __install_session_keyring(t, k) ({ NULL; })
|
||||
#define copy_keys(f,t) 0
|
||||
#define copy_thread_group_keys(t) 0
|
||||
#define exit_keys(t) do { } while(0)
|
||||
|
|
|
@ -20,6 +20,16 @@
|
|||
#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
|
||||
#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
|
||||
|
||||
/* request-key default keyrings */
|
||||
#define KEY_REQKEY_DEFL_NO_CHANGE -1
|
||||
#define KEY_REQKEY_DEFL_DEFAULT 0
|
||||
#define KEY_REQKEY_DEFL_THREAD_KEYRING 1
|
||||
#define KEY_REQKEY_DEFL_PROCESS_KEYRING 2
|
||||
#define KEY_REQKEY_DEFL_SESSION_KEYRING 3
|
||||
#define KEY_REQKEY_DEFL_USER_KEYRING 4
|
||||
#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5
|
||||
#define KEY_REQKEY_DEFL_GROUP_KEYRING 6
|
||||
|
||||
/* keyctl commands */
|
||||
#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
|
||||
#define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */
|
||||
|
@ -35,5 +45,6 @@
|
|||
#define KEYCTL_READ 11 /* read a key or keyring's contents */
|
||||
#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */
|
||||
#define KEYCTL_NEGATE 13 /* negate a partially constructed key */
|
||||
#define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */
|
||||
|
||||
#endif /* _LINUX_KEYCTL_H */
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/config.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/compiler.h>
|
||||
|
@ -34,7 +35,17 @@ static inline int request_module(const char * name, ...) { return -ENOSYS; }
|
|||
#endif
|
||||
|
||||
#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
|
||||
extern int call_usermodehelper(char *path, char *argv[], char *envp[], int wait);
|
||||
|
||||
struct key;
|
||||
extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[],
|
||||
struct key *session_keyring, int wait);
|
||||
|
||||
static inline int
|
||||
call_usermodehelper(char *path, char **argv, char **envp, int wait)
|
||||
{
|
||||
return call_usermodehelper_keys(path, argv, envp, NULL, wait);
|
||||
}
|
||||
|
||||
extern void usermodehelper_init(void);
|
||||
|
||||
#endif /* __LINUX_KMOD_H__ */
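A hedged sketch of calling the helper API declared above; the helper path and its arguments are made up for illustration.

/* Spawn a user-space helper and do not wait for it to finish. */
static int run_example_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "arg1", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	return call_usermodehelper(argv[0], argv, envp, 0 /* don't wait */);
}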
|
||||
|
|
|
@ -25,27 +25,45 @@
|
|||
* Rusty Russell).
|
||||
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
|
||||
* interface to access function arguments.
|
||||
* 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston
|
||||
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
|
||||
* <prasanna@in.ibm.com> added function-return probes.
|
||||
*/
|
||||
#include <linux/config.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <asm/kprobes.h>
|
||||
|
||||
/* kprobe_status settings */
|
||||
#define KPROBE_HIT_ACTIVE 0x00000001
|
||||
#define KPROBE_HIT_SS 0x00000002
|
||||
#define KPROBE_REENTER 0x00000004
|
||||
#define KPROBE_HIT_SSDONE 0x00000008
|
||||
|
||||
struct kprobe;
|
||||
struct pt_regs;
|
||||
struct kretprobe;
|
||||
struct kretprobe_instance;
|
||||
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
|
||||
typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
|
||||
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
|
||||
unsigned long flags);
|
||||
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
|
||||
int trapnr);
|
||||
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
|
||||
struct pt_regs *);
|
||||
|
||||
struct kprobe {
|
||||
struct hlist_node hlist;
|
||||
|
||||
/* list of kprobes for multi-handler support */
|
||||
struct list_head list;
|
||||
|
||||
/*count the number of times this probe was temporarily disarmed */
|
||||
unsigned long nmissed;
|
||||
|
||||
/* location of the probe point */
|
||||
kprobe_opcode_t *addr;
|
||||
|
||||
|
@ -85,6 +103,41 @@ struct jprobe {
|
|||
kprobe_opcode_t *entry; /* probe handling code to jump to */
|
||||
};
|
||||
|
||||
#ifdef ARCH_SUPPORTS_KRETPROBES
|
||||
extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
|
||||
#else /* ARCH_SUPPORTS_KRETPROBES */
|
||||
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
#endif /* ARCH_SUPPORTS_KRETPROBES */
|
||||
/*
|
||||
* Function-return probe -
|
||||
* Note:
|
||||
* User needs to provide a handler function, and initialize maxactive.
|
||||
* maxactive - The maximum number of instances of the probed function that
|
||||
* can be active concurrently.
|
||||
* nmissed - tracks the number of times the probed function's return was
|
||||
* ignored, due to maxactive being too low.
|
||||
*
|
||||
*/
|
||||
struct kretprobe {
|
||||
struct kprobe kp;
|
||||
kretprobe_handler_t handler;
|
||||
int maxactive;
|
||||
int nmissed;
|
||||
struct hlist_head free_instances;
|
||||
struct hlist_head used_instances;
|
||||
};
|
||||
|
||||
struct kretprobe_instance {
|
||||
struct hlist_node uflist; /* either on free list or used list */
|
||||
struct hlist_node hlist;
|
||||
struct kretprobe *rp;
|
||||
kprobe_opcode_t *ret_addr;
|
||||
struct task_struct *task;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_KPROBES
|
||||
/* Locks kprobe: irq must be disabled */
|
||||
void lock_kprobes(void);
|
||||
|
@ -99,11 +152,17 @@ static inline int kprobe_running(void)
|
|||
|
||||
extern int arch_prepare_kprobe(struct kprobe *p);
|
||||
extern void arch_copy_kprobe(struct kprobe *p);
|
||||
extern void arch_arm_kprobe(struct kprobe *p);
|
||||
extern void arch_disarm_kprobe(struct kprobe *p);
|
||||
extern void arch_remove_kprobe(struct kprobe *p);
|
||||
extern int arch_init_kprobes(void);
|
||||
extern void show_registers(struct pt_regs *regs);
|
||||
extern kprobe_opcode_t *get_insn_slot(void);
|
||||
extern void free_insn_slot(kprobe_opcode_t *slot);
|
||||
|
||||
/* Get the kprobe at this addr (if any). Must have called lock_kprobes */
|
||||
struct kprobe *get_kprobe(void *addr);
|
||||
struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
|
||||
|
||||
int register_kprobe(struct kprobe *p);
|
||||
void unregister_kprobe(struct kprobe *p);
|
||||
|
@ -113,7 +172,14 @@ int register_jprobe(struct jprobe *p);
|
|||
void unregister_jprobe(struct jprobe *p);
|
||||
void jprobe_return(void);
|
||||
|
||||
#else
|
||||
int register_kretprobe(struct kretprobe *rp);
|
||||
void unregister_kretprobe(struct kretprobe *rp);
|
||||
|
||||
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
|
||||
void add_rp_inst(struct kretprobe_instance *ri);
|
||||
void kprobe_flush_task(struct task_struct *tk);
|
||||
void recycle_rp_inst(struct kretprobe_instance *ri);
|
||||
#else /* CONFIG_KPROBES */
|
||||
static inline int kprobe_running(void)
|
||||
{
|
||||
return 0;
|
||||
|
@ -135,5 +201,15 @@ static inline void unregister_jprobe(struct jprobe *p)
|
|||
static inline void jprobe_return(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
static inline int register_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
static inline void unregister_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
}
|
||||
static inline void kprobe_flush_task(struct task_struct *tk)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_KPROBES */
|
||||
#endif /* _LINUX_KPROBES_H */
|
||||
|
|
|
@ -41,6 +41,7 @@ struct ps2dev {
|
|||
|
||||
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
|
||||
int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
|
||||
void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
|
||||
int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
|
||||
int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int command);
|
||||
int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
|
||||
|
|
|
@ -185,7 +185,7 @@ static inline void list_del(struct list_head *entry)
|
|||
* list_for_each_entry_rcu().
|
||||
*
|
||||
* Note that the caller is not permitted to immediately free
|
||||
* the newly deleted entry. Instead, either synchronize_kernel()
|
||||
* the newly deleted entry. Instead, either synchronize_rcu()
|
||||
* or call_rcu() must be used to defer freeing until an RCU
|
||||
* grace period has elapsed.
|
||||
*/
|
||||
|
|
|
@ -72,6 +72,8 @@ struct nlm_lockowner {
|
|||
uint32_t pid;
|
||||
};
|
||||
|
||||
struct nlm_wait;
|
||||
|
||||
/*
|
||||
* Memory chunk for NLM client RPC request.
|
||||
*/
|
||||
|
@ -81,6 +83,7 @@ struct nlm_rqst {
|
|||
struct nlm_host * a_host; /* host handle */
|
||||
struct nlm_args a_args; /* arguments */
|
||||
struct nlm_res a_res; /* result */
|
||||
struct nlm_wait * a_block;
|
||||
char a_owner[NLMCLNT_OHSIZE];
|
||||
};
|
||||
|
||||
|
@ -142,7 +145,9 @@ extern unsigned long nlmsvc_timeout;
|
|||
* Lockd client functions
|
||||
*/
|
||||
struct nlm_rqst * nlmclnt_alloc_call(void);
|
||||
int nlmclnt_block(struct nlm_host *, struct file_lock *, u32 *);
|
||||
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl);
|
||||
void nlmclnt_finish_block(struct nlm_rqst *req);
|
||||
long nlmclnt_block(struct nlm_rqst *req, long timeout);
|
||||
int nlmclnt_cancel(struct nlm_host *, struct file_lock *);
|
||||
u32 nlmclnt_grant(struct nlm_lock *);
|
||||
void nlmclnt_recovery(struct nlm_host *, u32);
|
||||
|
|
|
@ -61,7 +61,7 @@ struct loop_device {
|
|||
struct semaphore lo_sem;
|
||||
struct semaphore lo_ctl_mutex;
|
||||
struct semaphore lo_bh_mutex;
|
||||
atomic_t lo_pending;
|
||||
int lo_pending;
|
||||
|
||||
request_queue_t *lo_queue;
|
||||
};
|
||||
|
|
|
@ -29,7 +29,7 @@ struct mb_cache_op {
|
|||
|
||||
struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t,
|
||||
int, int);
|
||||
void mb_cache_shrink(struct mb_cache *, struct block_device *);
|
||||
void mb_cache_shrink(struct block_device *);
|
||||
void mb_cache_destroy(struct mb_cache *);
|
||||
|
||||
/* Functions on cache entries */
|
||||
|
|
|
@ -20,9 +20,14 @@ typedef struct mempool_s {
|
|||
mempool_free_t *free;
|
||||
wait_queue_head_t wait;
|
||||
} mempool_t;
|
||||
extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
|
||||
mempool_free_t *free_fn, void *pool_data);
|
||||
extern int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask);
|
||||
|
||||
extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
|
||||
mempool_free_t *free_fn, void *pool_data);
|
||||
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
|
||||
mempool_free_t *free_fn, void *pool_data, int nid);
|
||||
|
||||
extern int mempool_resize(mempool_t *pool, int new_min_nr,
|
||||
unsigned int __nocast gfp_mask);
|
||||
extern void mempool_destroy(mempool_t *pool);
|
||||
extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
|
||||
extern void mempool_free(void *element, mempool_t *pool);
|
||||
|
|
|
@ -395,19 +395,81 @@ static inline void put_page(struct page *page)
|
|||
/*
|
||||
* The zone field is never updated after free_area_init_core()
|
||||
* sets it, so none of the operations on it need to be atomic.
|
||||
* We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
|
||||
* so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
|
||||
*/
|
||||
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
|
||||
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
|
||||
|
||||
|
||||
/*
|
||||
* page->flags layout:
|
||||
*
|
||||
* There are three possibilities for how page->flags get
|
||||
* laid out. The first is for the normal case, without
|
||||
* sparsemem. The second is for sparsemem when there is
|
||||
* plenty of space for node and section. The last is when
|
||||
* we have run out of space and have to fall back to an
|
||||
* alternate (slower) way of determining the node.
|
||||
*
|
||||
* No sparsemem: | NODE | ZONE | ... | FLAGS |
|
||||
* with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
|
||||
* no space for node: | SECTION | ZONE | ... | FLAGS |
|
||||
*/
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
#define SECTIONS_WIDTH SECTIONS_SHIFT
|
||||
#else
|
||||
#define SECTIONS_WIDTH 0
|
||||
#endif
|
||||
|
||||
#define ZONES_WIDTH ZONES_SHIFT
|
||||
|
||||
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
|
||||
#define NODES_WIDTH NODES_SHIFT
|
||||
#else
|
||||
#define NODES_WIDTH 0
|
||||
#endif
|
||||
|
||||
/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
|
||||
#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
|
||||
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
|
||||
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
|
||||
|
||||
/*
|
||||
* We are going to use the flags for the page to node mapping if its in
|
||||
* there. This includes the case where there is no node, so it is implicit.
|
||||
*/
|
||||
#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0)
|
||||
|
||||
#ifndef PFN_SECTION_SHIFT
|
||||
#define PFN_SECTION_SHIFT 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Define the bit shifts to access each section. For non-existant
|
||||
* sections we define the shift as 0; that plus a 0 mask ensures
|
||||
* the compiler will optimise away reference to them.
|
||||
*/
|
||||
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
|
||||
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
|
||||
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
|
||||
|
||||
/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
|
||||
#if FLAGS_HAS_NODE
|
||||
#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT)
|
||||
#else
|
||||
#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
|
||||
#endif
|
||||
#define ZONETABLE_PGSHIFT ZONES_PGSHIFT
|
||||
|
||||
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
|
||||
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
|
||||
#endif
|
||||
|
||||
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
|
||||
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
|
||||
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
|
||||
#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)
|
||||
|
||||
static inline unsigned long page_zonenum(struct page *page)
|
||||
{
|
||||
return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
|
||||
}
|
||||
static inline unsigned long page_to_nid(struct page *page)
|
||||
{
|
||||
return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
|
||||
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
|
||||
}
|
||||
|
||||
struct zone;
|
||||
|
@ -415,13 +477,44 @@ extern struct zone *zone_table[];
|
|||
|
||||
static inline struct zone *page_zone(struct page *page)
|
||||
{
|
||||
return zone_table[page->flags >> NODEZONE_SHIFT];
|
||||
return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
|
||||
ZONETABLE_MASK];
|
||||
}
|
||||
|
||||
static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
|
||||
static inline unsigned long page_to_nid(struct page *page)
|
||||
{
|
||||
page->flags &= ~(~0UL << NODEZONE_SHIFT);
|
||||
page->flags |= nodezone_num << NODEZONE_SHIFT;
|
||||
if (FLAGS_HAS_NODE)
|
||||
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
|
||||
else
|
||||
return page_zone(page)->zone_pgdat->node_id;
|
||||
}
|
||||
static inline unsigned long page_to_section(struct page *page)
|
||||
{
|
||||
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
|
||||
}
|
||||
|
||||
static inline void set_page_zone(struct page *page, unsigned long zone)
|
||||
{
|
||||
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
|
||||
page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
|
||||
}
|
||||
static inline void set_page_node(struct page *page, unsigned long node)
|
||||
{
|
||||
page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
|
||||
page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
|
||||
}
|
||||
static inline void set_page_section(struct page *page, unsigned long section)
|
||||
{
|
||||
page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
|
||||
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
|
||||
}
|
||||
|
||||
static inline void set_page_links(struct page *page, unsigned long zone,
|
||||
unsigned long node, unsigned long pfn)
|
||||
{
|
||||
set_page_zone(page, zone);
|
||||
set_page_node(page, node);
|
||||
set_page_section(page, pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
|
@ -532,10 +625,16 @@ static inline int page_mapped(struct page *page)
|
|||
* Used to decide whether a process gets delivered SIGBUS or
|
||||
* just gets major/minor fault counters bumped up.
|
||||
*/
|
||||
#define VM_FAULT_OOM (-1)
|
||||
#define VM_FAULT_SIGBUS 0
|
||||
#define VM_FAULT_MINOR 1
|
||||
#define VM_FAULT_MAJOR 2
|
||||
#define VM_FAULT_OOM 0x00
|
||||
#define VM_FAULT_SIGBUS 0x01
|
||||
#define VM_FAULT_MINOR 0x02
|
||||
#define VM_FAULT_MAJOR 0x03
|
||||
|
||||
/*
|
||||
* Special case for get_user_pages.
|
||||
* Must be in a distinct bit from the above VM_FAULT_ flags.
|
||||
*/
|
||||
#define VM_FAULT_WRITE 0x10
|
||||
|
||||
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
|
||||
|
||||
|
@ -611,7 +710,13 @@ extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsign
|
|||
extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
|
||||
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
|
||||
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
|
||||
extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
|
||||
extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
|
||||
|
||||
static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
|
||||
{
|
||||
return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
|
||||
}
|
||||
|
||||
extern int make_pages_present(unsigned long addr, unsigned long end);
|
||||
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
|
||||
void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
|
||||
|
|
|
@ -269,7 +269,9 @@ typedef struct pglist_data {
|
|||
struct zone node_zones[MAX_NR_ZONES];
|
||||
struct zonelist node_zonelists[GFP_ZONETYPES];
|
||||
int nr_zones;
|
||||
#ifdef CONFIG_FLAT_NODE_MEM_MAP
|
||||
struct page *node_mem_map;
|
||||
#endif
|
||||
struct bootmem_data *bdata;
|
||||
unsigned long node_start_pfn;
|
||||
unsigned long node_present_pages; /* total number of physical pages */
|
||||
|
@ -284,6 +286,12 @@ typedef struct pglist_data {
|
|||
|
||||
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
|
||||
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
|
||||
#ifdef CONFIG_FLAT_NODE_MEM_MAP
|
||||
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
|
||||
#else
|
||||
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
|
||||
#endif
|
||||
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
|
||||
|
||||
extern struct pglist_data *pgdat_list;
|
||||
|
||||
|
@ -400,7 +408,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
|
|||
/* Returns the number of the current Node. */
|
||||
#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
|
||||
extern struct pglist_data contig_page_data;
|
||||
#define NODE_DATA(nid) (&contig_page_data)
|
||||
|
@ -408,36 +416,177 @@ extern struct pglist_data contig_page_data;
|
|||
#define MAX_NODES_SHIFT 1
|
||||
#define pfn_to_nid(pfn) (0)
|
||||
|
||||
#else /* CONFIG_DISCONTIGMEM */
|
||||
#else /* CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
#include <asm/mmzone.h>
|
||||
|
||||
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
#include <asm/sparsemem.h>
|
||||
#endif
|
||||
|
||||
#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
|
||||
/*
|
||||
* with 32 bit page->flags field, we reserve 8 bits for node/zone info.
|
||||
* there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
|
||||
*/
|
||||
#define MAX_NODES_SHIFT 6
|
||||
#define FLAGS_RESERVED 8
|
||||
|
||||
#elif BITS_PER_LONG == 64
|
||||
/*
|
||||
* with 64 bit flags field, there's plenty of room.
|
||||
*/
|
||||
#define MAX_NODES_SHIFT 10
|
||||
#define FLAGS_RESERVED 32
|
||||
|
||||
#else
|
||||
|
||||
#error BITS_PER_LONG not defined
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* !CONFIG_DISCONTIGMEM */
|
||||
|
||||
#if NODES_SHIFT > MAX_NODES_SHIFT
|
||||
#error NODES_SHIFT > MAX_NODES_SHIFT
|
||||
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
||||
#define early_pfn_to_nid(nid) (0UL)
|
||||
#endif
|
||||
|
||||
/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
|
||||
#define MAX_ZONES_SHIFT 2
|
||||
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
|
||||
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
|
||||
|
||||
#if ZONES_SHIFT > MAX_ZONES_SHIFT
|
||||
#error ZONES_SHIFT > MAX_ZONES_SHIFT
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
|
||||
/*
|
||||
* SECTION_SHIFT #bits space required to store a section #
|
||||
*
|
||||
* PA_SECTION_SHIFT physical address to/from section number
|
||||
* PFN_SECTION_SHIFT pfn to/from section number
|
||||
*/
|
||||
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
|
||||
|
||||
#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
|
||||
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
|
||||
|
||||
#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
|
||||
|
||||
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
|
||||
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
|
||||
|
||||
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
|
||||
#error Allocator MAX_ORDER exceeds SECTION_SIZE
|
||||
#endif
|
||||
|
||||
struct page;
|
||||
struct mem_section {
|
||||
/*
|
||||
* This is, logically, a pointer to an array of struct
|
||||
* pages. However, it is stored with some other magic.
|
||||
* (see sparse.c::sparse_init_one_section())
|
||||
*
|
||||
* Making it a UL at least makes someone do a cast
|
||||
* before using it wrong.
|
||||
*/
|
||||
unsigned long section_mem_map;
|
||||
};
|
||||
|
||||
extern struct mem_section mem_section[NR_MEM_SECTIONS];
|
||||
|
||||
static inline struct mem_section *__nr_to_section(unsigned long nr)
|
||||
{
|
||||
return &mem_section[nr];
|
||||
}
|
||||
|
||||
/*
|
||||
* We use the lower bits of the mem_map pointer to store
|
||||
* a little bit of information. There should be at least
|
||||
* 3 bits here due to 32-bit alignment.
|
||||
*/
|
||||
#define SECTION_MARKED_PRESENT (1UL<<0)
|
||||
#define SECTION_HAS_MEM_MAP (1UL<<1)
|
||||
#define SECTION_MAP_LAST_BIT (1UL<<2)
|
||||
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
|
||||
|
||||
static inline struct page *__section_mem_map_addr(struct mem_section *section)
|
||||
{
|
||||
unsigned long map = section->section_mem_map;
|
||||
map &= SECTION_MAP_MASK;
|
||||
return (struct page *)map;
|
||||
}
|
||||
|
||||
static inline int valid_section(struct mem_section *section)
|
||||
{
|
||||
return (section->section_mem_map & SECTION_MARKED_PRESENT);
|
||||
}
|
||||
|
||||
static inline int section_has_mem_map(struct mem_section *section)
|
||||
{
|
||||
return (section->section_mem_map & SECTION_HAS_MEM_MAP);
|
||||
}
|
||||
|
||||
static inline int valid_section_nr(unsigned long nr)
|
||||
{
|
||||
return valid_section(__nr_to_section(nr));
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a kernel address, find the home node of the underlying memory.
|
||||
*/
|
||||
#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
|
||||
{
|
||||
return __nr_to_section(pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
#define pfn_to_page(pfn) \
|
||||
({ \
|
||||
unsigned long __pfn = (pfn); \
|
||||
__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \
|
||||
})
|
||||
#define page_to_pfn(page) \
|
||||
({ \
|
||||
page - __section_mem_map_addr(__nr_to_section( \
|
||||
page_to_section(page))); \
|
||||
})
|
||||
|
||||
static inline int pfn_valid(unsigned long pfn)
|
||||
{
|
||||
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
|
||||
return 0;
|
||||
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
|
||||
}
|
||||
|
||||
/*
|
||||
* These are _only_ used during initialisation, therefore they
|
||||
* can use __initdata ... They could have names to indicate
|
||||
* this restriction.
|
||||
*/
|
||||
#ifdef CONFIG_NUMA
|
||||
#define pfn_to_nid early_pfn_to_nid
|
||||
#endif
|
||||
|
||||
#define pfn_to_pgdat(pfn) \
|
||||
({ \
|
||||
NODE_DATA(pfn_to_nid(pfn)); \
|
||||
})
|
||||
|
||||
#define early_pfn_valid(pfn) pfn_valid(pfn)
|
||||
void sparse_init(void);
|
||||
#else
|
||||
#define sparse_init() do {} while (0)
|
||||
#endif /* CONFIG_SPARSEMEM */
|
||||
|
||||
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
|
||||
#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
|
||||
#else
|
||||
#define early_pfn_in_nid(pfn, nid) (1)
|
||||
#endif
|
||||
|
||||
#ifndef early_pfn_valid
|
||||
#define early_pfn_valid(pfn) (1)
|
||||
#endif
|
||||
|
||||
void memory_present(int nid, unsigned long start, unsigned long end);
|
||||
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_MMZONE_H */
|
||||
|
|
|
@ -174,5 +174,62 @@ struct serio_device_id {
|
|||
__u8 proto;
|
||||
};
|
||||
|
||||
/*
|
||||
* Struct used for matching a device
|
||||
*/
|
||||
struct of_device_id
|
||||
{
|
||||
char name[32];
|
||||
char type[32];
|
||||
char compatible[128];
|
||||
void *data;
|
||||
};
|
||||
|
||||
|
||||
/* PCMCIA */
|
||||
|
||||
struct pcmcia_device_id {
|
||||
__u16 match_flags;
|
||||
|
||||
__u16 manf_id;
|
||||
__u16 card_id;
|
||||
|
||||
__u8 func_id;
|
||||
|
||||
/* for real multi-function devices */
|
||||
__u8 function;
|
||||
|
||||
/* for pseude multi-function devices */
|
||||
__u8 device_no;
|
||||
|
||||
__u32 prod_id_hash[4];
|
||||
|
||||
/* not matched against in kernelspace*/
|
||||
#ifdef __KERNEL__
|
||||
const char * prod_id[4];
|
||||
#else
|
||||
kernel_ulong_t prod_id[4];
|
||||
#endif
|
||||
|
||||
/* not matched against */
|
||||
kernel_ulong_t driver_info;
|
||||
#ifdef __KERNEL__
|
||||
char * cisfile;
|
||||
#else
|
||||
kernel_ulong_t cisfile;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
|
||||
#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002
|
||||
#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004
|
||||
#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008
|
||||
#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010
|
||||
#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020
|
||||
#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040
|
||||
#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080
|
||||
#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100
|
||||
#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200
|
||||
#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400
|
||||
|
||||
#endif /* LINUX_MOD_DEVICETABLE_H */
|
||||
|
|
|
@ -51,6 +51,9 @@ struct module_attribute {
|
|||
ssize_t (*show)(struct module_attribute *, struct module *, char *);
|
||||
ssize_t (*store)(struct module_attribute *, struct module *,
|
||||
const char *, size_t count);
|
||||
void (*setup)(struct module *, const char *);
|
||||
int (*test)(struct module *);
|
||||
void (*free)(struct module *);
|
||||
};
|
||||
|
||||
struct module_kobject
|
||||
|
@ -239,6 +242,8 @@ struct module
|
|||
/* Sysfs stuff. */
|
||||
struct module_kobject mkobj;
|
||||
struct module_param_attrs *param_attrs;
|
||||
const char *version;
|
||||
const char *srcversion;
|
||||
|
||||
/* Exported symbols */
|
||||
const struct kernel_symbol *syms;
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#define _LINUX_MOUNT_H
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/atomic.h>
|
||||
|
@ -34,7 +35,7 @@ struct vfsmount
|
|||
int mnt_expiry_mark; /* true if marked for expiry */
|
||||
char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
|
||||
struct list_head mnt_list;
|
||||
struct list_head mnt_fslink; /* link in fs-specific expiry list */
|
||||
struct list_head mnt_expire; /* link in fs-specific expiry list */
|
||||
struct namespace *mnt_namespace; /* containing namespace */
|
||||
};
|
||||
|
||||
|
@ -47,7 +48,7 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt)
|
|||
|
||||
extern void __mntput(struct vfsmount *mnt);
|
||||
|
||||
static inline void _mntput(struct vfsmount *mnt)
|
||||
static inline void mntput_no_expire(struct vfsmount *mnt)
|
||||
{
|
||||
if (mnt) {
|
||||
if (atomic_dec_and_test(&mnt->mnt_count))
|
||||
|
@ -59,7 +60,7 @@ static inline void mntput(struct vfsmount *mnt)
|
|||
{
|
||||
if (mnt) {
|
||||
mnt->mnt_expiry_mark = 0;
|
||||
_mntput(mnt);
|
||||
mntput_no_expire(mnt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -76,6 +77,7 @@ extern int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
|
|||
extern void mark_mounts_for_expiry(struct list_head *mounts);
|
||||
|
||||
extern spinlock_t vfsmount_lock;
|
||||
extern dev_t name_to_dev_t(char *name);
|
||||
|
||||
#endif
|
||||
#endif /* _LINUX_MOUNT_H */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
|
||||
/* Common Flash Interface structures
|
||||
* See http://support.intel.com/design/flash/technote/index.htm
|
||||
* $Id: cfi.h,v 1.50 2004/11/20 12:46:51 dwmw2 Exp $
|
||||
* $Id: cfi.h,v 1.54 2005/06/06 23:04:36 tpoynor Exp $
|
||||
*/
|
||||
|
||||
#ifndef __MTD_CFI_H__
|
||||
|
@ -148,6 +148,14 @@ struct cfi_pri_intelext {
|
|||
uint8_t extra[0];
|
||||
} __attribute__((packed));
|
||||
|
||||
struct cfi_intelext_otpinfo {
|
||||
uint32_t ProtRegAddr;
|
||||
uint16_t FactGroups;
|
||||
uint8_t FactProtRegSize;
|
||||
uint16_t UserGroups;
|
||||
uint8_t UserProtRegSize;
|
||||
} __attribute__((packed));
|
||||
|
||||
struct cfi_intelext_blockinfo {
|
||||
uint16_t NumIdentBlocks;
|
||||
uint16_t BlockSize;
|
||||
|
@ -244,7 +252,7 @@ static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int
|
|||
* It looks too long to be inline, but in the common case it should almost all
|
||||
* get optimised away.
|
||||
*/
|
||||
static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
|
||||
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
|
||||
{
|
||||
map_word val = { {0} };
|
||||
int wordwidth, words_per_bus, chip_mode, chips_per_word;
|
||||
|
@ -307,6 +315,69 @@ static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cf
|
|||
}
|
||||
#define CMD(x) cfi_build_cmd((x), map, cfi)
|
||||
|
||||
|
||||
static inline unsigned char cfi_merge_status(map_word val, struct map_info *map,
|
||||
struct cfi_private *cfi)
|
||||
{
|
||||
int wordwidth, words_per_bus, chip_mode, chips_per_word;
|
||||
unsigned long onestat, res = 0;
|
||||
int i;
|
||||
|
||||
/* We do it this way to give the compiler a fighting chance
|
||||
of optimising away all the crap for 'bankwidth' larger than
|
||||
an unsigned long, in the common case where that support is
|
||||
disabled */
|
||||
if (map_bankwidth_is_large(map)) {
|
||||
wordwidth = sizeof(unsigned long);
|
||||
words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
|
||||
} else {
|
||||
wordwidth = map_bankwidth(map);
|
||||
words_per_bus = 1;
|
||||
}
|
||||
|
||||
chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
|
||||
chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
|
||||
|
||||
onestat = val.x[0];
|
||||
/* Or all status words together */
|
||||
for (i=1; i < words_per_bus; i++) {
|
||||
onestat |= val.x[i];
|
||||
}
|
||||
|
||||
res = onestat;
|
||||
switch(chips_per_word) {
|
||||
default: BUG();
|
||||
#if BITS_PER_LONG >= 64
|
||||
case 8:
|
||||
res |= (onestat >> (chip_mode * 32));
|
||||
#endif
|
||||
case 4:
|
||||
res |= (onestat >> (chip_mode * 16));
|
||||
case 2:
|
||||
res |= (onestat >> (chip_mode * 8));
|
||||
case 1:
|
||||
;
|
||||
}
|
||||
|
||||
/* Last, determine what the bit-pattern should be for a single
|
||||
device, according to chip mode and endianness... */
|
||||
switch (chip_mode) {
|
||||
case 1:
|
||||
break;
|
||||
case 2:
|
||||
res = cfi16_to_cpu(res);
|
||||
break;
|
||||
case 4:
|
||||
res = cfi32_to_cpu(res);
|
||||
break;
|
||||
default: BUG();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
|
||||
|
||||
|
||||
/*
|
||||
* Sends a CFI command to a bank of flash for the given geometry.
|
||||
*
|
||||
|
@ -357,16 +428,6 @@ static inline void cfi_udelay(int us)
|
|||
}
|
||||
}
|
||||
|
||||
static inline void cfi_spin_lock(spinlock_t *mutex)
|
||||
{
|
||||
spin_lock_bh(mutex);
|
||||
}
|
||||
|
||||
static inline void cfi_spin_unlock(spinlock_t *mutex)
|
||||
{
|
||||
spin_unlock_bh(mutex);
|
||||
}
|
||||
|
||||
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
|
||||
const char* name);
|
||||
struct cfi_fixup {
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*
|
||||
* (C) 2000 Red Hat. GPLd.
|
||||
*
|
||||
* $Id: flashchip.h,v 1.15 2004/11/05 22:41:06 nico Exp $
|
||||
* $Id: flashchip.h,v 1.17 2005/03/14 18:27:15 bjd Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -29,6 +29,7 @@ typedef enum {
|
|||
FL_ERASE_SUSPENDED,
|
||||
FL_WRITING,
|
||||
FL_WRITING_TO_BUFFER,
|
||||
FL_OTP_WRITE,
|
||||
FL_WRITE_SUSPENDING,
|
||||
FL_WRITE_SUSPENDED,
|
||||
FL_PM_SUSPENDED,
|
||||
|
@ -62,8 +63,8 @@ struct flchip {
|
|||
flstate_t state;
|
||||
flstate_t oldstate;
|
||||
|
||||
int write_suspended:1;
|
||||
int erase_suspended:1;
|
||||
unsigned int write_suspended:1;
|
||||
unsigned int erase_suspended:1;
|
||||
unsigned long in_progress_block_addr;
|
||||
|
||||
spinlock_t *mutex;
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
|
||||
*
|
||||
* $Id: inftl.h,v 1.6 2004/06/30 14:49:00 dbrown Exp $
|
||||
* $Id: inftl.h,v 1.7 2005/06/13 13:08:45 sean Exp $
|
||||
*/
|
||||
|
||||
#ifndef __MTD_INFTL_H__
|
||||
|
@ -20,7 +20,7 @@
|
|||
#include <mtd/inftl-user.h>
|
||||
|
||||
#ifndef INFTL_MAJOR
|
||||
#define INFTL_MAJOR 94
|
||||
#define INFTL_MAJOR 96
|
||||
#endif
|
||||
#define INFTL_PARTN_BITS 4
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
|
||||
/* Overhauled routines for dealing with different mmap regions of flash */
|
||||
/* $Id: map.h,v 1.46 2005/01/05 17:09:44 dwmw2 Exp $ */
|
||||
/* $Id: map.h,v 1.52 2005/05/25 10:29:41 gleixner Exp $ */
|
||||
|
||||
#ifndef __LINUX_MTD_MAP_H__
|
||||
#define __LINUX_MTD_MAP_H__
|
||||
|
@ -263,6 +263,17 @@ static inline map_word map_word_and(struct map_info *map, map_word val1, map_wor
|
|||
return r;
|
||||
}
|
||||
|
||||
static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
|
||||
{
|
||||
map_word r;
|
||||
int i;
|
||||
|
||||
for (i=0; i<map_words(map); i++) {
|
||||
r.x[i] = val1.x[i] & ~val2.x[i];
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
|
||||
{
|
||||
map_word r;
|
||||
|
@ -273,6 +284,7 @@ static inline map_word map_word_or(struct map_info *map, map_word val1, map_word
|
|||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
|
||||
|
||||
static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
|
||||
|
@ -328,16 +340,27 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
|
|||
return orig;
|
||||
}
|
||||
|
||||
#if BITS_PER_LONG < 64
|
||||
#define MAP_FF_LIMIT 4
|
||||
#else
|
||||
#define MAP_FF_LIMIT 8
|
||||
#endif
|
||||
|
||||
static inline map_word map_word_ff(struct map_info *map)
|
||||
{
|
||||
map_word r;
|
||||
int i;
|
||||
|
||||
for (i=0; i<map_words(map); i++) {
|
||||
r.x[i] = ~0UL;
|
||||
|
||||
if (map_bankwidth(map) < MAP_FF_LIMIT) {
|
||||
int bw = 8 * map_bankwidth(map);
|
||||
r.x[0] = (1 << bw) - 1;
|
||||
} else {
|
||||
for (i=0; i<map_words(map); i++)
|
||||
r.x[i] = ~0UL;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
|
||||
{
|
||||
map_word r;
|
||||
|
@ -405,7 +428,7 @@ extern void simple_map_init(struct map_info *);
|
|||
|
||||
|
||||
#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
|
||||
#define map_is_linear(map) (1)
|
||||
#define map_is_linear(map) ({ (void)(map); 1; })
|
||||
|
||||
#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* $Id: mtd.h,v 1.56 2004/08/09 18:46:04 dmarlin Exp $
|
||||
* $Id: mtd.h,v 1.59 2005/04/11 10:19:02 gleixner Exp $
|
||||
*
|
||||
* Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al.
|
||||
*
|
||||
|
@ -18,6 +18,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#include <linux/mtd/compatmac.h>
|
||||
#include <mtd/mtd-abi.h>
|
||||
|
@ -69,7 +70,6 @@ struct mtd_info {
|
|||
|
||||
u_int32_t oobblock; // Size of OOB blocks (e.g. 512)
|
||||
u_int32_t oobsize; // Amount of OOB data per block (e.g. 16)
|
||||
u_int32_t oobavail; // Number of bytes in OOB area available for fs
|
||||
u_int32_t ecctype;
|
||||
u_int32_t eccsize;
|
||||
|
||||
|
@ -80,6 +80,7 @@ struct mtd_info {
|
|||
|
||||
// oobinfo is a nand_oobinfo structure, which can be set by iotcl (MEMSETOOBINFO)
|
||||
struct nand_oobinfo oobinfo;
|
||||
u_int32_t oobavail; // Number of bytes in OOB area available for fs
|
||||
|
||||
/* Data for variable erase regions. If numeraseregions is zero,
|
||||
* it means that the whole device has erasesize as given above.
|
||||
|
@ -113,12 +114,12 @@ struct mtd_info {
|
|||
* flash devices. The user data is one time programmable but the
|
||||
* factory data is read only.
|
||||
*/
|
||||
int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
|
||||
|
||||
int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
|
||||
int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
|
||||
|
||||
/* This function is not yet implemented */
|
||||
int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
|
||||
int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
|
||||
int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
|
||||
int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
|
||||
|
||||
/* kvec-based read/write methods. We need these especially for NAND flash,
|
||||
with its limited number of write cycles per erase.
|
||||
|
@ -147,6 +148,8 @@ struct mtd_info {
|
|||
int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
|
||||
int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
|
||||
|
||||
struct notifier_block reboot_notifier; /* default mode before reboot */
|
||||
|
||||
void *priv;
|
||||
|
||||
struct module *owner;
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
* Steven J. Hill <sjhill@realitydiluted.com>
|
||||
* Thomas Gleixner <tglx@linutronix.de>
|
||||
*
|
||||
* $Id: nand.h,v 1.68 2004/11/12 10:40:37 gleixner Exp $
|
||||
* $Id: nand.h,v 1.73 2005/05/31 19:39:17 gleixner Exp $
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -48,6 +48,10 @@
|
|||
* 02-08-2004 tglx added option field to nand structure for chip anomalities
|
||||
* 05-25-2004 tglx added bad block table support, ST-MICRO manufacturer id
|
||||
* update of nand_chip structure description
|
||||
* 01-17-2005 dmarlin added extended commands for AG-AND device and added option
|
||||
* for BBT_AUTO_REFRESH.
|
||||
* 01-20-2005 dmarlin added optional pointer to hardware specific callback for
|
||||
* extra error status checks.
|
||||
*/
|
||||
#ifndef __LINUX_MTD_NAND_H
|
||||
#define __LINUX_MTD_NAND_H
|
||||
|
@ -115,6 +119,25 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
|
|||
#define NAND_CMD_READSTART 0x30
|
||||
#define NAND_CMD_CACHEDPROG 0x15
|
||||
|
||||
/* Extended commands for AG-AND device */
|
||||
/*
|
||||
* Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but
|
||||
* there is no way to distinguish that from NAND_CMD_READ0
|
||||
* until the remaining sequence of commands has been completed
|
||||
* so add a high order bit and mask it off in the command.
|
||||
*/
|
||||
#define NAND_CMD_DEPLETE1 0x100
|
||||
#define NAND_CMD_DEPLETE2 0x38
|
||||
#define NAND_CMD_STATUS_MULTI 0x71
|
||||
#define NAND_CMD_STATUS_ERROR 0x72
|
||||
/* multi-bank error status (banks 0-3) */
|
||||
#define NAND_CMD_STATUS_ERROR0 0x73
|
||||
#define NAND_CMD_STATUS_ERROR1 0x74
|
||||
#define NAND_CMD_STATUS_ERROR2 0x75
|
||||
#define NAND_CMD_STATUS_ERROR3 0x76
|
||||
#define NAND_CMD_STATUS_RESET 0x7f
|
||||
#define NAND_CMD_STATUS_CLEAR 0xff
|
||||
|
||||
/* Status bits */
|
||||
#define NAND_STATUS_FAIL 0x01
|
||||
#define NAND_STATUS_FAIL_N1 0x02
|
||||
|
@ -143,7 +166,7 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
|
|||
|
||||
/*
|
||||
* Constants for Hardware ECC
|
||||
*/
|
||||
*/
|
||||
/* Reset Hardware ECC for read */
|
||||
#define NAND_ECC_READ 0
|
||||
/* Reset Hardware ECC for write */
|
||||
|
@ -151,6 +174,10 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
|
|||
/* Enable Hardware ECC before syndrom is read back from flash */
|
||||
#define NAND_ECC_READSYN 2
|
||||
|
||||
/* Bit mask for flags passed to do_nand_read_ecc */
|
||||
#define NAND_GET_DEVICE 0x80
|
||||
|
||||
|
||||
/* Option constants for bizarre disfunctionality and real
|
||||
* features
|
||||
*/
|
||||
|
@ -170,6 +197,10 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
|
|||
/* Chip has a array of 4 pages which can be read without
|
||||
* additional ready /busy waits */
|
||||
#define NAND_4PAGE_ARRAY 0x00000040
|
||||
/* Chip requires that BBT is periodically rewritten to prevent
|
||||
* bits from adjacent blocks from 'leaking' in altering data.
|
||||
* This happens with the Renesas AG-AND chips, possibly others. */
|
||||
#define BBT_AUTO_REFRESH 0x00000080
|
||||
|
||||
/* Options valid for Samsung large page devices */
|
||||
#define NAND_SAMSUNG_LP_OPTIONS \
|
||||
|
@ -192,7 +223,8 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
|
|||
* This can only work if we have the ecc bytes directly behind the
|
||||
* data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
|
||||
#define NAND_HWECC_SYNDROME 0x00020000
|
||||
|
||||
/* This option skips the bbt scan during initialization. */
|
||||
#define NAND_SKIP_BBTSCAN 0x00040000
|
||||
|
||||
/* Options set by nand scan */
|
||||
/* Nand scan has allocated oob_buf */
|
||||
|
@ -221,10 +253,13 @@ struct nand_chip;
|
|||
* struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices
|
||||
* @lock: protection lock
|
||||
* @active: the mtd device which holds the controller currently
|
||||
* @wq: wait queue to sleep on if a NAND operation is in progress
|
||||
* used instead of the per chip wait queue when a hw controller is available
|
||||
*/
|
||||
struct nand_hw_control {
|
||||
spinlock_t lock;
|
||||
struct nand_chip *active;
|
||||
wait_queue_head_t wq;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -283,6 +318,8 @@ struct nand_hw_control {
|
|||
* @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial bad block scan
|
||||
* @controller: [OPTIONAL] a pointer to a hardware controller structure which is shared among multiple independend devices
|
||||
* @priv: [OPTIONAL] pointer to private chip date
|
||||
* @errstat: [OPTIONAL] hardware specific function to perform additional error status checks
|
||||
* (determine if errors are correctable)
|
||||
*/
|
||||
|
||||
struct nand_chip {
|
||||
|
@ -338,6 +375,7 @@ struct nand_chip {
|
|||
struct nand_bbt_descr *badblock_pattern;
|
||||
struct nand_hw_control *controller;
|
||||
void *priv;
|
||||
int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -349,6 +387,7 @@ struct nand_chip {
|
|||
#define NAND_MFR_NATIONAL 0x8f
|
||||
#define NAND_MFR_RENESAS 0x07
|
||||
#define NAND_MFR_STMICRO 0x20
|
||||
#define NAND_MFR_HYNIX 0xad
|
||||
|
||||
/**
|
||||
* struct nand_flash_dev - NAND Flash Device ID Structure
|
||||
|
@ -459,6 +498,9 @@ extern int nand_update_bbt (struct mtd_info *mtd, loff_t offs);
|
|||
extern int nand_default_bbt (struct mtd_info *mtd);
|
||||
extern int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt);
|
||||
extern int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt);
|
||||
extern int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
|
||||
size_t * retlen, u_char * buf, u_char * oob_buf,
|
||||
struct nand_oobinfo *oobsel, int flags);
|
||||
|
||||
/*
|
||||
* Constants for oob configuration
|
||||
|
|
35
include/linux/mtd/plat-ram.h
Normal file
35
include/linux/mtd/plat-ram.h
Normal file
|
@ -0,0 +1,35 @@
|
|||
/* linux/include/mtd/plat-ram.h
|
||||
*
|
||||
* (c) 2004 Simtec Electronics
|
||||
* http://www.simtec.co.uk/products/SWLINUX/
|
||||
* Ben Dooks <ben@simtec.co.uk>
|
||||
*
|
||||
* Generic platform device based RAM map
|
||||
*
|
||||
* $Id: plat-ram.h,v 1.2 2005/01/24 00:37:40 bjd Exp $
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_MTD_PLATRAM_H
|
||||
#define __LINUX_MTD_PLATRAM_H __FILE__
|
||||
|
||||
#define PLATRAM_RO (0)
|
||||
#define PLATRAM_RW (1)
|
||||
|
||||
struct platdata_mtd_ram {
|
||||
char *mapname;
|
||||
char **probes;
|
||||
struct mtd_partition *partitions;
|
||||
int nr_partitions;
|
||||
int bankwidth;
|
||||
|
||||
/* control callbacks */
|
||||
|
||||
void (*set_rw)(struct device *dev, int to);
|
||||
};
|
||||
|
||||
#endif /* __LINUX_MTD_PLATRAM_H */
|
|
@ -58,22 +58,16 @@
|
|||
* returned value is <= the real elapsed time.
|
||||
* note 2: this should be able to cope with a few seconds without
|
||||
* overflowing.
|
||||
*
|
||||
* xip_iprefetch()
|
||||
*
|
||||
* Macro to fill instruction prefetch
|
||||
* e.g. a series of nops: asm volatile (".rep 8; nop; .endr");
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_ARCH_SA1100) || defined(CONFIG_ARCH_PXA)
|
||||
#include <asm/mtd-xip.h>
|
||||
|
||||
#include <asm/hardware.h>
|
||||
#ifdef CONFIG_ARCH_PXA
|
||||
#include <asm/arch/pxa-regs.h>
|
||||
#endif
|
||||
|
||||
#define xip_irqpending() (ICIP & ICMR)
|
||||
|
||||
/* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
|
||||
#define xip_currtime() (OSCR)
|
||||
#define xip_elapsed_since(x) (signed)((OSCR - (x)) / 4)
|
||||
|
||||
#else
|
||||
#ifndef xip_irqpending
|
||||
|
||||
#warning "missing IRQ and timer primitives for XIP MTD support"
|
||||
#warning "some of the XIP MTD support code will be disabled"
|
||||
|
@ -85,16 +79,17 @@
|
|||
|
||||
#endif
|
||||
|
||||
#ifndef xip_iprefetch
|
||||
#define xip_iprefetch() do { } while (0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* xip_cpu_idle() is used when waiting for a delay equal or larger than
|
||||
* the system timer tick period. This should put the CPU into idle mode
|
||||
* to save power and to be woken up only when some interrupts are pending.
|
||||
* As above, this should not rely upon standard kernel code.
|
||||
* This should not rely upon standard kernel code.
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_CPU_XSCALE)
|
||||
#define xip_cpu_idle() asm volatile ("mcr p14, 0, %0, c7, c0, 0" :: "r" (1))
|
||||
#else
|
||||
#ifndef xip_cpu_idle
|
||||
#define xip_cpu_idle() do { } while (0)
|
||||
#endif
|
||||
|
||||
|
|
|
@ -12,13 +12,13 @@ struct namespace {
|
|||
struct rw_semaphore sem;
|
||||
};
|
||||
|
||||
extern void umount_tree(struct vfsmount *);
|
||||
extern int copy_namespace(int, struct task_struct *);
|
||||
extern void __put_namespace(struct namespace *namespace);
|
||||
|
||||
static inline void put_namespace(struct namespace *namespace)
|
||||
{
|
||||
if (atomic_dec_and_test(&namespace->count))
|
||||
if (atomic_dec_and_lock(&namespace->count, &vfsmount_lock))
|
||||
/* releases vfsmount_lock */
|
||||
__put_namespace(namespace);
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
struct divert_blk;
|
||||
struct vlan_group;
|
||||
struct ethtool_ops;
|
||||
struct netpoll;
|
||||
struct netpoll_info;
|
||||
/* source back-compat hooks */
|
||||
#define SET_ETHTOOL_OPS(netdev,ops) \
|
||||
( (netdev)->ethtool_ops = (ops) )
|
||||
|
@ -164,12 +164,6 @@ struct netif_rx_stats
|
|||
unsigned total;
|
||||
unsigned dropped;
|
||||
unsigned time_squeeze;
|
||||
unsigned throttled;
|
||||
unsigned fastroute_hit;
|
||||
unsigned fastroute_success;
|
||||
unsigned fastroute_defer;
|
||||
unsigned fastroute_deferred_out;
|
||||
unsigned fastroute_latency_reduction;
|
||||
unsigned cpu_collision;
|
||||
};
|
||||
|
||||
|
@ -468,7 +462,7 @@ struct net_device
|
|||
unsigned char *haddr);
|
||||
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
|
||||
#ifdef CONFIG_NETPOLL
|
||||
struct netpoll *np;
|
||||
struct netpoll_info *npinfo;
|
||||
#endif
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
void (*poll_controller)(struct net_device *dev);
|
||||
|
@ -562,12 +556,9 @@ static inline int unregister_gifconf(unsigned int family)
|
|||
|
||||
struct softnet_data
|
||||
{
|
||||
int throttle;
|
||||
int cng_level;
|
||||
int avg_blog;
|
||||
struct net_device *output_queue;
|
||||
struct sk_buff_head input_pkt_queue;
|
||||
struct list_head poll_list;
|
||||
struct net_device *output_queue;
|
||||
struct sk_buff *completion_queue;
|
||||
|
||||
struct net_device backlog_dev; /* Sorry. 8) */
|
||||
|
@ -925,10 +916,6 @@ extern int skb_checksum_help(struct sk_buff *skb, int inward);
|
|||
extern void net_enable_timestamp(void);
|
||||
extern void net_disable_timestamp(void);
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
extern char *net_sysctl_strdup(const char *s);
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _LINUX_DEV_H */
|
||||
|
|
|
@ -197,6 +197,9 @@ struct ip_conntrack_expect
|
|||
/* Timer function; deletes the expectation. */
|
||||
struct timer_list timeout;
|
||||
|
||||
/* Usage count. */
|
||||
atomic_t use;
|
||||
|
||||
#ifdef CONFIG_IP_NF_NAT_NEEDED
|
||||
/* This is the original per-proto part, used to map the
|
||||
* expected connection the way the recipient expects. */
|
||||
|
@ -236,7 +239,7 @@ ip_conntrack_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
|
|||
}
|
||||
|
||||
/* decrement reference count on a conntrack */
|
||||
extern inline void ip_conntrack_put(struct ip_conntrack *ct);
|
||||
extern void ip_conntrack_put(struct ip_conntrack *ct);
|
||||
|
||||
/* call to create an explicit dependency on ip_conntrack. */
|
||||
extern void need_ip_conntrack(void);
|
||||
|
|
|
@ -30,9 +30,10 @@ extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
|
|||
extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *);
|
||||
|
||||
/* Allocate space for an expectation: this is mandatory before calling
|
||||
ip_conntrack_expect_related. */
|
||||
extern struct ip_conntrack_expect *ip_conntrack_expect_alloc(void);
|
||||
extern void ip_conntrack_expect_free(struct ip_conntrack_expect *exp);
|
||||
ip_conntrack_expect_related. You will have to call put afterwards. */
|
||||
extern struct ip_conntrack_expect *
|
||||
ip_conntrack_expect_alloc(struct ip_conntrack *master);
|
||||
extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp);
|
||||
|
||||
/* Add an expected connection: can have more than one per connection */
|
||||
extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp);
|
||||
|
|
|
@ -18,7 +18,6 @@ struct clusterip_config;
|
|||
struct ipt_clusterip_tgt_info {
|
||||
|
||||
u_int32_t flags;
|
||||
struct clusterip_config *config;
|
||||
|
||||
/* only relevant for new ones */
|
||||
u_int8_t clustermac[6];
|
||||
|
@ -27,6 +26,8 @@ struct ipt_clusterip_tgt_info {
|
|||
u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
|
||||
enum clusterip_hashmode hash_mode;
|
||||
u_int32_t hash_initval;
|
||||
|
||||
struct clusterip_config *config;
|
||||
};
|
||||
|
||||
#endif /*_IPT_CLUSTERIP_H_target*/
|
||||
|
|
|
@ -5,21 +5,20 @@
|
|||
#include <linux/types.h>
|
||||
|
||||
#define NETLINK_ROUTE 0 /* Routing/device hook */
|
||||
#define NETLINK_SKIP 1 /* Reserved for ENskip */
|
||||
#define NETLINK_W1 1 /* 1-wire subsystem */
|
||||
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
|
||||
#define NETLINK_FIREWALL 3 /* Firewalling hook */
|
||||
#define NETLINK_TCPDIAG 4 /* TCP socket monitoring */
|
||||
#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
|
||||
#define NETLINK_XFRM 6 /* ipsec */
|
||||
#define NETLINK_SELINUX 7 /* SELinux event notifications */
|
||||
#define NETLINK_ARPD 8
|
||||
#define NETLINK_ISCSI 8 /* Open-iSCSI */
|
||||
#define NETLINK_AUDIT 9 /* auditing */
|
||||
#define NETLINK_FIB_LOOKUP 10
|
||||
#define NETLINK_ROUTE6 11 /* af_inet6 route comm channel */
|
||||
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
|
||||
#define NETLINK_IP6_FW 13
|
||||
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
|
||||
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
|
||||
#define NETLINK_TAPBASE 16 /* 16 to 31 are ethertap */
|
||||
|
||||
#define MAX_LINKS 32
|
||||
|
||||
|
@ -168,6 +167,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
|
|||
nlh->nlmsg_flags = flags;
|
||||
nlh->nlmsg_pid = pid;
|
||||
nlh->nlmsg_seq = seq;
|
||||
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
|
||||
return nlh;
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
struct netpoll;
|
||||
|
@ -16,14 +17,20 @@ struct netpoll;
|
|||
struct netpoll {
|
||||
struct net_device *dev;
|
||||
char dev_name[16], *name;
|
||||
int rx_flags;
|
||||
void (*rx_hook)(struct netpoll *, int, char *, int);
|
||||
void (*drop)(struct sk_buff *skb);
|
||||
u32 local_ip, remote_ip;
|
||||
u16 local_port, remote_port;
|
||||
unsigned char local_mac[6], remote_mac[6];
|
||||
};
|
||||
|
||||
struct netpoll_info {
|
||||
spinlock_t poll_lock;
|
||||
int poll_owner;
|
||||
int tries;
|
||||
int rx_flags;
|
||||
spinlock_t rx_lock;
|
||||
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
|
||||
};
|
||||
|
||||
void netpoll_poll(struct netpoll *np);
|
||||
|
@ -39,28 +46,47 @@ void netpoll_queue(struct sk_buff *skb);
|
|||
#ifdef CONFIG_NETPOLL
|
||||
static inline int netpoll_rx(struct sk_buff *skb)
|
||||
{
|
||||
return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
|
||||
struct netpoll_info *npinfo = skb->dev->npinfo;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||
/* check rx_flags again with the lock held */
|
||||
if (npinfo->rx_flags && __netpoll_rx(skb))
|
||||
ret = 1;
|
||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void netpoll_poll_lock(struct net_device *dev)
|
||||
static inline void *netpoll_poll_lock(struct net_device *dev)
|
||||
{
|
||||
if (dev->np) {
|
||||
spin_lock(&dev->np->poll_lock);
|
||||
dev->np->poll_owner = smp_processor_id();
|
||||
rcu_read_lock(); /* deal with race on ->npinfo */
|
||||
if (dev->npinfo) {
|
||||
spin_lock(&dev->npinfo->poll_lock);
|
||||
dev->npinfo->poll_owner = smp_processor_id();
|
||||
return dev->npinfo;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void netpoll_poll_unlock(struct net_device *dev)
|
||||
static inline void netpoll_poll_unlock(void *have)
|
||||
{
|
||||
if (dev->np) {
|
||||
spin_unlock(&dev->np->poll_lock);
|
||||
dev->np->poll_owner = -1;
|
||||
struct netpoll_info *npi = have;
|
||||
|
||||
if (npi) {
|
||||
npi->poll_owner = -1;
|
||||
spin_unlock(&npi->poll_lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
#else
|
||||
#define netpoll_rx(a) 0
|
||||
#define netpoll_poll_lock(a)
|
||||
#define netpoll_poll_lock(a) 0
|
||||
#define netpoll_poll_unlock(a)
|
||||
#endif
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#define NFS4_ACCESS_DELETE 0x0010
|
||||
#define NFS4_ACCESS_EXECUTE 0x0020
|
||||
|
||||
#define NFS4_FH_PERISTENT 0x0000
|
||||
#define NFS4_FH_PERSISTENT 0x0000
|
||||
#define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001
|
||||
#define NFS4_FH_VOLATILE_ANY 0x0002
|
||||
#define NFS4_FH_VOL_MIGRATION 0x0004
|
||||
|
@ -382,6 +382,8 @@ enum {
|
|||
NFSPROC4_CLNT_READDIR,
|
||||
NFSPROC4_CLNT_SERVER_CAPS,
|
||||
NFSPROC4_CLNT_DELEGRETURN,
|
||||
NFSPROC4_CLNT_GETACL,
|
||||
NFSPROC4_CLNT_SETACL,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#include <linux/pagemap.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/uio.h>
|
||||
|
||||
#include <linux/nfs_fs_sb.h>
|
||||
|
||||
|
@ -29,7 +28,6 @@
|
|||
#include <linux/nfs4.h>
|
||||
#include <linux/nfs_xdr.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/mempool.h>
|
||||
|
||||
/*
|
||||
|
@ -43,13 +41,6 @@
|
|||
#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
|
||||
#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
|
||||
|
||||
/*
|
||||
* The upper limit on timeouts for the exponential backoff algorithm.
|
||||
*/
|
||||
#define NFS_WRITEBACK_DELAY (5*HZ)
|
||||
#define NFS_WRITEBACK_LOCKDELAY (60*HZ)
|
||||
#define NFS_COMMIT_DELAY (5*HZ)
|
||||
|
||||
/*
|
||||
* superblock magic number for NFS
|
||||
*/
|
||||
|
@ -60,9 +51,6 @@
|
|||
*/
|
||||
#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
|
||||
|
||||
#define NFS_RW_SYNC 0x0001 /* O_SYNC handling */
|
||||
#define NFS_RW_SWAP 0x0002 /* This is a swap request */
|
||||
|
||||
/*
|
||||
* When flushing a cluster of dirty pages, there can be different
|
||||
* strategies:
|
||||
|
@ -96,7 +84,8 @@ struct nfs_open_context {
|
|||
int error;
|
||||
|
||||
struct list_head list;
|
||||
wait_queue_head_t waitq;
|
||||
|
||||
__u64 dir_cookie;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -104,6 +93,8 @@ struct nfs_open_context {
|
|||
*/
|
||||
struct nfs_delegation;
|
||||
|
||||
struct posix_acl;
|
||||
|
||||
/*
|
||||
* nfs fs inode data in memory
|
||||
*/
|
||||
|
@ -121,7 +112,8 @@ struct nfs_inode {
|
|||
/*
|
||||
* Various flags
|
||||
*/
|
||||
unsigned int flags;
|
||||
unsigned long flags; /* atomic bit ops */
|
||||
unsigned long cache_validity; /* bit mask */
|
||||
|
||||
/*
|
||||
* read_cache_jiffies is when we started read-caching this inode,
|
||||
|
@ -140,7 +132,6 @@ struct nfs_inode {
|
|||
*
|
||||
* mtime != read_cache_mtime
|
||||
*/
|
||||
unsigned long readdir_timestamp;
|
||||
unsigned long read_cache_jiffies;
|
||||
unsigned long attrtimeo;
|
||||
unsigned long attrtimeo_timestamp;
|
||||
|
@ -158,6 +149,10 @@ struct nfs_inode {
|
|||
atomic_t data_updates;
|
||||
|
||||
struct nfs_access_entry cache_access;
|
||||
#ifdef CONFIG_NFS_V3_ACL
|
||||
struct posix_acl *acl_access;
|
||||
struct posix_acl *acl_default;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This is the cookie verifier used for NFSv3 readdir
|
||||
|
@ -180,29 +175,33 @@ struct nfs_inode {
|
|||
/* Open contexts for shared mmap writes */
|
||||
struct list_head open_files;
|
||||
|
||||
wait_queue_head_t nfs_i_wait;
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
struct nfs4_cached_acl *nfs4_acl;
|
||||
/* NFSv4 state */
|
||||
struct list_head open_states;
|
||||
struct nfs_delegation *delegation;
|
||||
int delegation_state;
|
||||
struct rw_semaphore rwsem;
|
||||
#endif /* CONFIG_NFS_V4*/
|
||||
|
||||
struct inode vfs_inode;
|
||||
};
|
||||
|
||||
/*
|
||||
* Legal inode flag values
|
||||
* Cache validity bit flags
|
||||
*/
|
||||
#define NFS_INO_STALE 0x0001 /* possible stale inode */
|
||||
#define NFS_INO_ADVISE_RDPLUS 0x0002 /* advise readdirplus */
|
||||
#define NFS_INO_REVALIDATING 0x0004 /* revalidating attrs */
|
||||
#define NFS_INO_INVALID_ATTR 0x0008 /* cached attrs are invalid */
|
||||
#define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */
|
||||
#define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */
|
||||
#define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */
|
||||
#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
|
||||
#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
|
||||
#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
|
||||
#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
|
||||
#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
|
||||
#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
|
||||
|
||||
/*
|
||||
* Bit offsets in flags field
|
||||
*/
|
||||
#define NFS_INO_REVALIDATING (0) /* revalidating attrs */
|
||||
#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */
|
||||
#define NFS_INO_STALE (2) /* possible stale inode */
|
||||
|
||||
static inline struct nfs_inode *NFS_I(struct inode *inode)
{

@ -228,8 +227,7 @@ static inline struct nfs_inode *NFS_I(struct inode *inode)
#define NFS_ATTRTIMEO_UPDATE(inode) (NFS_I(inode)->attrtimeo_timestamp)

#define NFS_FLAGS(inode) (NFS_I(inode)->flags)
#define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING)
#define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE)
#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode)))

#define NFS_FILEID(inode) (NFS_I(inode)->fileid)

@ -240,8 +238,11 @@ static inline int nfs_caches_unstable(struct inode *inode)

static inline void NFS_CACHEINV(struct inode *inode)
{
if (!nfs_caches_unstable(inode))
NFS_FLAGS(inode) |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
if (!nfs_caches_unstable(inode)) {
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
spin_unlock(&inode->i_lock);
}
}

static inline int nfs_server_capable(struct inode *inode, int cap)

@ -251,7 +252,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap)

static inline int NFS_USE_READDIRPLUS(struct inode *inode)
{
return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS;
return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
}

/**

@ -294,12 +295,13 @@ extern int nfs_release(struct inode *, struct file *);
extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern void nfs_begin_attr_update(struct inode *);
extern void nfs_end_attr_update(struct inode *);
extern void nfs_begin_data_update(struct inode *);
extern void nfs_end_data_update(struct inode *);
extern void nfs_end_data_update_defer(struct inode *);
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);

@ -314,6 +316,9 @@ extern u32 root_nfs_parse_addr(char *name); /*__init*/
* linux/fs/nfs/file.c
*/
extern struct inode_operations nfs_file_inode_operations;
#ifdef CONFIG_NFS_V3
extern struct inode_operations nfs3_file_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern struct file_operations nfs_file_operations;
extern struct address_space_operations nfs_file_aops;

@ -328,6 +333,22 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
return NULL;
}

/*
* linux/fs/nfs/xattr.c
*/
#ifdef CONFIG_NFS_V3_ACL
extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
extern int nfs3_setxattr(struct dentry *, const char *,
const void *, size_t, int);
extern int nfs3_removexattr (struct dentry *, const char *name);
#else
# define nfs3_listxattr NULL
# define nfs3_getxattr NULL
# define nfs3_setxattr NULL
# define nfs3_removexattr NULL
#endif

/*
* linux/fs/nfs/direct.c
*/

@ -342,6 +363,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf,
* linux/fs/nfs/dir.c
*/
extern struct inode_operations nfs_dir_inode_operations;
#ifdef CONFIG_NFS_V3
extern struct inode_operations nfs3_dir_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern struct file_operations nfs_dir_operations;
extern struct dentry_operations nfs_dentry_operations;

@ -377,10 +401,10 @@ extern void nfs_commit_done(struct rpc_task *);
*/
extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, unsigned long, unsigned int, int);
extern int nfs_commit_inode(struct inode *, int);
#else
static inline int
nfs_commit_inode(struct inode *inode, unsigned long idx_start, unsigned int npages, int how)
nfs_commit_inode(struct inode *inode, int how)
{
return 0;
}
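The hunk above narrows nfs_commit_inode() from a page-range interface to a whole-inode commit that only takes the "how" flags. A hedged sketch of the corresponding caller change, assuming the FLUSH_SYNC flag defined elsewhere in this header; the helper is invented for illustration:

/* Illustrative caller, not from the patch. */
static int example_commit_whole_file(struct inode *inode)
{
        /* Previously: nfs_commit_inode(inode, 0, 0, FLUSH_SYNC); */
        return nfs_commit_inode(inode, FLUSH_SYNC);
}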
@ -434,11 +458,6 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
mempool_free(p, nfs_wdata_mempool);
}

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode) (0)
#endif

/*
* linux/fs/nfs/read.c
*/

@ -467,6 +486,29 @@ static inline void nfs_readdata_free(struct nfs_read_data *p)

extern void nfs_readdata_release(struct rpc_task *task);

/*
* linux/fs/nfs3proc.c
*/
#ifdef CONFIG_NFS_V3_ACL
extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type);
extern int nfs3_proc_setacl(struct inode *inode, int type,
struct posix_acl *acl);
extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
mode_t mode);
extern void nfs3_forget_cached_acls(struct inode *inode);
#else
static inline int nfs3_proc_set_default_acl(struct inode *dir,
struct inode *inode,
mode_t mode)
{
return 0;
}

static inline void nfs3_forget_cached_acls(struct inode *inode)
{
}
#endif /* CONFIG_NFS_V3_ACL */
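When CONFIG_NFS_V3_ACL is disabled, the inline stubs above compile to no-ops, so call sites do not need #ifdefs of their own. A sketch of the pattern; the calling function is invented for illustration and is not part of the patch:

static int example_set_inherited_acl(struct inode *dir, struct inode *inode, mode_t mode)
{
        /* Becomes "return 0;" on kernels built without NFSv3 ACL support. */
        return nfs3_proc_set_default_acl(dir, inode, mode);
}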
/*
* linux/fs/mount_clnt.c
* (Used only by nfsroot module)

@ -515,230 +557,6 @@ extern void * nfs_root_data(void);

#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)

#ifdef CONFIG_NFS_V4

struct idmap;

/*
* In a seqid-mutating op, this macro controls which error return
* values trigger incrementation of the seqid.
*
* from rfc 3010:
* The client MUST monotonically increment the sequence number for the
* CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
* operations. This is true even in the event that the previous
* operation that used the sequence number received an error. The only
* exception to this rule is if the previous operation received one of
* the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
* NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
* NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
*
*/
#define seqid_mutating_err(err) \
(((err) != NFSERR_STALE_CLIENTID) && \
((err) != NFSERR_STALE_STATEID) && \
((err) != NFSERR_BAD_STATEID) && \
((err) != NFSERR_BAD_SEQID) && \
((err) != NFSERR_BAD_XDR) && \
((err) != NFSERR_RESOURCE) && \
((err) != NFSERR_NOFILEHANDLE))
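The macro evaluates to true for any status except the exempt errors listed in the RFC excerpt, so a seqid-mutating operation bumps its sequence number even when the call fails. A sketch of that use; the helper and the exact status convention (a protocol NFSERR_* value here) are assumptions, not taken from this commit:

static void example_update_seqid(int nfs_status, u32 *seqid)
{
        if (nfs_status == 0 || seqid_mutating_err(nfs_status))
                (*seqid)++;     /* RFC 3010: increment even on most errors */
}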
enum nfs4_client_state {
NFS4CLNT_OK = 0,
};

/*
* The nfs4_client identifies our client state to the server.
*/
struct nfs4_client {
struct list_head cl_servers; /* Global list of servers */
struct in_addr cl_addr; /* Server identifier */
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm;
unsigned long cl_state;

u32 cl_lockowner_id;

/*
* The following rwsem ensures exclusive access to the server
* while we recover the state following a lease expiration.
*/
struct rw_semaphore cl_sem;

struct list_head cl_delegations;
struct list_head cl_state_owners;
struct list_head cl_unused;
int cl_nunused;
spinlock_t cl_lock;
atomic_t cl_count;

struct rpc_clnt * cl_rpcclient;
struct rpc_cred * cl_cred;

struct list_head cl_superblocks; /* List of nfs_server structs */

unsigned long cl_lease_time;
unsigned long cl_last_renewal;
struct work_struct cl_renewd;
struct work_struct cl_recoverd;

wait_queue_head_t cl_waitq;
struct rpc_wait_queue cl_rpcwaitq;

/* used for the setclientid verifier */
struct timespec cl_boot_time;

/* idmapper */
struct idmap * cl_idmap;

/* Our own IP address, as a null-terminated string.
* This is used to generate the clientid, and the callback address.
*/
char cl_ipaddr[16];
unsigned char cl_id_uniquifier;
};

/*
* NFS4 state_owners and lock_owners are simply labels for ordered
* sequences of RPC calls. Their sole purpose is to provide once-only
* semantics by allowing the server to identify replayed requests.
*
* The ->so_sema is held during all state_owner seqid-mutating operations:
* OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
* so_seqid.
*/
struct nfs4_state_owner {
struct list_head so_list; /* per-clientid list of state_owners */
struct nfs4_client *so_client;
u32 so_id; /* 32-bit identifier, unique */
struct semaphore so_sema;
u32 so_seqid; /* protected by so_sema */
atomic_t so_count;

struct rpc_cred *so_cred; /* Associated cred */
struct list_head so_states;
struct list_head so_delegations;
};
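The comment above says ->so_sema is held across every seqid-mutating operation so that so_seqid changes stay ordered. A rough sketch of that discipline, using the nfs4_increment_seqid() declared further down in this block; the surrounding RPC call is elided and the helper name is invented:

static void example_seqid_discipline(struct nfs4_state_owner *sp, int status)
{
        down(&sp->so_sema);             /* serializes so_seqid */
        /* ... issue OPEN/CLOSE/OPEN_DOWNGRADE using sp->so_seqid ... */
        nfs4_increment_seqid(status, sp);
        up(&sp->so_sema);
}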
/*
* struct nfs4_state maintains the client-side state for a given
* (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
*
* OPEN:
* In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
* we need to know how many files are open for reading or writing on a
* given inode. This information too is stored here.
*
* LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
*/

struct nfs4_lock_state {
struct list_head ls_locks; /* Other lock stateids */
fl_owner_t ls_owner; /* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
int ls_flags;
u32 ls_seqid;
u32 ls_id;
nfs4_stateid ls_stateid;
atomic_t ls_count;
};

/* bits for nfs4_state->flags */
enum {
LK_STATE_IN_USE,
NFS_DELEGATED_STATE,
};

struct nfs4_state {
struct list_head open_states; /* List of states for the same state_owner */
struct list_head inode_states; /* List of states for the same inode */
struct list_head lock_states; /* List of subservient lock stateids */

struct nfs4_state_owner *owner; /* Pointer to the open owner */
struct inode *inode; /* Pointer to the inode */

unsigned long flags; /* Do we hold any locks? */
struct semaphore lock_sema; /* Serializes file locking operations */
rwlock_t state_lock; /* Protects the lock_states list */

nfs4_stateid stateid;

unsigned int nreaders;
unsigned int nwriters;
int state; /* State on the server (R,W, or RW) */
atomic_t count;
};


struct nfs4_exception {
long timeout;
int retry;
};

struct nfs4_state_recovery_ops {
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};

extern struct dentry_operations nfs4_dentry_operations;
extern struct inode_operations nfs4_dir_inode_operations;

/* nfs4proc.c */
extern int nfs4_map_errors(int err);
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
extern int nfs4_proc_async_renew(struct nfs4_client *);
extern int nfs4_proc_renew(struct nfs4_client *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);

extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;

/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs4_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs4_client *);

/* nfs4state.c */
extern void init_nfsv4_state(struct nfs_server *);
extern void destroy_nfsv4_state(struct nfs_server *);
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
extern int nfs4_init_client(struct nfs4_client *clp);
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);

extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, mode_t);
extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t);
extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t);
extern void nfs4_put_lock_state(struct nfs4_lock_state *state);
extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);



struct nfs4_mount_data;
#else
#define init_nfsv4_state(server) do { } while (0)
#define destroy_nfsv4_state(server) do { } while (0)
#define nfs4_put_state_owner(inode, owner) do { } while (0)
#define nfs4_put_open_state(state) do { } while (0)
#define nfs4_close_state(a, b) do { } while (0)
#define nfs4_renewd_prepare_shutdown(server) do { } while (0)
#endif

#endif /* __KERNEL__ */

/*
@ -16,6 +16,11 @@ struct nfs_lock_info {
struct nlm_lockowner *owner;
};

struct nfs4_lock_state;
struct nfs4_lock_info {
struct nfs4_lock_state *owner;
};

/*
* Lock flag values
*/

@ -10,6 +10,7 @@
struct nfs_server {
struct rpc_clnt * client; /* RPC client handle */
struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */
struct backing_dev_info backing_dev_info;
int flags; /* various flags */

@ -58,6 +58,7 @@ struct nfs_mount_data {
#define NFS_MOUNT_KERBEROS 0x0100 /* 3 */
#define NFS_MOUNT_NONLM 0x0200 /* 3 */
#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */
#define NFS_MOUNT_NOACL 0x0800 /* 4 */
#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */
#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
#define NFS_MOUNT_FLAGMASK 0xFFFF
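The new NFS_MOUNT_NOACL bit travels from the mount data into nfs_server.flags (shown in the nfs_fs_sb.h hunk above), so the client can tell whether ACL calls are worth attempting. A rough sketch of such a check; it is an assumption for illustration, not taken from the patch:

static int example_acls_enabled(struct nfs_server *server)
{
        return !(server->flags & NFS_MOUNT_NOACL);
}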
@ -19,6 +19,12 @@

#include <asm/atomic.h>

/*
* Valid flags for the radix tree
*/
#define NFS_PAGE_TAG_DIRTY 0
#define NFS_PAGE_TAG_WRITEBACK 1
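These tags are intended for the per-inode radix tree of outstanding nfs_page requests, letting the writeback code find dirty or in-flight requests without walking every list. A sketch of tagging a request, assuming the generic radix-tree tag API and the nfs_page_tree field that struct nfs_inode carries in this era; neither detail comes from this hunk:

static void example_tag_request_dirty(struct nfs_inode *nfsi, unsigned long index)
{
        radix_tree_tag_set(&nfsi->nfs_page_tree, index, NFS_PAGE_TAG_DIRTY);
}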
/*
* Valid flags for a dirty buffer
*/

@ -26,6 +32,7 @@
#define PG_NEED_COMMIT 1
#define PG_NEED_RESCHED 2

struct nfs_inode;
struct nfs_page {
struct list_head wb_list, /* Defines state of page: */
*wb_list_head; /* read/write/commit */

@ -54,14 +61,17 @@ extern void nfs_clear_request(struct nfs_page *req);
extern void nfs_release_request(struct nfs_page *req);


extern void nfs_list_add_request(struct nfs_page *, struct list_head *);

extern int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
unsigned long idx_start, unsigned int npages);
extern int nfs_scan_list(struct list_head *, struct list_head *,
unsigned long, unsigned int);
extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
unsigned int);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_writeback_locked(struct nfs_page *req);
extern void nfs_clear_page_writeback(struct nfs_page *req);


/*
* Lock the page of an asynchronous request without incrementing the wb_count

@ -86,6 +96,18 @@ nfs_lock_request(struct nfs_page *req)
return 1;
}

/**
* nfs_list_add_request - Insert a request into a list
* @req: request
* @head: head of list into which to insert the request.
*/
static inline void
nfs_list_add_request(struct nfs_page *req, struct list_head *head)
{
list_add_tail(&req->wb_list, head);
req->wb_list_head = head;
}


/**
* nfs_list_remove_request - Remove a request from its wb_list

@ -96,10 +118,6 @@ nfs_list_remove_request(struct nfs_page *req)
{
if (list_empty(&req->wb_list))
return;
if (!NFS_WBACK_BUSY(req)) {
printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n");
BUG();
}
list_del_init(&req->wb_list);
req->wb_list_head = NULL;
}
@ -2,6 +2,7 @@
#define _LINUX_NFS_XDR_H

#include <linux/sunrpc/xprt.h>
#include <linux/nfsacl.h>

struct nfs4_fsid {
__u64 major;

@ -326,6 +327,20 @@ struct nfs_setattrargs {
const u32 * bitmask;
};

struct nfs_setaclargs {
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
};

struct nfs_getaclargs {
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
};

struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;

@ -354,6 +369,20 @@ struct nfs_readdirargs {
struct page ** pages;
};

struct nfs3_getaclargs {
struct nfs_fh * fh;
int mask;
struct page ** pages;
};

struct nfs3_setaclargs {
struct inode * inode;
int mask;
struct posix_acl * acl_access;
struct posix_acl * acl_default;
struct page ** pages;
};

struct nfs_diropok {
struct nfs_fh * fh;
struct nfs_fattr * fattr;

@ -477,6 +506,15 @@ struct nfs3_readdirres {
int plus;
};

struct nfs3_getaclres {
struct nfs_fattr * fattr;
int mask;
unsigned int acl_access_count;
unsigned int acl_default_count;
struct posix_acl * acl_access;
struct posix_acl * acl_default;
};

#ifdef CONFIG_NFS_V4

typedef u64 clientid4;

@ -667,6 +705,7 @@ struct nfs_rpc_ops {
int version; /* Protocol version */
struct dentry_operations *dentry_ops;
struct inode_operations *dir_inode_ops;
struct inode_operations *file_inode_ops;

int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);

@ -713,6 +752,7 @@ struct nfs_rpc_ops {
int (*file_open) (struct inode *, struct file *);
int (*file_release) (struct inode *, struct file *);
int (*lock)(struct file *, int, struct file_lock *);
void (*clear_acl_cache)(struct inode *);
};
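The new clear_acl_cache hook sits in the per-version nfs_rpc_ops vector, so generic code can drop cached ACLs without knowing which protocol is in use. A sketch of such a call through the NFS_PROTO() accessor from nfs_fs.h; the NULL check is an assumption, since only NFSv3 fills in the hook in this series:

static void example_zap_acl_cache(struct inode *inode)
{
        if (NFS_PROTO(inode)->clear_acl_cache)
                NFS_PROTO(inode)->clear_acl_cache(inode);
}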
/*

@ -732,4 +772,7 @@ extern struct rpc_version nfs_version2;
extern struct rpc_version nfs_version3;
extern struct rpc_version nfs_version4;

extern struct rpc_version nfsacl_version3;
extern struct rpc_program nfsacl_program;

#endif
Some files were not shown because too many files have changed in this diff.