commit d2f6409584
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

3119 changed files with 163508 additions and 62433 deletions

The hunks below list the affected lines of each file; where a line was changed, the pre-merge version is shown immediately before its replacement.
@@ -138,7 +138,7 @@ enum machine_type {
#endif
#endif
#define _N_SEGMENT_ROUND(x) (((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1))
#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text)
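For context only (not part of the diff): a minimal user-space sketch showing that the open-coded rounding and the ALIGN() form agree, assuming SEGMENT_SIZE is a power of two and using a local ALIGN definition with the same shape as the kernel macro.

#include <assert.h>
#include <stdio.h>

#define SEGMENT_SIZE 0x1000                                  /* hypothetical value for illustration */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))          /* same idea as the kernel helper */

int main(void)
{
        unsigned long x;

        for (x = 0; x < 0x5000; x += 7) {
                unsigned long rounded_old = ((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1);
                unsigned long rounded_new = ALIGN(x, SEGMENT_SIZE);
                assert(rounded_old == rounded_new);           /* both round x up to the next boundary */
        }
        printf("0x123 rounds to 0x%lx\n", ALIGN(0x123UL, SEGMENT_SIZE));   /* prints 0x1000 */
        return 0;
}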
@@ -342,11 +342,19 @@ struct acpi_table_ecdt {
/* PCI MMCONFIG */
/* Defined in PCI Firmware Specification 3.0 */
struct acpi_table_mcfg_config {
        u32 base_address;
        u32 base_reserved;
        u16 pci_segment_group_number;
        u8 start_bus_number;
        u8 end_bus_number;
        u8 reserved[4];
} __attribute__ ((packed));
struct acpi_table_mcfg {
        struct acpi_table_header header;
        u8 reserved[8];
        u32 base_address;
        u32 base_reserved;
        struct acpi_table_mcfg_config config[0];
} __attribute__ ((packed));
/* Table Handlers */
@@ -391,6 +399,7 @@ int acpi_table_parse (enum acpi_table_id id, acpi_table_handler handler);
int acpi_get_table_header_early (enum acpi_table_id id, struct acpi_table_header **header);
int acpi_table_parse_madt (enum acpi_madt_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries);
int acpi_table_parse_srat (enum acpi_srat_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries);
int acpi_parse_mcfg (unsigned long phys_addr, unsigned long size);
void acpi_table_print (struct acpi_table_header *header, unsigned long phys_addr);
void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
@@ -407,9 +416,13 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu);
int acpi_unmap_lsapic(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
extern int acpi_mp_config;
extern u32 pci_mmcfg_base_addr;
extern struct acpi_table_mcfg_config *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
extern int sbf_port ;
include/linux/arcfb.h (new file, 8 lines)
@@ -0,0 +1,8 @@
#ifndef __LINUX_ARCFB_H__
#define __LINUX_ARCFB_H__
#define FBIO_WAITEVENT _IO('F', 0x88)
#define FBIO_GETCONTROL2 _IOR('F', 0x89, size_t)
#endif
@@ -1,6 +1,8 @@
#ifndef __LINUX_ATALK_H__
#define __LINUX_ATALK_H__
#include <asm/byteorder.h>
/*
 * AppleTalk networking structures
 *
@@ -20,7 +22,7 @@
#define SIOCATALKDIFADDR (SIOCPROTOPRIVATE + 0)
struct atalk_addr {
        __u16 s_net;
        __be16 s_net;
        __u8 s_node;
};
@@ -33,8 +35,8 @@ struct sockaddr_at {
struct atalk_netrange {
        __u8 nr_phase;
        __u16 nr_firstnet;
        __u16 nr_lastnet;
        __be16 nr_firstnet;
        __be16 nr_lastnet;
};
#ifdef __KERNEL__
@@ -70,8 +72,8 @@ struct atalk_iface {
struct atalk_sock {
        /* struct sock has to be the first member of atalk_sock */
        struct sock sk;
        unsigned short dest_net;
        unsigned short src_net;
        __be16 dest_net;
        __be16 src_net;
        unsigned char dest_node;
        unsigned char src_node;
        unsigned char dest_port;
@@ -95,9 +97,9 @@ struct ddpehdr {
        deh_hops:4,
        deh_len:10;
#endif
        __u16 deh_sum;
        __u16 deh_dnet;
        __u16 deh_snet;
        __be16 deh_sum;
        __be16 deh_dnet;
        __be16 deh_snet;
        __u8 deh_dnode;
        __u8 deh_snode;
        __u8 deh_dport;
@@ -142,24 +144,24 @@ struct ddpshdr {
/* AppleTalk AARP headers */
struct elapaarp {
        __u16 hw_type;
        __be16 hw_type;
#define AARP_HW_TYPE_ETHERNET 1
#define AARP_HW_TYPE_TOKENRING 2
        __u16 pa_type;
        __be16 pa_type;
        __u8 hw_len;
        __u8 pa_len;
#define AARP_PA_ALEN 4
        __u16 function;
        __be16 function;
#define AARP_REQUEST 1
#define AARP_REPLY 2
#define AARP_PROBE 3
        __u8 hw_src[ETH_ALEN] __attribute__ ((packed));
        __u8 pa_src_zero __attribute__ ((packed));
        __u16 pa_src_net __attribute__ ((packed));
        __be16 pa_src_net __attribute__ ((packed));
        __u8 pa_src_node __attribute__ ((packed));
        __u8 hw_dst[ETH_ALEN] __attribute__ ((packed));
        __u8 pa_dst_zero __attribute__ ((packed));
        __u16 pa_dst_net __attribute__ ((packed));
        __be16 pa_dst_net __attribute__ ((packed));
        __u8 pa_dst_node __attribute__ ((packed));
};
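A side note on the __u16 to __be16 conversions above (illustration only, not part of the diff): __be16 marks a field as holding a big-endian, network-order value, so such fields are filled and read through the byte-order helpers rather than assigned raw host-order integers. A minimal driver-style sketch, assuming only the struct atalk_addr layout shown above:

#include <linux/atalk.h>
#include <asm/byteorder.h>

static void example_fill_addr(struct atalk_addr *a, u16 net_host_order, u8 node)
{
        a->s_net  = htons(net_host_order);      /* host order -> __be16 */
        a->s_node = node;
}

static u16 example_read_net(const struct atalk_addr *a)
{
        return ntohs(a->s_net);                 /* __be16 -> host order */
}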
@@ -23,7 +23,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 4
#define AUTOFS_PROTO_SUBVERSION 6
#define AUTOFS_PROTO_SUBVERSION 7
/* Mask for expire behaviour */
#define AUTOFS_EXP_IMMEDIATE 1
@@ -69,6 +69,11 @@ extern void remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern int suid_dumpable;
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
/* Stack area protections */
#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */
@@ -22,6 +22,7 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
@@ -149,6 +150,19 @@ struct bio {
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC 4
/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
#define bio_set_prio(bio, prio) do { \
        WARN_ON(prio >= (1 << IOPRIO_BITS)); \
        (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
        (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
} while (0)
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
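For illustration only (not part of the diff): a user-space sketch of the bio_prio()/bio_set_prio() arithmetic above, packing an I/O priority into the upper bits of a flags word while leaving the low flag bits alone. IOPRIO_BITS is assumed to be 16, matching <linux/ioprio.h> of this era.

#include <assert.h>
#include <stdio.h>

#define IOPRIO_BITS     16                              /* assumed value */
#define PRIO_SHIFT      (8 * sizeof(unsigned long) - IOPRIO_BITS)

int main(void)
{
        unsigned long bi_rw = 0x5;                      /* pretend some BIO_RW_* flags are set */
        unsigned long prio  = 0x2003;                   /* class/data already packed by the ioprio code */

        /* bio_set_prio(): clear any old priority bits, then or in the new ones */
        bi_rw &= ((1UL << PRIO_SHIFT) - 1);
        bi_rw |= (prio << PRIO_SHIFT);

        /* bio_prio(): the priority comes back out of the top bits */
        assert((bi_rw >> PRIO_SHIFT) == prio);
        assert((bi_rw & 0x5) == 0x5);                   /* low flag bits are untouched */
        printf("bi_rw = %#lx, prio = %#lx\n", bi_rw, bi_rw >> PRIO_SHIFT);
        return 0;
}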
@@ -54,16 +54,23 @@ struct as_io_context {
struct cfq_queue;
struct cfq_io_context {
        void (*dtor)(struct cfq_io_context *);
        void (*exit)(struct cfq_io_context *);
        struct io_context *ioc;
        /*
         * circular list of cfq_io_contexts belonging to a process io context
         */
        struct list_head list;
        struct cfq_queue *cfqq;
        void *key;
        struct io_context *ioc;
        unsigned long last_end_request;
        unsigned long last_queue;
        unsigned long ttime_total;
        unsigned long ttime_samples;
        unsigned long ttime_mean;
        void (*dtor)(struct cfq_io_context *);
        void (*exit)(struct cfq_io_context *);
};
/*
@@ -73,7 +80,9 @@ struct cfq_io_context {
 */
struct io_context {
        atomic_t refcount;
        pid_t pid;
        struct task_struct *task;
        int (*set_ioprio)(struct io_context *, unsigned int);
        /*
         * For request batching
@@ -81,14 +90,13 @@ struct io_context {
        unsigned long last_waited; /* Time last woken after wait for request */
        int nr_batch_requests; /* Number of requests left in the batch */
        spinlock_t lock;
        struct as_io_context *aic;
        struct cfq_io_context *cic;
};
void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *current_io_context(int gfp_flags);
struct io_context *get_io_context(int gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
@@ -134,6 +142,8 @@ struct request {
        void *elevator_private;
        unsigned short ioprio;
        int rq_status; /* should split this into a few status bits */
        struct gendisk *rq_disk;
        int errors;
@@ -285,16 +295,12 @@ enum blk_queue_state {
        Queue_up,
};
#define BLK_TAGS_PER_LONG (sizeof(unsigned long) * 8)
#define BLK_TAGS_MASK (BLK_TAGS_PER_LONG - 1)
struct blk_queue_tag {
        struct request **tag_index; /* map of busy tags */
        unsigned long *tag_map; /* bit map of free/busy tags */
        struct list_head busy_list; /* fifo list of busy tags */
        int busy; /* current depth */
        int max_depth; /* what we will send to device */
        int real_max_depth; /* what the array can hold */
        atomic_t refcnt; /* map can be shared */
};
@@ -396,6 +402,7 @@ struct request_queue
         */
        unsigned int sg_timeout;
        unsigned int sg_reserved_size;
        int node;
        struct list_head drain_list;
@@ -542,15 +549,12 @@ extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
@@ -615,6 +619,8 @@ static inline void blkdev_dequeue_request(struct request *req)
/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
        spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
@@ -632,7 +638,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern void blk_queue_ordered(request_queue_t *, int);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -646,7 +651,8 @@ extern void blk_wait_queue_drained(request_queue_t *, int);
extern void blk_finish_queue_drain(request_queue_t *);
int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int);
request_queue_t *blk_alloc_queue(int gfp_mask);
request_queue_t *blk_alloc_queue_node(int,int);
#define blk_put_queue(q) blk_cleanup_queue((q))
/*
@@ -675,8 +681,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
extern void drive_stat_acct(struct request *, int, int);
static inline int queue_hardsect_size(request_queue_t *q)
{
        int retval = 512;
@@ -22,6 +22,10 @@ extern unsigned long min_low_pfn;
 */
extern unsigned long max_pfn;
#ifdef CONFIG_CRASH_DUMP
extern unsigned long saved_max_pfn;
#endif
/*
 * node_bootmem_map is a map pointer - the bits represent all physical
 * memory pages (including holes) on the node.
@@ -67,6 +71,15 @@ extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size,
        __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
#else
static inline void *alloc_remap(int nid, unsigned long size)
{
        return NULL;
}
#endif
extern unsigned long __initdata nr_kernel_pages;
extern unsigned long __initdata nr_all_pages;
@@ -92,29 +92,32 @@
#endif /* OPTIMIZE */
static __inline__ __const__ __u32 __fswahw32(__u32 x)
static inline __u32 __fswahw32(__u32 x)
{
        return __arch__swahw32(x);
}
static __inline__ __u32 __swahw32p(__u32 *x)
static inline __u32 __swahw32p(__u32 *x)
{
        return __arch__swahw32p(x);
}
static __inline__ void __swahw32s(__u32 *addr)
static inline void __swahw32s(__u32 *addr)
{
        __arch__swahw32s(addr);
}
static __inline__ __const__ __u32 __fswahb32(__u32 x)
static inline __u32 __fswahb32(__u32 x)
{
        return __arch__swahb32(x);
}
static __inline__ __u32 __swahb32p(__u32 *x)
static inline __u32 __swahb32p(__u32 *x)
{
        return __arch__swahb32p(x);
}
static __inline__ void __swahb32s(__u32 *addr)
static inline void __swahb32s(__u32 *addr)
{
        __arch__swahb32s(addr);
}
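For reference (not part of the diff): a small user-space sketch of what the half-word swap (swahw32) and in-halfword byte swap (swahb32) wrappers above compute, written as open-coded equivalents.

#include <assert.h>
#include <stdint.h>

static uint32_t swahw32(uint32_t x)      /* swap the two 16-bit halfwords */
{
        return (x << 16) | (x >> 16);
}

static uint32_t swahb32(uint32_t x)      /* swap the bytes within each halfword */
{
        return ((x & 0x00ff00ffU) << 8) | ((x & 0xff00ff00U) >> 8);
}

int main(void)
{
        assert(swahw32(0x12345678) == 0x56781234);
        assert(swahb32(0x12345678) == 0x34127856);
        return 0;
}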
@@ -10,6 +10,7 @@
typedef struct _cciss_pci_info_struct
{
        unsigned char bus;
        unsigned short domain;
        unsigned char dev_fn;
        __u32 board_id;
} cciss_pci_info_struct;
@@ -69,6 +69,7 @@ extern struct semaphore cpucontrol;
        register_cpu_notifier(&fn##_nb); \
}
int cpu_down(unsigned int cpu);
extern int __attribute__((weak)) smp_prepare_cpu(int cpu);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#else
#define lock_cpu_hotplug() do { } while (0)
include/linux/crash_dump.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H
#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
#define ELFCORE_ADDR_MAX (-1ULL)
extern unsigned long long elfcorehdr_addr;
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
        unsigned long, int);
extern struct file_operations proc_vmcore_operations;
extern struct proc_dir_entry *proc_vmcore;
#endif /* CONFIG_CRASH_DUMP */
#endif /* LINUX_CRASHDUMP_H */
@@ -14,6 +14,7 @@
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -44,14 +45,15 @@ struct device;
struct device_driver;
struct class;
struct class_device;
struct class_simple;
struct bus_type {
        char * name;
        const char * name;
        struct subsystem subsys;
        struct kset drivers;
        struct kset devices;
        struct klist klist_devices;
        struct klist klist_drivers;
        struct bus_attribute * bus_attrs;
        struct device_attribute * dev_attrs;
@@ -67,7 +69,7 @@ struct bus_type {
extern int bus_register(struct bus_type * bus);
extern void bus_unregister(struct bus_type * bus);
extern int bus_rescan_devices(struct bus_type * bus);
extern void bus_rescan_devices(struct bus_type * bus);
extern struct bus_type * get_bus(struct bus_type * bus);
extern void put_bus(struct bus_type * bus);
@@ -78,6 +80,8 @@ extern struct bus_type * find_bus(char * name);
int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
        int (*fn)(struct device *, void *));
struct device * bus_find_device(struct bus_type *bus, struct device *start,
        void *data, int (*match)(struct device *, void *));
int bus_for_each_drv(struct bus_type * bus, struct device_driver * start,
        void * data, int (*fn)(struct device_driver *, void *));
@@ -98,17 +102,18 @@ extern int bus_create_file(struct bus_type *, struct bus_attribute *);
extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
struct device_driver {
        char * name;
        const char * name;
        struct bus_type * bus;
        struct completion unloaded;
        struct kobject kobj;
        struct list_head devices;
        struct klist klist_devices;
        struct klist_node knode_bus;
        struct module * owner;
        int (*probe) (struct device * dev);
        int (*remove) (struct device * dev);
        void (*shutdown) (struct device * dev);
        int (*suspend) (struct device * dev, pm_message_t state, u32 level);
        int (*resume) (struct device * dev, u32 level);
@@ -137,12 +142,19 @@ struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
extern int driver_create_file(struct device_driver *, struct driver_attribute *);
extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
extern int driver_for_each_device(struct device_driver * drv, struct device * start,
        void * data, int (*fn)(struct device *, void *));
struct device * driver_find_device(struct device_driver *drv,
        struct device *start, void *data,
        int (*match)(struct device *, void *));
/*
 * device classes
 */
struct class {
        char * name;
        const char * name;
        struct module * owner;
        struct subsystem subsys;
        struct list_head children;
@@ -185,6 +197,7 @@ struct class_device {
        struct kobject kobj;
        struct class * class; /* required */
        dev_t devt; /* dev_t, creates the sysfs "dev" */
        struct class_device_attribute *devt_attr;
        struct device * dev; /* not necessary, but nice to have */
        void * class_data; /* class-specific data */
@@ -245,26 +258,28 @@ struct class_interface {
extern int class_interface_register(struct class_interface *);
extern void class_interface_unregister(struct class_interface *);
/* interface for class simple stuff */
extern struct class_simple *class_simple_create(struct module *owner, char *name);
extern void class_simple_destroy(struct class_simple *cs);
extern struct class_device *class_simple_device_add(struct class_simple *cs, dev_t dev, struct device *device, const char *fmt, ...)
        __attribute__((format(printf,4,5)));
extern int class_simple_set_hotplug(struct class_simple *,
        int (*hotplug)(struct class_device *dev, char **envp, int num_envp, char *buffer, int buffer_size));
extern void class_simple_device_remove(dev_t dev);
extern struct class *class_create(struct module *owner, char *name);
extern void class_destroy(struct class *cls);
extern struct class_device *class_device_create(struct class *cls, dev_t devt,
        struct device *device, char *fmt, ...)
        __attribute__((format(printf,4,5)));
extern void class_device_destroy(struct class *cls, dev_t devt);
struct device {
        struct list_head node; /* node in sibling list */
        struct list_head bus_list; /* node in bus's list */
        struct list_head driver_list;
        struct list_head children;
        struct klist klist_children;
        struct klist_node knode_parent; /* node in sibling list */
        struct klist_node knode_driver;
        struct klist_node knode_bus;
        struct device * parent;
        struct kobject kobj;
        char bus_id[BUS_ID_SIZE]; /* position on parent bus */
        struct semaphore sem; /* semaphore to synchronize calls to
                               * its driver.
                               */
        struct bus_type * bus; /* type of bus device is on */
        struct device_driver *driver; /* which driver has allocated this
                                         device */
@@ -288,12 +303,6 @@ struct device {
        void (*release)(struct device * dev);
};
static inline struct device *
list_to_dev(struct list_head *node)
{
        return list_entry(node, struct device, node);
}
static inline void *
dev_get_drvdata (struct device *dev)
{
@@ -321,7 +330,6 @@ extern int device_for_each_child(struct device *, void *,
 * Manual binding of a device to driver. See drivers/base/bus.c
 * for information on use.
 */
extern int driver_probe_device(struct device_driver * drv, struct device * dev);
extern void device_bind_driver(struct device * dev);
extern void device_release_driver(struct device * dev);
extern int device_attach(struct device * dev);
@@ -332,8 +340,10 @@ extern void driver_attach(struct device_driver * drv);
struct device_attribute {
        struct attribute attr;
        ssize_t (*show)(struct device * dev, char * buf);
        ssize_t (*store)(struct device * dev, const char * buf, size_t count);
        ssize_t (*show)(struct device *dev, struct device_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count);
};
#define DEVICE_ATTR(_name,_mode,_show,_store) \
@@ -360,13 +370,12 @@ extern int (*platform_notify_remove)(struct device * dev);
 */
extern struct device * get_device(struct device * dev);
extern void put_device(struct device * dev);
extern struct device *device_find(const char *name, struct bus_type *bus);
/* drivers/base/platform.c */
struct platform_device {
        char * name;
        const char * name;
        u32 id;
        struct device dev;
        u32 num_resources;
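For illustration only (not from this merge): a driver-side sketch of a sysfs attribute using the updated show/store prototypes above, which now also receive the struct device_attribute pointer. The "foo" attribute and its callbacks are made up for the example.

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* report some driver state */
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        /* parse and apply the written value here */
        return count;                           /* consume the whole write */
}

static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);

/* typically registered from probe() with something like:
 *      device_create_file(dev, &dev_attr_foo);
 */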
@@ -9,6 +9,7 @@ enum dmi_field {
        DMI_SYS_VENDOR,
        DMI_PRODUCT_NAME,
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,
@@ -11,6 +11,12 @@
/* Root squash turned on */
#define V1_DQF_RSQUASH 1
/* Numbers of blocks needed for updates */
#define V1_INIT_ALLOC 1
#define V1_INIT_REWRITE 1
#define V1_DEL_ALLOC 0
#define V1_DEL_REWRITE 2
/* Special information about quotafile */
struct v1_mem_dqinfo {
};
@@ -10,6 +10,12 @@
/* id numbers of quota format */
#define QFMT_VFS_V0 2
/* Numbers of blocks needed for updates */
#define V2_INIT_ALLOC 4
#define V2_INIT_REWRITE 2
#define V2_DEL_ALLOC 0
#define V2_DEL_REWRITE 6
/* Inmemory copy of version specific information */
struct v2_mem_dqinfo {
        unsigned int dqi_blocks;
@@ -315,7 +315,7 @@ extern struct efi_memory_map memmap;
 */
static inline int efi_range_is_wc(unsigned long start, unsigned long len)
{
        int i;
        unsigned long i;
        for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
                unsigned long paddr = __pa(start + i);
@@ -16,9 +16,9 @@ typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int);
typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
@@ -96,9 +96,9 @@ extern struct request *elv_former_request(request_queue_t *, struct request *);
extern struct request *elv_latter_request(request_queue_t *, struct request *);
extern int elv_register_queue(request_queue_t *q);
extern void elv_unregister_queue(request_queue_t *q);
extern int elv_may_queue(request_queue_t *, int);
extern int elv_may_queue(request_queue_t *, int, struct bio *);
extern void elv_completed_request(request_queue_t *, struct request *);
extern int elv_set_request(request_queue_t *, struct request *, int);
extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
extern void elv_put_request(request_queue_t *, struct request *);
/*
@@ -25,6 +25,7 @@
#define _LINUX_ETHERDEVICE_H
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#ifdef __KERNEL__
@@ -65,7 +66,7 @@ static inline int is_zero_ether_addr(const u8 *addr)
 */
static inline int is_multicast_ether_addr(const u8 *addr)
{
        return addr[0] & 0x01;
        return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
/**
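A side note on the is_multicast_ether_addr() change above (illustration only): with the added addr[0] != 0xff test, the all-ones broadcast address no longer counts as multicast, while ordinary multicast addresses still do. A tiny user-space check using the same two expressions:

#include <stdio.h>

static int old_check(const unsigned char *addr)
{
        return addr[0] & 0x01;
}

static int new_check(const unsigned char *addr)
{
        return (addr[0] != 0xff) && (0x01 & addr[0]);
}

int main(void)
{
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("broadcast: old=%d new=%d\n", old_check(bcast), new_check(bcast));   /* 1 vs 0 */
        printf("multicast: old=%d new=%d\n", old_check(mcast), new_check(mcast));   /* 1 vs 1 */
        return 0;
}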
@@ -300,18 +300,19 @@ struct ext2_inode {
/*
 * Mount flags
 */
#define EXT2_MOUNT_CHECK 0x0001 /* Do mount-time checks */
#define EXT2_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */
#define EXT2_MOUNT_GRPID 0x0004 /* Create files with directory's group */
#define EXT2_MOUNT_DEBUG 0x0008 /* Some debugging messages */
#define EXT2_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
#define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x0100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */
#define EXT2_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */
#define EXT2_MOUNT_POSIX_ACL 0x8000 /* POSIX Access Control Lists */
#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
@@ -358,6 +358,7 @@ struct ext3_inode {
#define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
#ifndef _LINUX_EXT2_FS_H
@@ -42,15 +42,15 @@
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates. */
#define EXT3_DATA_TRANS_BLOCKS (EXT3_SINGLEDATA_TRANS_BLOCKS + \
#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
        EXT3_XATTR_TRANS_BLOCKS - 2 + \
        2*EXT3_QUOTA_TRANS_BLOCKS)
        2*EXT3_QUOTA_TRANS_BLOCKS(sb))
/* Delete operations potentially hit one directory's namespace plus an
 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
 * generous. We can grow the delete transaction later if necessary. */
#define EXT3_DELETE_TRANS_BLOCKS (2 * EXT3_DATA_TRANS_BLOCKS + 64)
#define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64)
/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction. For unbounded transactions such as
@@ -74,14 +74,17 @@
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
 * allocated so we need to update only inode+data */
#define EXT3_QUOTA_TRANS_BLOCKS 2
#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
 * but inode, sb and group updates are done only once */
#define EXT3_QUOTA_INIT_BLOCKS (DQUOT_MAX_WRITES*\
        (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3)
#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
        (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
        (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT3_QUOTA_TRANS_BLOCKS 0
#define EXT3_QUOTA_INIT_BLOCKS 0
#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
#endif
int
@@ -524,11 +524,11 @@ struct fb_pixmap {
        u32 offset; /* current offset to buffer */
        u32 buf_align; /* byte alignment of each bitmap */
        u32 scan_align; /* alignment per scanline */
        u32 access_align; /* alignment per read/write */
        u32 access_align; /* alignment per read/write (bits) */
        u32 flags; /* see FB_PIXMAP_* */
        /* access methods */
        void (*outbuf)(struct fb_info *info, u8 *addr, u8 *src, unsigned int size);
        u8 (*inbuf) (struct fb_info *info, u8 *addr);
        void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
        void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};
@@ -816,18 +816,9 @@ extern int unregister_framebuffer(struct fb_info *fb_info);
extern int fb_prepare_logo(struct fb_info *fb_info);
extern int fb_show_logo(struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_iomove_buf_unaligned(struct fb_info *info, struct fb_pixmap *buf,
        u8 *dst, u32 d_pitch, u8 *src, u32 idx,
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
        u32 height, u32 shift_high, u32 shift_low, u32 mod);
extern void fb_iomove_buf_aligned(struct fb_info *info, struct fb_pixmap *buf,
        u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch,
        u32 height);
extern void fb_sysmove_buf_unaligned(struct fb_info *info, struct fb_pixmap *buf,
        u8 *dst, u32 d_pitch, u8 *src, u32 idx,
        u32 height, u32 shift_high, u32 shift_low, u32 mod);
extern void fb_sysmove_buf_aligned(struct fb_info *info, struct fb_pixmap *buf,
        u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch,
        u32 height);
extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height);
extern void fb_set_suspend(struct fb_info *info, int state);
extern int fb_get_color_depth(struct fb_var_screeninfo *var);
extern int fb_get_options(char *name, char **option);
@@ -25,6 +25,10 @@
#ifdef __KERNEL__
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
#endif
#if BITS_PER_LONG == 32
#define IS_GETLK32(cmd) ((cmd) == F_GETLK)
#define IS_SETLK32(cmd) ((cmd) == F_SETLK)
@@ -25,19 +25,23 @@ struct font_desc {
#define VGA8x16_IDX 1
#define PEARL8x8_IDX 2
#define VGA6x11_IDX 3
#define SUN8x16_IDX 4
#define SUN12x22_IDX 5
#define ACORN8x8_IDX 6
#define MINI4x6_IDX 7
#define FONT7x14_IDX 4
#define FONT10x18_IDX 5
#define SUN8x16_IDX 6
#define SUN12x22_IDX 7
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
extern struct font_desc font_vga_8x8,
        font_vga_8x16,
        font_pearl_8x8,
        font_vga_6x11,
        font_sun_8x16,
        font_sun_12x22,
        font_acorn_8x8,
        font_mini_4x6;
        font_vga_8x16,
        font_pearl_8x8,
        font_vga_6x11,
        font_7x14,
        font_10x18,
        font_sun_8x16,
        font_sun_12x22,
        font_acorn_8x8,
        font_mini_4x6;
/* Find a font with a specific name */
@@ -213,6 +213,7 @@ extern int dir_notify_enable;
#include <linux/radix-tree.h>
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
@@ -220,6 +221,7 @@ extern int dir_notify_enable;
struct iovec;
struct nameidata;
struct kiocb;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
@@ -240,7 +242,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
        unsigned long max_blocks,
        struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct inode *inode, loff_t offset,
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
        ssize_t bytes, void *private);
/*
@@ -302,7 +304,6 @@ struct iattr {
struct page;
struct address_space;
struct writeback_control;
struct kiocb;
struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -330,6 +331,8 @@ struct address_space_operations {
        int (*releasepage) (struct page *, int);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs);
        struct page* (*get_xip_page)(struct address_space *, sector_t,
                int);
};
struct backing_dev_info;
@@ -581,7 +584,6 @@ struct file {
        atomic_t f_count;
        unsigned int f_flags;
        mode_t f_mode;
        int f_error;
        loff_t f_pos;
        struct fown_struct f_owner;
        unsigned int f_uid, f_gid;
@@ -674,6 +676,7 @@ struct file_lock {
        struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
        union {
                struct nfs_lock_info nfs_fl;
                struct nfs4_lock_info nfs4_fl;
        } fl_u;
};
@@ -820,16 +823,34 @@ enum {
#define vfs_check_frozen(sb, level) \
        wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
static inline void get_fs_excl(void)
{
        atomic_inc(&current->fs_excl);
}
static inline void put_fs_excl(void)
{
        atomic_dec(&current->fs_excl);
}
static inline int has_fs_excl(void)
{
        return atomic_read(&current->fs_excl);
}
/*
 * Superblock locking.
 */
static inline void lock_super(struct super_block * sb)
{
        get_fs_excl();
        down(&sb->s_lock);
}
static inline void unlock_super(struct super_block * sb)
{
        put_fs_excl();
        up(&sb->s_lock);
}
@@ -883,7 +904,9 @@ struct block_device_operations {
        int (*open) (struct inode *, struct file *);
        int (*release) (struct inode *, struct file *);
        int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
        long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t, unsigned long *);
        int (*media_changed) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        struct module *owner;
@@ -1024,6 +1047,7 @@ struct super_operations {
#define I_FREEING 16
#define I_CLEAR 32
#define I_NEW 64
#define I_WILL_FREE 128
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -1494,6 +1518,23 @@ extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_FS_XIP
extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
        loff_t *ppos);
extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
        size_t count, read_actor_t actor,
        void *target);
extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
        size_t len, loff_t *ppos);
extern int xip_truncate_page(struct address_space *mapping, loff_t from);
#else
static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
{
        return 0;
}
#endif
static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
        read_descriptor_t * desc,
        read_actor_t actor)
@@ -1657,6 +1698,52 @@ static inline void simple_transaction_set(struct file *file, size_t n)
        ar->size = n;
}
/*
 * simple attribute files
 *
 * These attributes behave similar to those in sysfs:
 *
 * Writing to an attribute immediately sets a value, an open file can be
 * written to multiple times.
 *
 * Reading from an attribute creates a buffer from the value that might get
 * read with multiple read calls. When the attribute has been read
 * completely, no further read calls are possible until the file is opened
 * again.
 *
 * All attributes contain a text representation of a numeric value
 * that are accessed with the get() and set() functions.
 */
#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
        __simple_attr_check_format(__fmt, 0ull); \
        return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static struct file_operations __fops = { \
        .owner = THIS_MODULE, \
        .open = __fops ## _open, \
        .release = simple_attr_close, \
        .read = simple_attr_read, \
        .write = simple_attr_write, \
};
static inline void __attribute__((format(printf, 1, 2)))
__simple_attr_check_format(const char *fmt, ...)
{
        /* don't do anything, just let the compiler check the arguments; */
}
int simple_attr_open(struct inode *inode, struct file *file,
        u64 (*get)(void *), void (*set)(void *, u64),
        const char *fmt);
int simple_attr_close(struct inode *inode, struct file *file);
ssize_t simple_attr_read(struct file *file, char __user *buf,
        size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
        size_t len, loff_t *ppos);
#ifdef CONFIG_SECURITY
static inline char *alloc_secdata(void)
{
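For illustration only (not part of the diff): a sketch of how a driver might define a "simple attribute" file with the DEFINE_SIMPLE_ATTRIBUTE() helper above. The counter and the function names are invented for the example.

static u64 example_counter;

static u64 example_get(void *data)
{
        return example_counter;                 /* value printed on read */
}

static void example_set(void *data, u64 val)
{
        example_counter = val;                  /* value parsed from a write */
}

DEFINE_SIMPLE_ATTRIBUTE(example_attr_fops, example_get, example_set, "%llu\n");

/* example_attr_fops can then be handed to whatever creates the file, e.g. a
 * debugfs_create_file("counter", 0644, parent, NULL, &example_attr_fops)
 * style call in code that uses this facility. */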
@@ -51,6 +51,7 @@ struct gianfar_platform_data {
        /* board specific information */
        u32 board_flags;
        u32 phy_flags;
        u32 phyid;
        u32 interruptPHY;
        u8 mac_addr[6];
@@ -61,9 +62,14 @@ struct gianfar_platform_data {
#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
/* Flags in gianfar_platform_data */
#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* if not set use a timer */
#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
struct fsl_i2c_platform_data {
        /* device specific information */
include/linux/genalloc.h (new file, 40 lines)
@@ -0,0 +1,40 @@
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this includes on-device special memory, uncached memory
 * etc.
 *
 * This code is based on the buddy allocator found in the sym53c8xx_2
 * driver, adapted for general purpose use.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/spinlock.h>
#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */
/*
 * Link between free memory chunks of a given size.
 */
struct gen_pool_link {
        struct gen_pool_link *next;
};
/*
 * Memory pool descriptor.
 */
struct gen_pool {
        spinlock_t lock;
        unsigned long (*get_new_chunk)(struct gen_pool *);
        struct gen_pool *next;
        struct gen_pool_link *h;
        unsigned long private;
        int max_chunk_shift;
};
unsigned long gen_pool_alloc(struct gen_pool *poolp, int size);
void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size);
struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
        unsigned long (*fp)(struct gen_pool *),
        unsigned long data);
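An illustrative sketch only, built from the prototypes above (the chunk size, the callback behaviour and the SRAM addresses are invented for the example): the pool calls get_new_chunk() to obtain the start address of each backing chunk, and then hands out sub-allocations with gen_pool_alloc()/gen_pool_free().

#include <linux/genalloc.h>
#include <linux/errno.h>

#define EXAMPLE_CHUNK_SHIFT     16              /* 64 KiB per chunk (assumed) */

static unsigned long example_sram_base;         /* set up elsewhere, e.g. by ioremap() */
static int example_next_chunk;

static unsigned long example_get_new_chunk(struct gen_pool *pool)
{
        /* hand back the next 64 KiB slice of the device memory */
        return example_sram_base + (example_next_chunk++ << EXAMPLE_CHUNK_SHIFT);
}

static int example_use_pool(void)
{
        struct gen_pool *pool;
        unsigned long buf;

        pool = gen_pool_create(4, EXAMPLE_CHUNK_SHIFT, example_get_new_chunk, 0);
        if (!pool)
                return -ENOMEM;

        buf = gen_pool_alloc(pool, 256);        /* carve 256 bytes out of the pool */
        if (!buf)
                return -ENOMEM;

        gen_pool_free(pool, buf, 256);          /* the size must match the allocation */
        return 0;
}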
@@ -224,7 +224,7 @@ static inline void free_disk_stats(struct gendisk *disk)
extern void disk_round_stats(struct gendisk *disk);
/* drivers/block/genhd.c */
extern int get_blkdev_list(char *);
extern int get_blkdev_list(char *, int);
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);
@@ -403,6 +403,7 @@ extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern void add_partition(struct gendisk *, int, sector_t, sector_t);
extern void delete_partition(struct gendisk *, int);
extern struct gendisk *alloc_disk_node(int minors, int node_id);
extern struct gendisk *alloc_disk(int minors);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
@@ -39,6 +39,7 @@ struct vm_area_struct;
#define __GFP_COMP 0x4000u /* Add compound page metadata */
#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
#define __GFP_NORECLAIM 0x20000u /* No realy zone reclaim during allocation */
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -47,7 +48,7 @@ struct vm_area_struct;
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
        __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
        __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
        __GFP_NOMEMALLOC)
        __GFP_NOMEMALLOC|__GFP_NORECLAIM)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
@@ -132,5 +133,10 @@ extern void FASTCALL(free_cold_page(struct page *page));
#define free_page(addr) free_pages((addr),0)
void page_alloc_init(void);
#ifdef CONFIG_NUMA
void drain_remote_pages(void);
#else
static inline void drain_remote_pages(void) { };
#endif
#endif /* __LINUX_GFP_H */
@@ -28,6 +28,7 @@ static inline void *kmap(struct page *page)
#define kmap_atomic(page, idx) page_address(page)
#define kunmap_atomic(addr, idx) do { } while (0)
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif /* CONFIG_HIGHMEM */
@@ -4,6 +4,7 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
struct ctl_table;
@@ -22,12 +23,6 @@ int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
        int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        pmd_t *pmd, int write);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
struct page *alloc_huge_page(void);
void free_huge_page(struct page *);
@@ -35,6 +30,17 @@ extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
        int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        pmd_t *pmd, int write);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
@@ -48,6 +54,28 @@ extern int sysctl_hugetlb_shm_group;
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif
#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
        pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        pte_t *ptep);
#endif
#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm) do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif
#ifndef ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE
#define hugetlb_clean_stale_pgtable(pte) BUG()
#else
void hugetlb_clean_stale_pgtable(pte_t *pte);
#endif
#else /* !CONFIG_HUGETLB_PAGE */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
include/linux/hwmon-sysfs.h (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
 * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines
 *
 * Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_HWMON_SYSFS_H
#define _LINUX_HWMON_SYSFS_H
struct sensor_device_attribute{
        struct device_attribute dev_attr;
        int index;
};
#define to_sensor_dev_attr(_dev_attr) \
        container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
#define SENSOR_DEVICE_ATTR(_name,_mode,_show,_store,_index) \
struct sensor_device_attribute sensor_dev_attr_##_name = { \
        .dev_attr = __ATTR(_name,_mode,_show,_store), \
        .index = _index, \
}
#endif /* _LINUX_HWMON_SYSFS_H */
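For illustration only (not part of the new header): a sketch of how a hardware monitoring driver can use SENSOR_DEVICE_ATTR() so one show routine serves several inputs, recovering the per-attribute index with to_sensor_dev_attr(). The show_temp()/read_temp() names are invented for the example.

static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);

        /* sattr->index selects which temperature channel to report */
        return sprintf(buf, "%d\n", read_temp(dev, sattr->index));
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);

/* the sysfs files are then created against
 * &sensor_dev_attr_temp1_input.dev_attr and
 * &sensor_dev_attr_temp2_input.dev_attr, e.g. with device_create_file(). */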
@@ -25,6 +25,7 @@
#define _LINUX_I2C_DEV_H
#include <linux/types.h>
#include <linux/compiler.h>
/* Some IOCTL commands are defined in <linux/i2c.h> */
/* Note: 10-bit addresses are NOT supported! */
@@ -108,6 +108,7 @@
#define I2C_DRIVERID_TDA7313 62 /* TDA7313 audio processor */
#define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */
#define I2C_DRIVERID_SAA7114H 64 /* video decoder */
#define I2C_DRIVERID_DS1374 65 /* DS1374 real time clock */
#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
@@ -97,3 +97,15 @@ static inline int vid_from_reg(int val, int vrm)
                2050 - (val) * 50);
        }
}
static inline int vid_to_reg(int val, int vrm)
{
        switch (vrm) {
        case 91: /* VRM 9.1 */
        case 90: /* VRM 9.0 */
                return ((val >= 1100) && (val <= 1850) ?
                        ((18499 - val * 10) / 25 + 5) / 10 : -1);
        default:
                return -1;
        }
}
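A worked example of the vid_to_reg() arithmetic above (illustration only, checked by hand in user space): converting 1450 mV with a VRM 9.x table.

#include <assert.h>

int main(void)
{
        int val = 1450;                                 /* millivolts */
        int reg = ((18499 - val * 10) / 25 + 5) / 10;   /* (18499-14500)/25 = 159; (159+5)/10 = 16 */

        assert(reg == 16);                              /* vid_to_reg(1450, 90) would return 16 */
        return 0;
}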
@@ -290,11 +290,8 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
 */
struct i2c_client_address_data {
        unsigned short *normal_i2c;
        unsigned short *normal_i2c_range;
        unsigned short *probe;
        unsigned short *probe_range;
        unsigned short *ignore;
        unsigned short *ignore_range;
        unsigned short *force;
};
@@ -563,24 +560,15 @@ union i2c_smbus_data {
#define I2C_CLIENT_INSMOD \
        I2C_CLIENT_MODULE_PARM(probe, \
                "List of adapter,address pairs to scan additionally"); \
        I2C_CLIENT_MODULE_PARM(probe_range, \
                "List of adapter,start-addr,end-addr triples to scan " \
                "additionally"); \
        I2C_CLIENT_MODULE_PARM(ignore, \
                "List of adapter,address pairs not to scan"); \
        I2C_CLIENT_MODULE_PARM(ignore_range, \
                "List of adapter,start-addr,end-addr triples not to " \
                "scan"); \
        I2C_CLIENT_MODULE_PARM(force, \
                "List of adapter,address pairs to boldly assume " \
                "to be present"); \
        static struct i2c_client_address_data addr_data = { \
                .normal_i2c = normal_i2c, \
                .normal_i2c_range = normal_i2c_range, \
                .probe = probe, \
                .probe_range = probe_range, \
                .ignore = ignore, \
                .ignore_range = ignore_range, \
                .force = force, \
        }
@@ -24,6 +24,13 @@
#define MAX_I2O_CONTROLLERS 32
//#include <linux/ioctl.h>
#ifndef __KERNEL__
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
#endif /* __KERNEL__ */
/*
 * I2O Control IOCTLs and structures
@@ -113,6 +120,10 @@ struct i2o_evt_get {
        int lost;
};
typedef struct i2o_sg_io_hdr {
        unsigned int flags; /* see I2O_DPT_SG_IO_FLAGS */
} i2o_sg_io_hdr_t;
/**************************************************************************
 * HRT related constants and structures
 **************************************************************************/
@@ -126,14 +137,6 @@ struct i2o_evt_get {
#define I2O_BUS_CARDBUS 7
#define I2O_BUS_UNKNOWN 0x80
#ifndef __KERNEL__
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
#endif /* __KERNEL__ */
typedef struct _i2o_pci_bus {
        u8 PciFunctionNumber;
        u8 PciDeviceNumber;
@@ -333,7 +336,7 @@ typedef struct _i2o_status_block {
#define I2O_CLASS_ATE_PERIPHERAL 0x061
#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
#define I2O_CLASS_FLOPPY_DEVICE 0x071
#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
#define I2O_CLASS_BUS_ADAPTER 0x080
#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
#define I2O_CLASS_PEER_TRANSPORT 0x091
#define I2O_CLASS_END 0xfff
@@ -399,4 +402,26 @@ typedef struct _i2o_status_block {
#define ADAPTER_STATE_FAILED 0x10
#define ADAPTER_STATE_FAULTED 0x11
/*
 * Software module types
 */
#define I2O_SOFTWARE_MODULE_IRTOS 0x11
#define I2O_SOFTWARE_MODULE_IOP_PRIVATE 0x22
#define I2O_SOFTWARE_MODULE_IOP_CONFIG 0x23
/*
 * Vendors
 */
#define I2O_VENDOR_DPT 0x001b
/*
 * DPT / Adaptec specific values for i2o_sg_io_hdr flags.
 */
#define I2O_DPT_SG_FLAG_INTERPRET 0x00010000
#define I2O_DPT_SG_FLAG_PHYSICAL 0x00020000
#define I2O_DPT_FLASH_FRAG_SIZE 0x10000
#define I2O_DPT_FLASH_READ 0x0101
#define I2O_DPT_FLASH_WRITE 0x0102
#endif /* _I2O_DEV_H */
@@ -119,12 +119,21 @@ struct i2o_driver {
};
/*
 * Contains all information which are necessary for DMA operations
 * Contains DMA mapped address information
 */
struct i2o_dma {
        void *virt;
        dma_addr_t phys;
        u32 len;
        size_t len;
};
/*
 * Contains IO mapped address information
 */
struct i2o_io {
        void __iomem *virt;
        unsigned long phys;
        unsigned long len;
};
/*
@@ -147,28 +156,25 @@ struct i2o_controller {
        struct pci_dev *pdev; /* PCI device */
        unsigned int short_req:1; /* use small block sizes */
        unsigned int promise:1; /* Promise controller */
        unsigned int adaptec:1; /* DPT / Adaptec controller */
        unsigned int raptor:1; /* split bar */
        unsigned int no_quiesce:1; /* dont quiesce before reset */
        unsigned int raptor:1; /* split bar */
        unsigned int promise:1; /* Promise controller */
#ifdef CONFIG_MTRR
        int mtrr_reg0;
        int mtrr_reg1;
#endif
        unsigned int short_req:1; /* use small block sizes */
        unsigned int limit_sectors:1; /* limit number of sectors / request */
        unsigned int pae_support:1; /* controller has 64-bit SGL support */
        struct list_head devices; /* list of I2O devices */
        struct notifier_block *event_notifer; /* Events */
        atomic_t users;
        struct list_head list; /* Controller list */
        void __iomem *post_port; /* Inbout port address */
        void __iomem *reply_port; /* Outbound port address */
        void __iomem *irq_mask; /* Interrupt register address */
        void __iomem *in_port; /* Inbout port address */
        void __iomem *out_port; /* Outbound port address */
        void __iomem *irq_status; /* Interrupt status register address */
        void __iomem *irq_mask; /* Interrupt mask register address */
        /* Dynamic LCT related data */
        struct i2o_dma status; /* status of IOP */
        struct i2o_dma status; /* IOP status block */
        struct i2o_dma hrt; /* HW Resource Table */
        i2o_lct *lct; /* Logical Config Table */
@@ -176,21 +182,19 @@ struct i2o_controller {
        struct semaphore lct_lock; /* Lock for LCT updates */
        struct i2o_dma status_block; /* IOP status block */
        struct i2o_dma base; /* controller messaging unit */
        struct i2o_dma in_queue; /* inbound message queue Host->IOP */
        struct i2o_io base; /* controller messaging unit */
        struct i2o_io in_queue; /* inbound message queue Host->IOP */
        struct i2o_dma out_queue; /* outbound message queue IOP->Host */
        unsigned int battery:1; /* Has a battery backup */
        unsigned int io_alloc:1; /* An I/O resource was allocated */
        unsigned int mem_alloc:1; /* A memory resource was allocated */
        struct resource io_resource; /* I/O resource allocated to the IOP */
        struct resource mem_resource; /* Mem resource allocated to the IOP */
        struct proc_dir_entry *proc_entry; /* /proc dir */
        struct list_head bus_list; /* list of busses on IOP */
        struct device device;
        struct class_device classdev; /* I2O controller class */
        struct i2o_device *exec; /* Executive */
#if BITS_PER_LONG == 64
        spinlock_t context_list_lock; /* lock for context_list */
@@ -241,9 +245,10 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
static inline u32 i2o_msg_get(struct i2o_controller *, struct i2o_message __iomem **);
extern u32 i2o_msg_get_wait(struct i2o_controller *, struct i2o_message __iomem **,
        int);
static inline u32 i2o_msg_get(struct i2o_controller *,
        struct i2o_message __iomem **);
extern u32 i2o_msg_get_wait(struct i2o_controller *,
        struct i2o_message __iomem **, int);
static inline void i2o_msg_post(struct i2o_controller *, u32);
|
||||
static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
|
||||
unsigned long);
|
||||
|
@ -252,15 +257,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
|
|||
extern void i2o_msg_nop(struct i2o_controller *, u32);
|
||||
static inline void i2o_flush_reply(struct i2o_controller *, u32);
|
||||
|
||||
/* DMA handling functions */
|
||||
static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
|
||||
unsigned int);
|
||||
static inline void i2o_dma_free(struct device *, struct i2o_dma *);
|
||||
int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
|
||||
|
||||
static inline int i2o_dma_map(struct device *, struct i2o_dma *);
|
||||
static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
|
||||
|
||||
/* IOP functions */
|
||||
extern int i2o_status_get(struct i2o_controller *);
|
||||
|
||||
|
@ -285,6 +281,16 @@ static inline u32 i2o_ptr_high(void *ptr)
|
|||
{
|
||||
return (u32) ((u64) ptr >> 32);
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) (u64) dma_addr;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) ((u64) dma_addr >> 32);
|
||||
};
|
||||
#else
|
||||
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
|
||||
{
|
||||
|
@ -315,8 +321,246 @@ static inline u32 i2o_ptr_high(void *ptr)
|
|||
{
|
||||
return 0;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
|
||||
{
|
||||
return (u32) dma_addr;
|
||||
};
|
||||
|
||||
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
 *	i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 *	@c: I2O controller for which the calculation should be done
 *	@body_size: maximum body size used for message in 32-bit words.
 *
 *	Return the maximum number of SG elements in a SG list.
 */
static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	u16 sg_count =
	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * for 64-bit a SG attribute element must be added and each
		 * SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
};
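
To make the calculation above concrete (the numbers are illustrative, not taken from the patch): assuming a 32-word inbound frame, a four-word message header (sizeof(struct i2o_message) / 4 == 4) and a 6-word message body, sg_count starts at 32 - 4 - 6 = 22 words. Without 64-bit SGLs every element takes two words, so 11 elements fit; with pae_support two words are reserved for the SG attribute element and each element takes three words, giving (22 - 2) / 3 = 6 elements. A short_req controller would additionally cap the result at 8.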
|
||||
|
||||
/**
|
||||
* i2o_dma_map_single - Map pointer to controller and fill in I2O message.
|
||||
* @c: I2O controller
|
||||
* @ptr: pointer to the data which should be mapped
|
||||
* @size: size of data in bytes
|
||||
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
|
||||
* @sg_ptr: pointer to the SG list inside the I2O message
|
||||
*
|
||||
* This function does all necessary DMA handling and also writes the I2O
|
||||
* SGL elements into the I2O message. For details on DMA handling see also
|
||||
* dma_map_single(). The pointer sg_ptr will only be set to the end of the
|
||||
* SG list if the allocation was successful.
|
||||
*
|
||||
* Returns DMA address which must be checked for failures using
|
||||
* dma_mapping_error().
|
||||
*/
|
||||
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
|
||||
size_t size,
|
||||
enum dma_data_direction direction,
|
||||
u32 __iomem ** sg_ptr)
|
||||
{
|
||||
u32 sg_flags;
|
||||
u32 __iomem *mptr = *sg_ptr;
|
||||
dma_addr_t dma_addr;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
sg_flags = 0xd4000000;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
sg_flags = 0xd0000000;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
|
||||
if (!dma_mapping_error(dma_addr)) {
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
||||
writel(0x7C020002, mptr++);
|
||||
writel(PAGE_SIZE, mptr++);
|
||||
}
|
||||
#endif
|
||||
|
||||
writel(sg_flags | size, mptr++);
|
||||
writel(i2o_dma_low(dma_addr), mptr++);
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
|
||||
writel(i2o_dma_high(dma_addr), mptr++);
|
||||
#endif
|
||||
*sg_ptr = mptr;
|
||||
}
|
||||
return dma_addr;
|
||||
};
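
A hedged usage sketch of the i2o_dma_map_single() helper above, not part of the patch: the message layout, the four-word header offset and the buf/len parameters are assumptions made for illustration only.

static int example_post_buffer(struct i2o_controller *c,
			       struct i2o_message __iomem *msg,
			       void *buf, size_t len)
{
	/* assume the SG list starts four 32-bit words into the frame */
	u32 __iomem *mptr = (u32 __iomem *) msg + 4;
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
	if (dma_mapping_error(addr))
		return -ENOMEM;

	/* mptr now points just past the SG element(s) the helper wrote */
	return 0;
}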
|
||||
|
||||
/**
|
||||
* i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
|
||||
* @c: I2O controller
|
||||
* @sg: SG list to be mapped
|
||||
* @sg_count: number of elements in the SG list
|
||||
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
|
||||
* @sg_ptr: pointer to the SG list inside the I2O message
|
||||
*
|
||||
* This function does all necessary DMA handling and also writes the I2O
|
||||
* SGL elements into the I2O message. For details on DMA handling see also
|
||||
* dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
|
||||
* list if the allocation was successful.
|
||||
*
|
||||
* Returns 0 on failure or 1 on success.
|
||||
*/
|
||||
static inline int i2o_dma_map_sg(struct i2o_controller *c,
|
||||
struct scatterlist *sg, int sg_count,
|
||||
enum dma_data_direction direction,
|
||||
u32 __iomem ** sg_ptr)
|
||||
{
|
||||
u32 sg_flags;
|
||||
u32 __iomem *mptr = *sg_ptr;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
sg_flags = 0x14000000;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
sg_flags = 0x10000000;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
|
||||
if (!sg_count)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
||||
writel(0x7C020002, mptr++);
|
||||
writel(PAGE_SIZE, mptr++);
|
||||
}
|
||||
#endif
|
||||
|
||||
while (sg_count-- > 0) {
|
||||
if (!sg_count)
|
||||
sg_flags |= 0xC0000000;
|
||||
writel(sg_flags | sg_dma_len(sg), mptr++);
|
||||
writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
|
||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
|
||||
writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
|
||||
#endif
|
||||
sg++;
|
||||
}
|
||||
*sg_ptr = mptr;
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_alloc - Allocate DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should get the DMA buffer
|
||||
* @len: length of the new DMA memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* Allocate a coherent DMA memory and write the pointers into addr.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
int dma_64 = 0;
|
||||
|
||||
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
|
||||
dma_64 = 1;
|
||||
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
|
||||
|
||||
if ((sizeof(dma_addr_t) > 4) && dma_64)
|
||||
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
|
||||
printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
|
||||
|
||||
if (!addr->virt)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(addr->virt, 0, len);
|
||||
addr->len = len;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_free - Free DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which contains the DMA buffer
|
||||
*
|
||||
* Free a coherent DMA memory and set virtual address of addr to NULL.
|
||||
*/
|
||||
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (addr->virt) {
|
||||
if (addr->phys)
|
||||
dma_free_coherent(dev, addr->len, addr->virt,
|
||||
addr->phys);
|
||||
else
|
||||
kfree(addr->virt);
|
||||
addr->virt = NULL;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_realloc - Realloc DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: pointer to a i2o_dma struct DMA buffer
|
||||
* @len: new length of memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* If there was something allocated in the addr, free it first. If len > 0
|
||||
* than try to allocate it and write the addresses back to the addr
|
||||
* structure. If len == 0 set the virtual address to NULL.
|
||||
*
|
||||
* Returns the 0 on success or negative error code on failure.
|
||||
*/
|
||||
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
i2o_dma_free(dev, addr);
|
||||
|
||||
if (len)
|
||||
return i2o_dma_alloc(dev, addr, len, gfp_mask);
|
||||
|
||||
return 0;
|
||||
};
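
A small usage sketch for the allocation helpers above (illustrative only; the buffer sizes and the table use case are assumed, not taken from the patch):

static int example_fetch_table(struct i2o_controller *c)
{
	struct i2o_dma buf;
	int rc;

	rc = i2o_dma_alloc(&c->pdev->dev, &buf, 4096, GFP_KERNEL);
	if (rc)
		return rc;

	/* ... if the table later outgrows the buffer ... */
	rc = i2o_dma_realloc(&c->pdev->dev, &buf, 8192, GFP_KERNEL);
	if (rc)
		return rc;

	i2o_dma_free(&c->pdev->dev, &buf);
	return 0;
}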
|
||||
|
||||
/* I2O driver (OSM) functions */
|
||||
extern int i2o_driver_register(struct i2o_driver *);
|
||||
extern void i2o_driver_unregister(struct i2o_driver *);
|
||||
|
@ -385,49 +629,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
|
|||
/* Exec OSM functions */
|
||||
extern int i2o_exec_lct_get(struct i2o_controller *);
|
||||
|
||||
/* device to i2o_device and driver to i2o_driver convertion functions */
|
||||
/* device / driver / kobject conversion functions */
|
||||
#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
|
||||
#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
|
||||
|
||||
/*
|
||||
* Messenger inlines
|
||||
*/
|
||||
static inline u32 I2O_POST_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->post_port);
|
||||
};
|
||||
|
||||
static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->post_port);
|
||||
};
|
||||
|
||||
static inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->reply_port);
|
||||
};
|
||||
|
||||
static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->reply_port);
|
||||
};
|
||||
|
||||
static inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
|
||||
{
|
||||
rmb();
|
||||
return readl(c->irq_mask);
|
||||
};
|
||||
|
||||
static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
|
||||
{
|
||||
wmb();
|
||||
writel(val, c->irq_mask);
|
||||
wmb();
|
||||
};
|
||||
#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
|
||||
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
|
||||
|
||||
/**
|
||||
* i2o_msg_get - obtain an I2O message from the IOP
|
||||
|
@ -443,11 +649,11 @@ static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
|
|||
* available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
|
||||
*/
|
||||
static inline u32 i2o_msg_get(struct i2o_controller *c,
|
||||
struct i2o_message __iomem **msg)
|
||||
struct i2o_message __iomem ** msg)
|
||||
{
|
||||
u32 m;
|
||||
u32 m = readl(c->in_port);
|
||||
|
||||
if ((m = I2O_POST_READ32(c)) != I2O_QUEUE_EMPTY)
|
||||
if (m != I2O_QUEUE_EMPTY)
|
||||
*msg = c->in_queue.virt + m;
|
||||
|
||||
return m;
|
||||
|
@ -462,7 +668,7 @@ static inline u32 i2o_msg_get(struct i2o_controller *c,
|
|||
*/
|
||||
static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
|
||||
{
|
||||
I2O_POST_WRITE32(c, m);
|
||||
writel(m, c->in_port);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -491,12 +697,10 @@ static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
|
|||
* The I2O controller must be informed that the reply message is not needed
|
||||
* anymore. If you forget to flush the reply, the message frame can't be
|
||||
* used by the controller anymore and is therefore lost.
|
||||
*
|
||||
* FIXME: is there a timeout after which the controller reuse the message?
|
||||
*/
|
||||
static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
|
||||
{
|
||||
I2O_REPLY_WRITE32(c, m);
|
||||
writel(m, c->out_port);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -530,97 +734,13 @@ static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
|
|||
* work for receive side messages as they are kmalloc objects
|
||||
* in a different pool.
|
||||
*/
|
||||
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct i2o_controller *c,
|
||||
u32 m)
|
||||
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
|
||||
i2o_controller *c,
|
||||
u32 m)
|
||||
{
|
||||
return c->in_queue.virt + m;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_alloc - Allocate DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should get the DMA buffer
|
||||
* @len: length of the new DMA memory
|
||||
* @gfp_mask: GFP mask
|
||||
*
|
||||
* Allocate a coherent DMA memory and write the pointers into addr.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
|
||||
size_t len, unsigned int gfp_mask)
|
||||
{
|
||||
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
|
||||
if (!addr->virt)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(addr->virt, 0, len);
|
||||
addr->len = len;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_free - Free DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which contains the DMA buffer
|
||||
*
|
||||
* Free a coherent DMA memory and set virtual address of addr to NULL.
|
||||
*/
|
||||
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (addr->virt) {
|
||||
if (addr->phys)
|
||||
dma_free_coherent(dev, addr->len, addr->virt,
|
||||
addr->phys);
|
||||
else
|
||||
kfree(addr->virt);
|
||||
addr->virt = NULL;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_map - Map the memory to DMA
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should be mapped
|
||||
*
|
||||
* Map the memory in addr->virt to coherent DMA memory and write the
|
||||
* physical address into addr->phys.
|
||||
*
|
||||
* Returns 0 on success or -ENOMEM on failure.
|
||||
*/
|
||||
static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (!addr->virt)
|
||||
return -EFAULT;
|
||||
|
||||
if (!addr->phys)
|
||||
addr->phys = dma_map_single(dev, addr->virt, addr->len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!addr->phys)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* i2o_dma_unmap - Unmap the DMA memory
|
||||
* @dev: struct device pointer to the PCI device of the I2O controller
|
||||
* @addr: i2o_dma struct which should be unmapped
|
||||
*
|
||||
* Unmap the memory in addr->virt from DMA memory.
|
||||
*/
|
||||
static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
|
||||
{
|
||||
if (!addr->virt)
|
||||
return;
|
||||
|
||||
if (addr->phys) {
|
||||
dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
|
||||
addr->phys = 0;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Endian handling wrapped into the macro - keeps the core code
|
||||
* cleaner.
|
||||
|
@ -772,6 +892,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_CMD_SCSI_ABORT 0x83
|
||||
#define I2O_CMD_SCSI_BUSRESET 0x27
|
||||
|
||||
/*
|
||||
* Bus Adapter Class
|
||||
*/
|
||||
#define I2O_CMD_BUS_ADAPTER_RESET 0x85
|
||||
#define I2O_CMD_BUS_RESET 0x87
|
||||
#define I2O_CMD_BUS_SCAN 0x89
|
||||
#define I2O_CMD_BUS_QUIESCE 0x8b
|
||||
|
||||
/*
|
||||
* Random Block Storage Class
|
||||
*/
|
||||
|
@ -784,7 +912,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_CMD_BLOCK_MEJECT 0x43
|
||||
#define I2O_CMD_BLOCK_POWER 0x70
|
||||
|
||||
#define I2O_PRIVATE_MSG 0xFF
|
||||
#define I2O_CMD_PRIVATE 0xFF
|
||||
|
||||
/* Command status values */
|
||||
|
||||
|
@ -922,7 +1050,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2OVER15 0x0001
|
||||
#define I2OVER20 0x0002
|
||||
|
||||
/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
|
||||
/* Default is 1.5 */
|
||||
#define I2OVERSION I2OVER15
|
||||
|
||||
#define SGL_OFFSET_0 I2OVERSION
|
||||
|
@ -933,9 +1061,9 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
|
||||
#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
|
||||
#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
|
||||
|
||||
#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
|
||||
#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
|
||||
#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
|
||||
#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
|
||||
#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
|
||||
|
||||
/* Transaction Reply Lists (TRL) Control Word structure */
|
||||
#define TRL_SINGLE_FIXED_LENGTH 0x00
|
||||
|
@ -962,17 +1090,13 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define ELEVEN_WORD_MSG_SIZE 0x000B0000
|
||||
#define I2O_MESSAGE_SIZE(x) ((x)<<16)
|
||||
|
||||
/* Special TID Assignments */
|
||||
|
||||
/* special TID assignments */
|
||||
#define ADAPTER_TID 0
|
||||
#define HOST_TID 1
|
||||
|
||||
#define MSG_FRAME_SIZE 128 /* i2o_scsi assumes >= 32 */
|
||||
#define REPLY_FRAME_SIZE 17
|
||||
#define SG_TABLESIZE 30
|
||||
#define NMBR_MSG_FRAMES 128
|
||||
|
||||
#define MSG_POOL_SIZE (MSG_FRAME_SIZE*NMBR_MSG_FRAMES*sizeof(u32))
|
||||
/* outbound queue defines */
|
||||
#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
|
||||
#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
|
||||
|
||||
#define I2O_POST_WAIT_OK 0
|
||||
#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
|
||||
|
@ -993,11 +1117,10 @@ extern void i2o_debug_state(struct i2o_controller *c);
|
|||
#define I2O_HRT_GET_TRIES 3
|
||||
#define I2O_LCT_GET_TRIES 3
|
||||
|
||||
/* request queue sizes */
|
||||
/* defines for max_sectors and max_phys_segments */
|
||||
#define I2O_MAX_SECTORS 1024
|
||||
#define I2O_MAX_SEGMENTS 128
|
||||
|
||||
#define I2O_REQ_MEMPOOL_SIZE 32
|
||||
#define I2O_MAX_SECTORS_LIMITED 256
|
||||
#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _I2O_H */
|
||||
|
|
|
@@ -917,7 +917,7 @@ typedef struct hwif_s {
	unsigned	dma;

	void (*led_act)(void *data, int rw);
} ide_hwif_t;
} ____cacheline_maxaligned_in_smp ide_hwif_t;

/*
 *	internal ide interrupt handler type
@ -35,6 +35,9 @@
|
|||
*
|
||||
* 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
|
||||
* - Code cleanup and style changes
|
||||
*
|
||||
* 2005/05/05 - Jason Gabler <jygabler at lbl dot gov>
|
||||
* - added definitions for various XOR hashing policies
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IF_BONDING_H
|
||||
|
@ -80,6 +83,10 @@
|
|||
|
||||
#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */
|
||||
|
||||
/* hashing types */
|
||||
#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */
|
||||
#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ MAC) */
|
||||
|
||||
typedef struct ifbond {
|
||||
__s32 bond_mode;
|
||||
__s32 num_slaves;
|
||||
|
|
|
@@ -156,7 +156,7 @@ struct in6_flowlabel_req
#define IPV6_CHECKSUM		7
#define IPV6_HOPLIMIT		8
#define IPV6_NEXTHOP		9
#define IPV6_AUTHHDR		10
#define IPV6_AUTHHDR		10	/* obsolete */
#define IPV6_FLOWINFO		11

#define IPV6_UNICAST_HOPS	16
@@ -229,6 +229,18 @@ void __init parse_early_param(void);
#define __devexitdata __exitdata
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit
#define __cpuinitdata
#define __cpuexit
#define __cpuexitdata
#else
#define __cpuinit	__init
#define __cpuinitdata	__initdata
#define __cpuexit	__exit
#define __cpuexitdata	__exitdata
#endif

/* Functions marked as __devexit may be discarded at kernel link time, depending
   on config options.  Newer versions of binutils detect references from
   retained sections to discarded sections and flag an error.  Pointers to
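
A minimal sketch of how the new __cpuinit annotations are meant to be used (the function is invented for illustration): with CONFIG_HOTPLUG_CPU the code must stay resident, otherwise it may be discarded after boot like ordinary __init code.

static int __cpuinit example_prepare_cpu(unsigned int cpu)
{
	/* per-CPU setup that is also needed when a CPU is hot-added later */
	return 0;
}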
@ -81,6 +81,7 @@ extern struct group_info init_groups;
|
|||
.mm = NULL, \
|
||||
.active_mm = &init_mm, \
|
||||
.run_list = LIST_HEAD_INIT(tsk.run_list), \
|
||||
.ioprio = 0, \
|
||||
.time_slice = HZ, \
|
||||
.tasks = LIST_HEAD_INIT(tsk.tasks), \
|
||||
.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
|
||||
|
@ -108,9 +109,9 @@ extern struct group_info init_groups;
|
|||
.blocked = {{0}}, \
|
||||
.alloc_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.proc_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.switch_lock = SPIN_LOCK_UNLOCKED, \
|
||||
.journal_info = NULL, \
|
||||
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
|
||||
.fs_excl = ATOMIC_INIT(0), \
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -859,6 +859,10 @@ struct input_dev {
|
|||
int (*erase_effect)(struct input_dev *dev, int effect_id);
|
||||
|
||||
struct input_handle *grab;
|
||||
|
||||
struct semaphore sem; /* serializes open and close operations */
|
||||
unsigned int users;
|
||||
|
||||
struct device *dev;
|
||||
|
||||
struct list_head h_list;
|
||||
|
@ -1015,7 +1019,7 @@ static inline void input_set_abs_params(struct input_dev *dev, int axis, int min
|
|||
dev->absbit[LONG(axis)] |= BIT(axis);
|
||||
}
|
||||
|
||||
extern struct class_simple *input_class;
|
||||
extern struct class *input_class;
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
179
include/linux/ioc4.h
Normal file
|
@ -0,0 +1,179 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IOC4_H
|
||||
#define _LINUX_IOC4_H
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
/***************
|
||||
* Definitions *
|
||||
***************/
|
||||
|
||||
/* Miscellaneous values inherent to hardware */
|
||||
|
||||
#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */
|
||||
|
||||
/***********************************
|
||||
* Structures needed by subdrivers *
|
||||
***********************************/
|
||||
|
||||
/* This structure fully describes the IOC4 miscellaneous registers which
|
||||
* appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding
|
||||
* PCI resource is managed by the main IOC4 driver because it contains
|
||||
* registers of interest to many different IOC4 subdrivers.
|
||||
*/
|
||||
struct ioc4_misc_regs {
|
||||
/* Miscellaneous IOC4 registers */
|
||||
union ioc4_pci_err_addr_l {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t valid:1; /* Address captured */
|
||||
uint32_t master_id:4; /* Unit causing error
|
||||
* 0/1: Serial port 0 TX/RX
|
||||
* 2/3: Serial port 1 TX/RX
|
||||
* 4/5: Serial port 2 TX/RX
|
||||
* 6/7: Serial port 3 TX/RX
|
||||
* 8: ATA/ATAPI
|
||||
* 9-15: Undefined
|
||||
*/
|
||||
uint32_t mul_err:1; /* Multiple errors occurred */
|
||||
uint32_t addr:26; /* Bits 31-6 of error addr */
|
||||
} fields;
|
||||
} pci_err_addr_l;
|
||||
uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */
|
||||
union ioc4_sio_int {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint8_t tx_mt:1; /* TX ring buffer empty */
|
||||
uint8_t rx_full:1; /* RX ring buffer full */
|
||||
uint8_t rx_high:1; /* RX high-water exceeded */
|
||||
uint8_t rx_timer:1; /* RX timer has triggered */
|
||||
uint8_t delta_dcd:1; /* DELTA_DCD seen */
|
||||
uint8_t delta_cts:1; /* DELTA_CTS seen */
|
||||
uint8_t intr_pass:1; /* Interrupt pass-through */
|
||||
uint8_t tx_explicit:1; /* TX, MCW, or delay complete */
|
||||
} fields[4];
|
||||
} sio_ir; /* Serial interrupt state */
|
||||
union ioc4_other_int {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t ata_int:1; /* ATA port passthru */
|
||||
uint32_t ata_memerr:1; /* ATA halted by mem error */
|
||||
uint32_t memerr:4; /* Serial halted by mem err */
|
||||
uint32_t kbd_int:1; /* kbd/mouse intr asserted */
|
||||
uint32_t reserved:16; /* zero */
|
||||
uint32_t rt_int:1; /* INT_OUT section latch */
|
||||
uint32_t gen_int:8; /* Intr. from generic pins */
|
||||
} fields;
|
||||
} other_ir; /* Other interrupt state */
|
||||
union ioc4_sio_int sio_ies; /* Serial interrupt enable set */
|
||||
union ioc4_other_int other_ies; /* Other interrupt enable set */
|
||||
union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */
|
||||
union ioc4_other_int other_iec; /* Other interrupt enable clear */
|
||||
union ioc4_sio_cr {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t cmd_pulse:4; /* Bytebus strobe width */
|
||||
uint32_t arb_diag:3; /* PCI bus requester */
|
||||
uint32_t sio_diag_idle:1; /* Active ser req? */
|
||||
uint32_t ata_diag_idle:1; /* Active ATA req? */
|
||||
uint32_t ata_diag_active:1; /* ATA req is winner */
|
||||
uint32_t reserved:22; /* zero */
|
||||
} fields;
|
||||
} sio_cr;
|
||||
uint32_t unused1;
|
||||
union ioc4_int_out {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t count:16; /* Period control */
|
||||
uint32_t mode:3; /* Output signal shape */
|
||||
uint32_t reserved:11; /* zero */
|
||||
uint32_t diag:1; /* Timebase control */
|
||||
uint32_t int_out:1; /* Current value */
|
||||
} fields;
|
||||
} int_out; /* External interrupt output control */
|
||||
uint32_t unused2;
|
||||
union ioc4_gpcr {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t dir:8; /* Pin direction */
|
||||
uint32_t edge:8; /* Edge/level mode */
|
||||
uint32_t reserved1:4; /* zero */
|
||||
uint32_t int_out_en:1; /* INT_OUT enable */
|
||||
uint32_t reserved2:11; /* zero */
|
||||
} fields;
|
||||
} gpcr_s; /* Generic PIO control set */
|
||||
union ioc4_gpcr gpcr_c; /* Generic PIO control clear */
|
||||
union ioc4_gpdr {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t gen_pin:8; /* State of pins */
|
||||
uint32_t reserved:24;
|
||||
} fields;
|
||||
} gpdr; /* Generic PIO data */
|
||||
uint32_t unused3;
|
||||
union ioc4_gppr {
|
||||
uint32_t raw;
|
||||
struct {
|
||||
uint32_t gen_pin:1; /* Single pin state */
|
||||
uint32_t reserved:31;
|
||||
} fields;
|
||||
} gppr[8]; /* Generic PIO pins */
|
||||
};
|
||||
|
||||
/* Masks for GPCR DIR pins */
|
||||
#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */
|
||||
#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */
|
||||
#define IOC4_GPCR_DIR_2 0x04
|
||||
#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */
|
||||
#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */
|
||||
#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */
|
||||
#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */
|
||||
#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */
|
||||
|
||||
/* Masks for GPCR EDGE pins */
|
||||
#define IOC4_GPCR_EDGE_0 0x01
|
||||
#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */
|
||||
#define IOC4_GPCR_EDGE_2 0x04
|
||||
#define IOC4_GPCR_EDGE_3 0x08
|
||||
#define IOC4_GPCR_EDGE_4 0x10
|
||||
#define IOC4_GPCR_EDGE_5 0x20
|
||||
#define IOC4_GPCR_EDGE_6 0x40
|
||||
#define IOC4_GPCR_EDGE_7 0x80
|
||||
|
||||
/* One of these per IOC4 */
|
||||
struct ioc4_driver_data {
|
||||
struct list_head idd_list;
|
||||
unsigned long idd_bar0;
|
||||
struct pci_dev *idd_pdev;
|
||||
const struct pci_device_id *idd_pci_id;
|
||||
struct __iomem ioc4_misc_regs *idd_misc_regs;
|
||||
unsigned long count_period;
|
||||
void *idd_serial_data;
|
||||
};
|
||||
|
||||
/* One per submodule */
|
||||
struct ioc4_submodule {
|
||||
struct list_head is_list;
|
||||
char *is_name;
|
||||
struct module *is_owner;
|
||||
int (*is_probe) (struct ioc4_driver_data *);
|
||||
int (*is_remove) (struct ioc4_driver_data *);
|
||||
};
|
||||
|
||||
#define IOC4_NUM_CARDS 8 /* max cards per partition */
|
||||
|
||||
/**********************************
|
||||
* Functions needed by submodules *
|
||||
**********************************/
|
||||
|
||||
extern int ioc4_register_submodule(struct ioc4_submodule *);
|
||||
extern void ioc4_unregister_submodule(struct ioc4_submodule *);
|
||||
|
||||
#endif /* _LINUX_IOC4_H */
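
A hedged sketch, not part of the patch, of how an IOC4 subdriver might plug into the submodule interface declared above; the probe/remove bodies are placeholders.

static int example_ioc4_probe(struct ioc4_driver_data *idd)
{
	/* set up the subdriver's part of this IOC4 instance */
	return 0;
}

static int example_ioc4_remove(struct ioc4_driver_data *idd)
{
	return 0;
}

static struct ioc4_submodule example_submodule = {
	.is_name   = "example",
	.is_owner  = THIS_MODULE,
	.is_probe  = example_ioc4_probe,
	.is_remove = example_ioc4_remove,
};

/* module init/exit would call ioc4_register_submodule()/ioc4_unregister_submodule() */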
|
|
@ -1,21 +0,0 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IOC4_COMMON_H
|
||||
#define _LINUX_IOC4_COMMON_H
|
||||
|
||||
/* prototypes */
|
||||
|
||||
int ioc4_serial_init(void);
|
||||
|
||||
int ioc4_serial_attach_one(struct pci_dev *pdev, const struct
|
||||
pci_device_id *pci_id);
|
||||
int ioc4_ide_attach_one(struct pci_dev *pdev, const struct
|
||||
pci_device_id *pci_id);
|
||||
|
||||
#endif /* _LINUX_IOC4_COMMON_H */
|
88
include/linux/ioprio.h
Normal file

@@ -0,0 +1,88 @@
#ifndef IOPRIO_H
#define IOPRIO_H

#include <linux/sched.h>

/*
 * Gives us 8 prio classes with 13-bits of data for each class
 */
#define IOPRIO_BITS		(16)
#define IOPRIO_CLASS_SHIFT	(13)
#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)

#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)	((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | data)

#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)

/*
 * These are the io priority groups as implemented by CFQ. RT is the realtime
 * class, it always gets premium service. BE is the best-effort scheduling
 * class, the default for any process. IDLE is the idle scheduling class, it
 * is only served when no one else is using the disk.
 */
enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

/*
 * 8 best effort priority levels are supported
 */
#define IOPRIO_BE_NR	(8)

asmlinkage int sys_ioprio_set(int, int, int);
asmlinkage int sys_ioprio_get(int, int);

enum {
	IOPRIO_WHO_PROCESS = 1,
	IOPRIO_WHO_PGRP,
	IOPRIO_WHO_USER,
};

/*
 * if process has set io priority explicitly, use that. if not, convert
 * the cpu scheduler nice value to an io priority
 */
#define IOPRIO_NORM	(4)
static inline int task_ioprio(struct task_struct *task)
{
	WARN_ON(!ioprio_valid(task->ioprio));
	return IOPRIO_PRIO_DATA(task->ioprio);
}

static inline int task_nice_ioprio(struct task_struct *task)
{
	return (task_nice(task) + 20) / 5;
}

/*
 * For inheritance, return the highest of the two given priorities
 */
static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (!ioprio_valid(aprio))
		return bprio;
	if (!ioprio_valid(bprio))
		return aprio;

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

#endif
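
An illustration of the priority encoding above (not from the patch): composing a value, taking it apart again, and letting ioprio_best() arbitrate between two values.

int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);	/* best-effort, level 4 */

/* IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_BE */
/* IOPRIO_PRIO_DATA(prio)  == 4 */
/* ioprio_best(prio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) returns the RT value,
 * since the realtime class always wins over best-effort. */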
@ -209,6 +209,11 @@ struct kernel_ipmi_msg
|
|||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
#include <linux/proc_fs.h>
|
||||
extern struct proc_dir_entry *proc_ipmi_root;
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
/* Opaque type for a IPMI message user. One of these is needed to
|
||||
send and receive messages. */
|
||||
typedef struct ipmi_user *ipmi_user_t;
|
||||
|
|
|
@ -47,6 +47,10 @@ struct hw_interrupt_type {
|
|||
void (*ack)(unsigned int irq);
|
||||
void (*end)(unsigned int irq);
|
||||
void (*set_affinity)(unsigned int irq, cpumask_t dest);
|
||||
/* Currently used only by UML, might disappear one day.*/
|
||||
#ifdef CONFIG_IRQ_RELEASE_METHOD
|
||||
void (*release)(unsigned int irq, void *dev_id);
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct hw_interrupt_type hw_irq_controller;
|
||||
|
@ -81,10 +85,10 @@ extern int no_irq_affinity;
|
|||
extern int noirqdebug_setup(char *str);
|
||||
|
||||
extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
|
||||
struct irqaction *action);
|
||||
struct irqaction *action);
|
||||
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
|
||||
extern void note_interrupt(unsigned int irq, irq_desc_t *desc, int action_ret);
|
||||
extern void report_bad_irq(unsigned int irq, irq_desc_t *desc, int action_ret);
|
||||
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
|
||||
int action_ret, struct pt_regs *regs);
|
||||
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
|
||||
|
||||
extern void init_irq_proc(void);
|
||||
|
|
|
@ -111,18 +111,35 @@ struct js_corr {
|
|||
#define JS_SET_ALL 8
|
||||
|
||||
struct JS_DATA_TYPE {
|
||||
int buttons;
|
||||
int x;
|
||||
int y;
|
||||
__s32 buttons;
|
||||
__s32 x;
|
||||
__s32 y;
|
||||
};
|
||||
|
||||
struct JS_DATA_SAVE_TYPE {
|
||||
int JS_TIMEOUT;
|
||||
int BUSY;
|
||||
long JS_EXPIRETIME;
|
||||
long JS_TIMELIMIT;
|
||||
struct JS_DATA_SAVE_TYPE_32 {
|
||||
__s32 JS_TIMEOUT;
|
||||
__s32 BUSY;
|
||||
__s32 JS_EXPIRETIME;
|
||||
__s32 JS_TIMELIMIT;
|
||||
struct JS_DATA_TYPE JS_SAVE;
|
||||
struct JS_DATA_TYPE JS_CORR;
|
||||
};
|
||||
|
||||
struct JS_DATA_SAVE_TYPE_64 {
|
||||
__s32 JS_TIMEOUT;
|
||||
__s32 BUSY;
|
||||
__s64 JS_EXPIRETIME;
|
||||
__s64 JS_TIMELIMIT;
|
||||
struct JS_DATA_TYPE JS_SAVE;
|
||||
struct JS_DATA_TYPE JS_CORR;
|
||||
};
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64
|
||||
#elif BITS_PER_LONG == 32
|
||||
#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32
|
||||
#else
|
||||
#error Unexpected BITS_PER_LONG
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_JOYSTICK_H */
|
||||
|
|
|
@@ -58,15 +58,23 @@ struct completion;
 * be biten later when the calling function happens to sleep when it is not
 * supposed to.
 */
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
#define might_sleep() __might_sleep(__FILE__, __LINE__)
#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
void __might_sleep(char *file, int line);
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int cond_resched(void);
# define might_resched() cond_resched()
#else
#define might_sleep() do {} while(0)
#define might_sleep_if(cond) do {} while (0)
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
#else
# define might_sleep() do { might_resched(); } while (0)
#endif

#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)

#define abs(x) ({				\
	int __x = (x);				\
	(__x < 0) ? -__x : __x;			\
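
A minimal sketch of a caller of the reworked annotation above (the function is made up): besides the debug check, might_sleep() now doubles as a voluntary preemption point when CONFIG_PREEMPT_VOLUNTARY is set.

static void example_wait_for_flag(volatile int *flag)
{
	might_sleep();		/* we are about to sleep; may also reschedule */

	while (!*flag)
		msleep(1);
}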
135
include/linux/kexec.h
Normal file
|
@ -0,0 +1,135 @@
|
|||
#ifndef LINUX_KEXEC_H
|
||||
#define LINUX_KEXEC_H
|
||||
|
||||
#ifdef CONFIG_KEXEC
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/compat.h>
|
||||
#include <asm/kexec.h>
|
||||
|
||||
/* Verify architecture specific macros are defined */
|
||||
|
||||
#ifndef KEXEC_SOURCE_MEMORY_LIMIT
|
||||
#error KEXEC_SOURCE_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_DESTINATION_MEMORY_LIMIT
|
||||
#error KEXEC_DESTINATION_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_CONTROL_MEMORY_LIMIT
|
||||
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_CONTROL_CODE_SIZE
|
||||
#error KEXEC_CONTROL_CODE_SIZE not defined
|
||||
#endif
|
||||
|
||||
#ifndef KEXEC_ARCH
|
||||
#error KEXEC_ARCH not defined
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This structure is used to hold the arguments that are used when loading
|
||||
* kernel binaries.
|
||||
*/
|
||||
|
||||
typedef unsigned long kimage_entry_t;
|
||||
#define IND_DESTINATION 0x1
|
||||
#define IND_INDIRECTION 0x2
|
||||
#define IND_DONE 0x4
|
||||
#define IND_SOURCE 0x8
|
||||
|
||||
#define KEXEC_SEGMENT_MAX 8
|
||||
struct kexec_segment {
|
||||
void __user *buf;
|
||||
size_t bufsz;
|
||||
unsigned long mem; /* User space sees this as a (void *) ... */
|
||||
size_t memsz;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct compat_kexec_segment {
|
||||
compat_uptr_t buf;
|
||||
compat_size_t bufsz;
|
||||
compat_ulong_t mem; /* User space sees this as a (void *) ... */
|
||||
compat_size_t memsz;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct kimage {
|
||||
kimage_entry_t head;
|
||||
kimage_entry_t *entry;
|
||||
kimage_entry_t *last_entry;
|
||||
|
||||
unsigned long destination;
|
||||
|
||||
unsigned long start;
|
||||
struct page *control_code_page;
|
||||
|
||||
unsigned long nr_segments;
|
||||
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
|
||||
|
||||
struct list_head control_pages;
|
||||
struct list_head dest_pages;
|
||||
struct list_head unuseable_pages;
|
||||
|
||||
/* Address of next control page to allocate for crash kernels. */
|
||||
unsigned long control_page;
|
||||
|
||||
/* Flags to indicate special processing */
|
||||
unsigned int type : 1;
|
||||
#define KEXEC_TYPE_DEFAULT 0
|
||||
#define KEXEC_TYPE_CRASH 1
|
||||
};
|
||||
|
||||
|
||||
|
||||
/* kexec interface functions */
|
||||
extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
|
||||
extern int machine_kexec_prepare(struct kimage *image);
|
||||
extern void machine_kexec_cleanup(struct kimage *image);
|
||||
extern asmlinkage long sys_kexec_load(unsigned long entry,
|
||||
unsigned long nr_segments,
|
||||
struct kexec_segment __user *segments,
|
||||
unsigned long flags);
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
|
||||
unsigned long nr_segments,
|
||||
struct compat_kexec_segment __user *segments,
|
||||
unsigned long flags);
|
||||
#endif
|
||||
extern struct page *kimage_alloc_control_pages(struct kimage *image,
|
||||
unsigned int order);
|
||||
extern void crash_kexec(struct pt_regs *);
|
||||
int kexec_should_crash(struct task_struct *);
|
||||
extern struct kimage *kexec_image;
|
||||
|
||||
#define KEXEC_ON_CRASH 0x00000001
|
||||
#define KEXEC_ARCH_MASK 0xffff0000
|
||||
|
||||
/* These values match the ELF architecture values.
|
||||
* Unless there is a good reason that should continue to be the case.
|
||||
*/
|
||||
#define KEXEC_ARCH_DEFAULT ( 0 << 16)
|
||||
#define KEXEC_ARCH_386 ( 3 << 16)
|
||||
#define KEXEC_ARCH_X86_64 (62 << 16)
|
||||
#define KEXEC_ARCH_PPC (20 << 16)
|
||||
#define KEXEC_ARCH_PPC64 (21 << 16)
|
||||
#define KEXEC_ARCH_IA_64 (50 << 16)
|
||||
#define KEXEC_ARCH_S390 (22 << 16)
|
||||
|
||||
#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */
|
||||
|
||||
/* Location of a reserved region to hold the crash kernel.
|
||||
*/
|
||||
extern struct resource crashk_res;
|
||||
|
||||
#else /* !CONFIG_KEXEC */
|
||||
struct pt_regs;
|
||||
struct task_struct;
|
||||
static inline void crash_kexec(struct pt_regs *regs) { }
|
||||
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
|
||||
#endif /* CONFIG_KEXEC */
|
||||
#endif /* LINUX_KEXEC_H */
|
|
@ -1,4 +1,4 @@
|
|||
/* key-ui.h: key userspace interface stuff for use by keyfs
|
||||
/* key-ui.h: key userspace interface stuff
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
|
@ -31,8 +31,10 @@ extern spinlock_t key_serial_lock;
|
|||
* subscribed
|
||||
*/
|
||||
struct keyring_list {
|
||||
unsigned maxkeys; /* max keys this list can hold */
|
||||
unsigned nkeys; /* number of keys currently held */
|
||||
struct rcu_head rcu; /* RCU deletion hook */
|
||||
unsigned short maxkeys; /* max keys this list can hold */
|
||||
unsigned short nkeys; /* number of keys currently held */
|
||||
unsigned short delkey; /* key to be unlinked by RCU */
|
||||
struct key *keys[0];
|
||||
};
|
||||
|
||||
|
@ -82,8 +84,45 @@ static inline int key_any_permission(const struct key *key, key_perm_t perm)
|
|||
return kperm != 0;
|
||||
}
|
||||
|
||||
static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
extern struct key *lookup_user_key(key_serial_t id, int create, int part,
|
||||
task_lock(tsk);
|
||||
ret = groups_search(tsk->group_info, gid);
|
||||
task_unlock(tsk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int key_task_permission(const struct key *key,
|
||||
struct task_struct *context,
|
||||
key_perm_t perm)
|
||||
{
|
||||
key_perm_t kperm;
|
||||
|
||||
if (key->uid == context->fsuid) {
|
||||
kperm = key->perm >> 16;
|
||||
}
|
||||
else if (key->gid != -1 &&
|
||||
key->perm & KEY_GRP_ALL && (
|
||||
key->gid == context->fsgid ||
|
||||
key_task_groups_search(context, key->gid)
|
||||
)
|
||||
) {
|
||||
kperm = key->perm >> 8;
|
||||
}
|
||||
else {
|
||||
kperm = key->perm;
|
||||
}
|
||||
|
||||
kperm = kperm & perm & KEY_ALL;
|
||||
|
||||
return kperm == perm;
|
||||
|
||||
}
|
||||
|
||||
extern struct key *lookup_user_key(struct task_struct *context,
|
||||
key_serial_t id, int create, int partial,
|
||||
key_perm_t perm);
|
||||
|
||||
extern long join_session_keyring(const char *name);
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
@ -78,7 +78,6 @@ struct key {
|
|||
key_serial_t serial; /* key serial number */
|
||||
struct rb_node serial_node;
|
||||
struct key_type *type; /* type of key */
|
||||
rwlock_t lock; /* examination vs change lock */
|
||||
struct rw_semaphore sem; /* change vs change sem */
|
||||
struct key_user *user; /* owner of this key */
|
||||
time_t expiry; /* time at which key expires (or 0) */
|
||||
|
@ -86,14 +85,10 @@ struct key {
|
|||
gid_t gid;
|
||||
key_perm_t perm; /* access permissions */
|
||||
unsigned short quotalen; /* length added to quota */
|
||||
unsigned short datalen; /* payload data length */
|
||||
unsigned short flags; /* status flags (change with lock writelocked) */
|
||||
#define KEY_FLAG_INSTANTIATED 0x00000001 /* set if key has been instantiated */
|
||||
#define KEY_FLAG_DEAD 0x00000002 /* set if key type has been deleted */
|
||||
#define KEY_FLAG_REVOKED 0x00000004 /* set if key had been revoked */
|
||||
#define KEY_FLAG_IN_QUOTA 0x00000008 /* set if key consumes quota */
|
||||
#define KEY_FLAG_USER_CONSTRUCT 0x00000010 /* set if key is being constructed in userspace */
|
||||
#define KEY_FLAG_NEGATIVE 0x00000020 /* set if key is negative */
|
||||
unsigned short datalen; /* payload data length
|
||||
* - may not match RCU dereferenced payload
|
||||
* - payload should contain own length
|
||||
*/
|
||||
|
||||
#ifdef KEY_DEBUGGING
|
||||
unsigned magic;
|
||||
|
@ -101,6 +96,14 @@ struct key {
|
|||
#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
|
||||
#endif
|
||||
|
||||
unsigned long flags; /* status flags (change with bitops) */
|
||||
#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
|
||||
#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
|
||||
#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
|
||||
#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
|
||||
#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
|
||||
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
|
||||
|
||||
/* the description string
|
||||
* - this is used to match a key against search criteria
|
||||
* - this should be a printable string
|
||||
|
@ -196,10 +199,12 @@ extern int key_payload_reserve(struct key *key, size_t datalen);
|
|||
extern int key_instantiate_and_link(struct key *key,
|
||||
const void *data,
|
||||
size_t datalen,
|
||||
struct key *keyring);
|
||||
struct key *keyring,
|
||||
struct key *instkey);
|
||||
extern int key_negate_and_link(struct key *key,
|
||||
unsigned timeout,
|
||||
struct key *keyring);
|
||||
struct key *keyring,
|
||||
struct key *instkey);
|
||||
extern void key_revoke(struct key *key);
|
||||
extern void key_put(struct key *key);
|
||||
|
||||
|
@ -242,14 +247,13 @@ extern struct key *keyring_search(struct key *keyring,
|
|||
struct key_type *type,
|
||||
const char *description);
|
||||
|
||||
extern struct key *search_process_keyrings(struct key_type *type,
|
||||
const char *description);
|
||||
|
||||
extern int keyring_add_key(struct key *keyring,
|
||||
struct key *key);
|
||||
|
||||
extern struct key *key_lookup(key_serial_t id);
|
||||
|
||||
extern void keyring_replace_payload(struct key *key, void *replacement);
|
||||
|
||||
#define key_serial(key) ((key) ? (key)->serial : 0)
|
||||
|
||||
/*
|
||||
|
@ -268,14 +272,22 @@ extern void key_fsuid_changed(struct task_struct *tsk);
|
|||
extern void key_fsgid_changed(struct task_struct *tsk);
|
||||
extern void key_init(void);
|
||||
|
||||
#define __install_session_keyring(tsk, keyring) \
|
||||
({ \
|
||||
struct key *old_session = tsk->signal->session_keyring; \
|
||||
tsk->signal->session_keyring = keyring; \
|
||||
old_session; \
|
||||
})
|
||||
|
||||
#else /* CONFIG_KEYS */
|
||||
|
||||
#define key_validate(k) 0
|
||||
#define key_serial(k) 0
|
||||
#define key_get(k) NULL
|
||||
#define key_get(k) ({ NULL; })
|
||||
#define key_put(k) do { } while(0)
|
||||
#define alloc_uid_keyring(u) 0
|
||||
#define switch_uid_keyring(u) do { } while(0)
|
||||
#define __install_session_keyring(t, k) ({ NULL; })
|
||||
#define copy_keys(f,t) 0
|
||||
#define copy_thread_group_keys(t) 0
|
||||
#define exit_keys(t) do { } while(0)
|
||||
|
|
|
@ -20,6 +20,16 @@
|
|||
#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
|
||||
#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
|
||||
|
||||
/* request-key default keyrings */
|
||||
#define KEY_REQKEY_DEFL_NO_CHANGE -1
|
||||
#define KEY_REQKEY_DEFL_DEFAULT 0
|
||||
#define KEY_REQKEY_DEFL_THREAD_KEYRING 1
|
||||
#define KEY_REQKEY_DEFL_PROCESS_KEYRING 2
|
||||
#define KEY_REQKEY_DEFL_SESSION_KEYRING 3
|
||||
#define KEY_REQKEY_DEFL_USER_KEYRING 4
|
||||
#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5
|
||||
#define KEY_REQKEY_DEFL_GROUP_KEYRING 6
|
||||
|
||||
/* keyctl commands */
|
||||
#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
|
||||
#define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */
|
||||
|
@ -35,5 +45,6 @@
|
|||
#define KEYCTL_READ 11 /* read a key or keyring's contents */
|
||||
#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */
|
||||
#define KEYCTL_NEGATE 13 /* negate a partially constructed key */
|
||||
#define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */
|
||||
|
||||
#endif /* _LINUX_KEYCTL_H */
|
||||
|
|
55
include/linux/klist.h
Normal file

@@ -0,0 +1,55 @@
/*
 *	klist.h - Some generic list helpers, extending struct list_head a bit.
 *
 * Implementations are found in lib/klist.c
 *
 *
 * Copyright (C) 2005 Patrick Mochel
 *
 * This file is rleased under the GPL v2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>


struct klist {
	spinlock_t		k_lock;
	struct list_head	k_list;
};


extern void klist_init(struct klist * k);


struct klist_node {
	struct klist		* n_klist;
	struct list_head	n_node;
	struct kref		n_ref;
	struct completion	n_removed;
};

extern void klist_add_tail(struct klist * k, struct klist_node * n);
extern void klist_add_head(struct klist * k, struct klist_node * n);

extern void klist_del(struct klist_node * n);
extern void klist_remove(struct klist_node * n);

extern int klist_node_attached(struct klist_node * n);


struct klist_iter {
	struct klist		* i_klist;
	struct list_head	* i_head;
	struct klist_node	* i_cur;
};


extern void klist_iter_init(struct klist * k, struct klist_iter * i);
extern void klist_iter_init_node(struct klist * k, struct klist_iter * i,
				 struct klist_node * n);
extern void klist_iter_exit(struct klist_iter * i);
extern struct klist_node * klist_next(struct klist_iter * i);
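
A hedged usage sketch of the iterator API above (illustration only; the example_item structure is invented): klist_next() keeps the current node pinned, so entries may safely disappear while the walk is in progress.

struct example_item {
	struct klist_node	node;
	int			value;
};

static void example_walk(struct klist *list)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(list, &iter);
	while ((n = klist_next(&iter))) {
		struct example_item *item =
			container_of(n, struct example_item, node);
		/* use item->value here */
	}
	klist_iter_exit(&iter);
}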
@@ -19,6 +19,7 @@
 *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/stddef.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/compiler.h>

@@ -34,7 +35,17 @@ static inline int request_module(const char * name, ...) { return -ENOSYS; }
#endif

#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
extern int call_usermodehelper(char *path, char *argv[], char *envp[], int wait);

struct key;
extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[],
				    struct key *session_keyring, int wait);

static inline int
call_usermodehelper(char *path, char **argv, char **envp, int wait)
{
	return call_usermodehelper_keys(path, argv, envp, NULL, wait);
}

extern void usermodehelper_init(void);

#endif /* __LINUX_KMOD_H__ */

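
A short usage sketch for the wrapper above (the helper path and arguments are invented): with the new signature, callers that do not need to pass a session keyring keep using call_usermodehelper() unchanged.

static int example_run_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "--probe", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	return call_usermodehelper(argv[0], argv, envp, 1 /* wait */);
}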
@ -33,7 +33,7 @@
|
|||
extern u64 hotplug_seqnum;
|
||||
|
||||
struct kobject {
|
||||
char * k_name;
|
||||
const char * k_name;
|
||||
char name[KOBJ_NAME_LEN];
|
||||
struct kref kref;
|
||||
struct list_head entry;
|
||||
|
@ -46,7 +46,7 @@ struct kobject {
|
|||
extern int kobject_set_name(struct kobject *, const char *, ...)
|
||||
__attribute__((format(printf,2,3)));
|
||||
|
||||
static inline char * kobject_name(struct kobject * kobj)
|
||||
static inline const char * kobject_name(const struct kobject * kobj)
|
||||
{
|
||||
return kobj->k_name;
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ extern void kobject_cleanup(struct kobject *);
|
|||
extern int kobject_add(struct kobject *);
|
||||
extern void kobject_del(struct kobject *);
|
||||
|
||||
extern int kobject_rename(struct kobject *, char *new_name);
|
||||
extern int kobject_rename(struct kobject *, const char *new_name);
|
||||
|
||||
extern int kobject_register(struct kobject *);
|
||||
extern void kobject_unregister(struct kobject *);
|
||||
|
@ -94,7 +94,7 @@ struct kobj_type {
|
|||
*/
|
||||
struct kset_hotplug_ops {
|
||||
int (*filter)(struct kset *kset, struct kobject *kobj);
|
||||
char *(*name)(struct kset *kset, struct kobject *kobj);
|
||||
const char *(*name)(struct kset *kset, struct kobject *kobj);
|
||||
int (*hotplug)(struct kset *kset, struct kobject *kobj, char **envp,
|
||||
int num_envp, char *buffer, int buffer_size);
|
||||
};
|
||||
|
|
|
@ -25,27 +25,45 @@
|
|||
* Rusty Russell).
|
||||
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
|
||||
* interface to access function arguments.
|
||||
* 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston
|
||||
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
|
||||
* <prasanna@in.ibm.com> added function-return probes.
|
||||
*/
|
||||
#include <linux/config.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <asm/kprobes.h>
|
||||
|
||||
/* kprobe_status settings */
|
||||
#define KPROBE_HIT_ACTIVE 0x00000001
|
||||
#define KPROBE_HIT_SS 0x00000002
|
||||
#define KPROBE_REENTER 0x00000004
|
||||
#define KPROBE_HIT_SSDONE 0x00000008
|
||||
|
||||
struct kprobe;
|
||||
struct pt_regs;
|
||||
struct kretprobe;
|
||||
struct kretprobe_instance;
|
||||
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
|
||||
typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
|
||||
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
|
||||
unsigned long flags);
|
||||
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
|
||||
int trapnr);
|
||||
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
|
||||
struct pt_regs *);
|
||||
|
||||
struct kprobe {
|
||||
struct hlist_node hlist;
|
||||
|
||||
/* list of kprobes for multi-handler support */
|
||||
struct list_head list;
|
||||
|
||||
/*count the number of times this probe was temporarily disarmed */
|
||||
unsigned long nmissed;
|
||||
|
||||
/* location of the probe point */
|
||||
kprobe_opcode_t *addr;
|
||||
|
||||
|
@ -85,6 +103,41 @@ struct jprobe {
|
|||
kprobe_opcode_t *entry; /* probe handling code to jump to */
|
||||
};
|
||||
|
||||
#ifdef ARCH_SUPPORTS_KRETPROBES
|
||||
extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
|
||||
#else /* ARCH_SUPPORTS_KRETPROBES */
|
||||
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
#endif /* ARCH_SUPPORTS_KRETPROBES */
|
||||
/*
 * Function-return probe -
 * Note:
 * User needs to provide a handler function, and initialize maxactive.
 * maxactive - The maximum number of instances of the probed function that
 * can be active concurrently.
 * nmissed - tracks the number of times the probed function's return was
 * ignored, due to maxactive being too low.
 *
 */
struct kretprobe {
	struct kprobe kp;
	kretprobe_handler_t handler;
	int maxactive;
	int nmissed;
	struct hlist_head free_instances;
	struct hlist_head used_instances;
};

struct kretprobe_instance {
	struct hlist_node uflist; /* either on free list or used list */
	struct hlist_node hlist;
	struct kretprobe *rp;
	kprobe_opcode_t *ret_addr;
	struct task_struct *task;
};
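Editor's sketch (not part of the original commit): a minimal function-return probe built from the structures above. The probed symbol my_target_function and the handler body are illustrative; kp.addr must point at the entry of the function being probed.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>

extern int my_target_function(int arg);	/* hypothetical probe target */

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* ri->task is the task in which the probed function returned */
	printk(KERN_INFO "my_target_function returned in pid %d\n",
	       ri->task->pid);
	return 0;
}

static struct kretprobe my_rp = {
	.handler   = my_ret_handler,
	.maxactive = 16,	/* up to 16 concurrent activations */
};

static int example_install(void)
{
	my_rp.kp.addr = (kprobe_opcode_t *) my_target_function;
	return register_kretprobe(&my_rp);
}

static void example_remove(void)
{
	unregister_kretprobe(&my_rp);
	/* nmissed counts returns dropped because maxactive was too low */
	printk(KERN_INFO "missed %d returns\n", my_rp.nmissed);
}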
#ifdef CONFIG_KPROBES
|
||||
/* Locks kprobe: irq must be disabled */
|
||||
void lock_kprobes(void);
|
||||
|
@ -99,11 +152,17 @@ static inline int kprobe_running(void)
|
|||
|
||||
extern int arch_prepare_kprobe(struct kprobe *p);
|
||||
extern void arch_copy_kprobe(struct kprobe *p);
|
||||
extern void arch_arm_kprobe(struct kprobe *p);
|
||||
extern void arch_disarm_kprobe(struct kprobe *p);
|
||||
extern void arch_remove_kprobe(struct kprobe *p);
|
||||
extern int arch_init(void);
|
||||
extern void show_registers(struct pt_regs *regs);
|
||||
extern kprobe_opcode_t *get_insn_slot(void);
|
||||
extern void free_insn_slot(kprobe_opcode_t *slot);
|
||||
|
||||
/* Get the kprobe at this addr (if any). Must have called lock_kprobes */
|
||||
struct kprobe *get_kprobe(void *addr);
|
||||
struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
|
||||
|
||||
int register_kprobe(struct kprobe *p);
|
||||
void unregister_kprobe(struct kprobe *p);
|
||||
|
@ -113,7 +172,14 @@ int register_jprobe(struct jprobe *p);
|
|||
void unregister_jprobe(struct jprobe *p);
|
||||
void jprobe_return(void);
|
||||
|
||||
#else
|
||||
int register_kretprobe(struct kretprobe *rp);
|
||||
void unregister_kretprobe(struct kretprobe *rp);
|
||||
|
||||
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
|
||||
void add_rp_inst(struct kretprobe_instance *ri);
|
||||
void kprobe_flush_task(struct task_struct *tk);
|
||||
void recycle_rp_inst(struct kretprobe_instance *ri);
|
||||
#else /* CONFIG_KPROBES */
|
||||
static inline int kprobe_running(void)
|
||||
{
|
||||
return 0;
|
||||
|
@ -135,5 +201,15 @@ static inline void unregister_jprobe(struct jprobe *p)
|
|||
static inline void jprobe_return(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
static inline int register_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
static inline void unregister_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
}
|
||||
static inline void kprobe_flush_task(struct task_struct *tk)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_KPROBES */
|
||||
#endif /* _LINUX_KPROBES_H */
|
||||
|
|
|
@ -41,6 +41,7 @@ struct ps2dev {
|
|||
|
||||
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
|
||||
int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
|
||||
void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
|
||||
int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
|
||||
int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int command);
|
||||
int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
|
||||
|
|
|
@ -185,7 +185,7 @@ static inline void list_del(struct list_head *entry)
|
|||
* list_for_each_entry_rcu().
|
||||
*
|
||||
* Note that the caller is not permitted to immediately free
|
||||
* the newly deleted entry. Instead, either synchronize_kernel()
|
||||
* the newly deleted entry. Instead, either synchronize_rcu()
|
||||
* or call_rcu() must be used to defer freeing until an RCU
|
||||
* grace period has elapsed.
|
||||
*/
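Editor's sketch (not part of the original commit): the deferred-free pattern the comment above describes, applied to a hypothetical RCU-protected list element.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct list_head list;
	struct rcu_head rcu;
	int data;
};

static void my_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

/* Caller holds the lock that protects writers to the list. */
static void my_entry_del(struct my_entry *e)
{
	list_del_rcu(&e->list);
	/* Readers under rcu_read_lock() may still see e, so defer the
	 * actual free until an RCU grace period has elapsed. */
	call_rcu(&e->rcu, my_entry_free);
}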
@ -72,6 +72,8 @@ struct nlm_lockowner {
|
|||
uint32_t pid;
|
||||
};
|
||||
|
||||
struct nlm_wait;
|
||||
|
||||
/*
|
||||
* Memory chunk for NLM client RPC request.
|
||||
*/
|
||||
|
@ -81,6 +83,7 @@ struct nlm_rqst {
|
|||
struct nlm_host * a_host; /* host handle */
|
||||
struct nlm_args a_args; /* arguments */
|
||||
struct nlm_res a_res; /* result */
|
||||
struct nlm_wait * a_block;
|
||||
char a_owner[NLMCLNT_OHSIZE];
|
||||
};
|
||||
|
||||
|
@ -142,7 +145,9 @@ extern unsigned long nlmsvc_timeout;
|
|||
* Lockd client functions
|
||||
*/
|
||||
struct nlm_rqst * nlmclnt_alloc_call(void);
|
||||
int nlmclnt_block(struct nlm_host *, struct file_lock *, u32 *);
|
||||
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl);
|
||||
void nlmclnt_finish_block(struct nlm_rqst *req);
|
||||
long nlmclnt_block(struct nlm_rqst *req, long timeout);
|
||||
int nlmclnt_cancel(struct nlm_host *, struct file_lock *);
|
||||
u32 nlmclnt_grant(struct nlm_lock *);
|
||||
void nlmclnt_recovery(struct nlm_host *, u32);
|
||||
|
|
|
@ -61,7 +61,7 @@ struct loop_device {
|
|||
struct semaphore lo_sem;
|
||||
struct semaphore lo_ctl_mutex;
|
||||
struct semaphore lo_bh_mutex;
|
||||
atomic_t lo_pending;
|
||||
int lo_pending;
|
||||
|
||||
request_queue_t *lo_queue;
|
||||
};
|
||||
|
|
|
@ -20,9 +20,14 @@ typedef struct mempool_s {
	mempool_free_t *free;
	wait_queue_head_t wait;
} mempool_t;
extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				  mempool_free_t *free_fn, void *pool_data);
extern int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask);

extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				 mempool_free_t *free_fn, void *pool_data);
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
				      mempool_free_t *free_fn, void *pool_data, int nid);

extern int mempool_resize(mempool_t *pool, int new_min_nr,
			  unsigned int __nocast gfp_mask);
extern void mempool_destroy(mempool_t *pool);
extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
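Editor's sketch (not part of the original commit): a slab-backed mempool using the prototypes above. The cache name, element type and reserve size are made up; the alloc/free callbacks simply wrap the slab allocator, which is the usual pattern.

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_elem {
	int data;
};

static kmem_cache_t *my_cache;
static mempool_t *my_pool;

static void *my_pool_alloc(unsigned int gfp_mask, void *pool_data)
{
	return kmem_cache_alloc((kmem_cache_t *) pool_data, gfp_mask);
}

static void my_pool_free(void *element, void *pool_data)
{
	kmem_cache_free((kmem_cache_t *) pool_data, element);
}

static int my_pool_init(void)
{
	my_cache = kmem_cache_create("my_elems", sizeof(struct my_elem),
				     0, 0, NULL, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Keep at least 4 preallocated elements for use under memory pressure. */
	my_pool = mempool_create(4, my_pool_alloc, my_pool_free, my_cache);
	if (!my_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_pool_use(void)
{
	struct my_elem *e = mempool_alloc(my_pool, GFP_NOIO);

	/* ... use e; with a blocking mask the allocation retries rather
	 * than failing outright ... */
	mempool_free(e, my_pool);
}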
@ -395,19 +395,81 @@ static inline void put_page(struct page *page)
|
|||
/*
|
||||
* The zone field is never updated after free_area_init_core()
|
||||
* sets it, so none of the operations on it need to be atomic.
|
||||
* We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
|
||||
* so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
|
||||
*/
|
||||
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
|
||||
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
|
||||
|
||||
|
||||
/*
|
||||
* page->flags layout:
|
||||
*
|
||||
* There are three possibilities for how page->flags get
|
||||
* laid out. The first is for the normal case, without
|
||||
* sparsemem. The second is for sparsemem when there is
|
||||
* plenty of space for node and section. The last is when
|
||||
* we have run out of space and have to fall back to an
|
||||
* alternate (slower) way of determining the node.
|
||||
*
|
||||
* No sparsemem: | NODE | ZONE | ... | FLAGS |
|
||||
* with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
|
||||
* no space for node: | SECTION | ZONE | ... | FLAGS |
|
||||
*/
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
#define SECTIONS_WIDTH SECTIONS_SHIFT
|
||||
#else
|
||||
#define SECTIONS_WIDTH 0
|
||||
#endif
|
||||
|
||||
#define ZONES_WIDTH ZONES_SHIFT
|
||||
|
||||
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
|
||||
#define NODES_WIDTH NODES_SHIFT
|
||||
#else
|
||||
#define NODES_WIDTH 0
|
||||
#endif
|
||||
|
||||
/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
|
||||
#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
|
||||
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
|
||||
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
|
||||
|
||||
/*
|
||||
* We are going to use the flags for the page to node mapping if its in
|
||||
* there. This includes the case where there is no node, so it is implicit.
|
||||
*/
|
||||
#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0)
|
||||
|
||||
#ifndef PFN_SECTION_SHIFT
|
||||
#define PFN_SECTION_SHIFT 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Define the bit shifts to access each section. For non-existent
|
||||
* sections we define the shift as 0; that plus a 0 mask ensures
|
||||
* the compiler will optimise away reference to them.
|
||||
*/
|
||||
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
|
||||
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
|
||||
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
|
||||
|
||||
/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
|
||||
#if FLAGS_HAS_NODE
|
||||
#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT)
|
||||
#else
|
||||
#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
|
||||
#endif
|
||||
#define ZONETABLE_PGSHIFT ZONES_PGSHIFT
|
||||
|
||||
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
|
||||
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
|
||||
#endif
|
||||
|
||||
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
|
||||
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
|
||||
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
|
||||
#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)
|
||||
|
||||
static inline unsigned long page_zonenum(struct page *page)
|
||||
{
|
||||
return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
|
||||
}
|
||||
static inline unsigned long page_to_nid(struct page *page)
|
||||
{
|
||||
return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
|
||||
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
|
||||
}
|
||||
|
||||
struct zone;
|
||||
|
@ -415,13 +477,44 @@ extern struct zone *zone_table[];
|
|||
|
||||
static inline struct zone *page_zone(struct page *page)
|
||||
{
|
||||
return zone_table[page->flags >> NODEZONE_SHIFT];
|
||||
return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
|
||||
ZONETABLE_MASK];
|
||||
}
|
||||
|
||||
static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
|
||||
static inline unsigned long page_to_nid(struct page *page)
|
||||
{
|
||||
page->flags &= ~(~0UL << NODEZONE_SHIFT);
|
||||
page->flags |= nodezone_num << NODEZONE_SHIFT;
|
||||
if (FLAGS_HAS_NODE)
|
||||
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
|
||||
else
|
||||
return page_zone(page)->zone_pgdat->node_id;
|
||||
}
|
||||
static inline unsigned long page_to_section(struct page *page)
|
||||
{
|
||||
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
|
||||
}
|
||||
|
||||
static inline void set_page_zone(struct page *page, unsigned long zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, unsigned long zone,
				  unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}
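Editor's note (not part of the original commit): a worked instance of the packing above, assuming a 32-bit page_flags_t without sparsemem, FLAGS_RESERVED = 8, NODES_WIDTH = 6 and ZONES_WIDTH = 2. Then SECTIONS_PGOFF = 32, NODES_PGOFF = 32 - 6 = 26 and ZONES_PGOFF = 26 - 2 = 24, so the zone lands in bits 24..25, the node in bits 26..31, and the low 24 bits stay free for PG_* flags.

/* Same arithmetic as set_page_links() under the assumptions above. */
static unsigned long example_pack_flags(unsigned long node, unsigned long zone)
{
	unsigned long flags = 0;

	flags |= (zone & 0x3UL)  << 24;		/* ZONES_PGSHIFT = 24 */
	flags |= (node & 0x3fUL) << 26;		/* NODES_PGSHIFT = 26 */
	return flags;				/* bits 0..23 remain for flags */
}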
#ifndef CONFIG_DISCONTIGMEM
|
||||
|
@ -691,6 +784,12 @@ extern void show_mem(void);
|
|||
extern void si_meminfo(struct sysinfo * val);
|
||||
extern void si_meminfo_node(struct sysinfo *val, int nid);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
extern void setup_per_cpu_pageset(void);
|
||||
#else
|
||||
static inline void setup_per_cpu_pageset(void) {}
|
||||
#endif
|
||||
|
||||
/* prio_tree.c */
|
||||
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
|
||||
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
|
||||
|
|
|
@ -63,6 +63,12 @@ struct per_cpu_pageset {
|
|||
#endif
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
|
||||
#else
|
||||
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
|
||||
#endif
|
||||
|
||||
#define ZONE_DMA 0
|
||||
#define ZONE_NORMAL 1
|
||||
#define ZONE_HIGHMEM 2
|
||||
|
@ -122,8 +128,11 @@ struct zone {
|
|||
*/
|
||||
unsigned long lowmem_reserve[MAX_NR_ZONES];
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
struct per_cpu_pageset *pageset[NR_CPUS];
|
||||
#else
|
||||
struct per_cpu_pageset pageset[NR_CPUS];
|
||||
|
||||
#endif
|
||||
/*
|
||||
* free areas of different sizes
|
||||
*/
|
||||
|
@ -144,6 +153,14 @@ struct zone {
|
|||
unsigned long pages_scanned; /* since last reclaim */
|
||||
int all_unreclaimable; /* All pages pinned */
|
||||
|
||||
/*
|
||||
* Does the allocator try to reclaim pages from the zone as soon
|
||||
* as it fails a watermark_ok() in __alloc_pages?
|
||||
*/
|
||||
int reclaim_pages;
|
||||
/* A count of how many reclaimers are scanning this zone */
|
||||
atomic_t reclaim_in_progress;
|
||||
|
||||
/*
|
||||
* prev_priority holds the scanning priority for this zone. It is
|
||||
* defined as the scanning priority at which we achieved our reclaim
|
||||
|
@ -252,7 +269,9 @@ typedef struct pglist_data {
|
|||
struct zone node_zones[MAX_NR_ZONES];
|
||||
struct zonelist node_zonelists[GFP_ZONETYPES];
|
||||
int nr_zones;
|
||||
#ifdef CONFIG_FLAT_NODE_MEM_MAP
|
||||
struct page *node_mem_map;
|
||||
#endif
|
||||
struct bootmem_data *bdata;
|
||||
unsigned long node_start_pfn;
|
||||
unsigned long node_present_pages; /* total number of physical pages */
|
||||
|
@ -267,6 +286,12 @@ typedef struct pglist_data {
|
|||
|
||||
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
|
||||
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
|
||||
#ifdef CONFIG_FLAT_NODE_MEM_MAP
|
||||
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
|
||||
#else
|
||||
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
|
||||
#endif
|
||||
#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
|
||||
|
||||
extern struct pglist_data *pgdat_list;
|
||||
|
||||
|
@ -381,9 +406,9 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
|
|||
|
||||
#include <linux/topology.h>
|
||||
/* Returns the number of the current Node. */
|
||||
#define numa_node_id() (cpu_to_node(_smp_processor_id()))
|
||||
#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
|
||||
extern struct pglist_data contig_page_data;
|
||||
#define NODE_DATA(nid) (&contig_page_data)
|
||||
|
@ -391,36 +416,177 @@ extern struct pglist_data contig_page_data;
|
|||
#define MAX_NODES_SHIFT 1
|
||||
#define pfn_to_nid(pfn) (0)
|
||||
|
||||
#else /* CONFIG_DISCONTIGMEM */
|
||||
#else /* CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
#include <asm/mmzone.h>
|
||||
|
||||
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
#include <asm/sparsemem.h>
|
||||
#endif
|
||||
|
||||
#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
|
||||
/*
|
||||
* with 32 bit page->flags field, we reserve 8 bits for node/zone info.
|
||||
* there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
|
||||
*/
|
||||
#define MAX_NODES_SHIFT 6
|
||||
#define FLAGS_RESERVED 8
|
||||
|
||||
#elif BITS_PER_LONG == 64
|
||||
/*
|
||||
* with 64 bit flags field, there's plenty of room.
|
||||
*/
|
||||
#define MAX_NODES_SHIFT 10
|
||||
#define FLAGS_RESERVED 32
|
||||
|
||||
#else
|
||||
|
||||
#error BITS_PER_LONG not defined
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* !CONFIG_DISCONTIGMEM */
|
||||
|
||||
#if NODES_SHIFT > MAX_NODES_SHIFT
|
||||
#error NODES_SHIFT > MAX_NODES_SHIFT
|
||||
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
||||
#define early_pfn_to_nid(nid) (0UL)
|
||||
#endif
|
||||
|
||||
/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
|
||||
#define MAX_ZONES_SHIFT 2
|
||||
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
|
||||
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
|
||||
|
||||
#if ZONES_SHIFT > MAX_ZONES_SHIFT
|
||||
#error ZONES_SHIFT > MAX_ZONES_SHIFT
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
|
||||
/*
|
||||
* SECTION_SHIFT #bits space required to store a section #
|
||||
*
|
||||
* PA_SECTION_SHIFT physical address to/from section number
|
||||
* PFN_SECTION_SHIFT pfn to/from section number
|
||||
*/
|
||||
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
|
||||
|
||||
#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
|
||||
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
|
||||
|
||||
#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
|
||||
|
||||
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
|
||||
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
|
||||
|
||||
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
|
||||
#error Allocator MAX_ORDER exceeds SECTION_SIZE
|
||||
#endif
|
||||
|
||||
struct page;
|
||||
struct mem_section {
|
||||
/*
|
||||
* This is, logically, a pointer to an array of struct
|
||||
* pages. However, it is stored with some other magic.
|
||||
* (see sparse.c::sparse_init_one_section())
|
||||
*
|
||||
* Making it a UL at least makes someone do a cast
|
||||
* before using it wrong.
|
||||
*/
|
||||
unsigned long section_mem_map;
|
||||
};
|
||||
|
||||
extern struct mem_section mem_section[NR_MEM_SECTIONS];
|
||||
|
||||
static inline struct mem_section *__nr_to_section(unsigned long nr)
|
||||
{
|
||||
return &mem_section[nr];
|
||||
}
|
||||
|
||||
/*
|
||||
* We use the lower bits of the mem_map pointer to store
|
||||
* a little bit of information. There should be at least
|
||||
* 3 bits here due to 32-bit alignment.
|
||||
*/
|
||||
#define SECTION_MARKED_PRESENT (1UL<<0)
|
||||
#define SECTION_HAS_MEM_MAP (1UL<<1)
|
||||
#define SECTION_MAP_LAST_BIT (1UL<<2)
|
||||
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
|
||||
|
||||
static inline struct page *__section_mem_map_addr(struct mem_section *section)
|
||||
{
|
||||
unsigned long map = section->section_mem_map;
|
||||
map &= SECTION_MAP_MASK;
|
||||
return (struct page *)map;
|
||||
}
|
||||
|
||||
static inline int valid_section(struct mem_section *section)
|
||||
{
|
||||
return (section->section_mem_map & SECTION_MARKED_PRESENT);
|
||||
}
|
||||
|
||||
static inline int section_has_mem_map(struct mem_section *section)
|
||||
{
|
||||
return (section->section_mem_map & SECTION_HAS_MEM_MAP);
|
||||
}
|
||||
|
||||
static inline int valid_section_nr(unsigned long nr)
|
||||
{
|
||||
return valid_section(__nr_to_section(nr));
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a kernel address, find the home node of the underlying memory.
|
||||
*/
|
||||
#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
|
||||
{
|
||||
return __nr_to_section(pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
#define pfn_to_page(pfn) \
|
||||
({ \
|
||||
unsigned long __pfn = (pfn); \
|
||||
__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \
|
||||
})
|
||||
#define page_to_pfn(page) \
|
||||
({ \
|
||||
page - __section_mem_map_addr(__nr_to_section( \
|
||||
page_to_section(page))); \
|
||||
})
|
||||
|
||||
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
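Editor's sketch (not part of the original commit): with sparsemem a pfn may fall in a hole or an absent section, so a scan over a physical range checks pfn_valid() before using pfn_to_page(); the range arguments and the node-0 filter are illustrative.

static unsigned long example_count_valid(unsigned long start_pfn,
					 unsigned long end_pfn)
{
	unsigned long pfn, nr = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))		/* hole or absent section */
			continue;
		page = pfn_to_page(pfn);
		if (page_to_nid(page) == 0)	/* e.g. only count node 0 */
			nr++;
	}
	return nr;
}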
/*
|
||||
* These are _only_ used during initialisation, therefore they
|
||||
* can use __initdata ... They could have names to indicate
|
||||
* this restriction.
|
||||
*/
|
||||
#ifdef CONFIG_NUMA
|
||||
#define pfn_to_nid early_pfn_to_nid
|
||||
#endif
|
||||
|
||||
#define pfn_to_pgdat(pfn) \
|
||||
({ \
|
||||
NODE_DATA(pfn_to_nid(pfn)); \
|
||||
})
|
||||
|
||||
#define early_pfn_valid(pfn) pfn_valid(pfn)
|
||||
void sparse_init(void);
|
||||
#else
|
||||
#define sparse_init() do {} while (0)
|
||||
#endif /* CONFIG_SPARSEMEM */
|
||||
|
||||
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
|
||||
#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
|
||||
#else
|
||||
#define early_pfn_in_nid(pfn, nid) (1)
|
||||
#endif
|
||||
|
||||
#ifndef early_pfn_valid
|
||||
#define early_pfn_valid(pfn) (1)
|
||||
#endif
|
||||
|
||||
void memory_present(int nid, unsigned long start, unsigned long end);
|
||||
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_MMZONE_H */
|
||||
|
|
|
@ -175,4 +175,50 @@ struct serio_device_id {
|
|||
};
|
||||
|
||||
|
||||
/* PCMCIA */
|
||||
|
||||
struct pcmcia_device_id {
|
||||
__u16 match_flags;
|
||||
|
||||
__u16 manf_id;
|
||||
__u16 card_id;
|
||||
|
||||
__u8 func_id;
|
||||
|
||||
/* for real multi-function devices */
|
||||
__u8 function;
|
||||
|
||||
/* for pseudo multi-function devices */
|
||||
__u8 device_no;
|
||||
|
||||
__u32 prod_id_hash[4];
|
||||
|
||||
/* not matched against in kernelspace*/
|
||||
#ifdef __KERNEL__
|
||||
const char * prod_id[4];
|
||||
#else
|
||||
kernel_ulong_t prod_id[4];
|
||||
#endif
|
||||
|
||||
/* not matched against */
|
||||
kernel_ulong_t driver_info;
|
||||
#ifdef __KERNEL__
|
||||
char * cisfile;
|
||||
#else
|
||||
kernel_ulong_t cisfile;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define PCMCIA_DEV_ID_MATCH_MANF_ID	0x0001
#define PCMCIA_DEV_ID_MATCH_CARD_ID	0x0002
#define PCMCIA_DEV_ID_MATCH_FUNC_ID	0x0004
#define PCMCIA_DEV_ID_MATCH_FUNCTION	0x0008
#define PCMCIA_DEV_ID_MATCH_PROD_ID1	0x0010
#define PCMCIA_DEV_ID_MATCH_PROD_ID2	0x0020
#define PCMCIA_DEV_ID_MATCH_PROD_ID3	0x0040
#define PCMCIA_DEV_ID_MATCH_PROD_ID4	0x0080
#define PCMCIA_DEV_ID_MATCH_DEVICE_NO	0x0100
#define PCMCIA_DEV_ID_MATCH_FAKE_CIS	0x0200
#define PCMCIA_DEV_ID_MATCH_ANONYMOUS	0x0400

#endif /* LINUX_MOD_DEVICETABLE_H */
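Editor's sketch (not part of the original commit): a driver-side match table built from the structure and flags above; the manufacturer and card ids are made-up values.

static struct pcmcia_device_id example_ids[] = {
	{
		.match_flags	= PCMCIA_DEV_ID_MATCH_MANF_ID |
				  PCMCIA_DEV_ID_MATCH_CARD_ID,
		.manf_id	= 0x0101,	/* hypothetical vendor */
		.card_id	= 0x0562,	/* hypothetical card   */
	},
	{
		.match_flags	= PCMCIA_DEV_ID_MATCH_FUNC_ID,
		.func_id	= 0x06,		/* e.g. a network function */
	},
	{ /* all-zero entry terminates the table */ },
};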
@ -51,6 +51,9 @@ struct module_attribute {
|
|||
ssize_t (*show)(struct module_attribute *, struct module *, char *);
|
||||
ssize_t (*store)(struct module_attribute *, struct module *,
|
||||
const char *, size_t count);
|
||||
void (*setup)(struct module *, const char *);
|
||||
int (*test)(struct module *);
|
||||
void (*free)(struct module *);
|
||||
};
|
||||
|
||||
struct module_kobject
|
||||
|
@ -239,6 +242,8 @@ struct module
|
|||
/* Sysfs stuff. */
|
||||
struct module_kobject mkobj;
|
||||
struct module_param_attrs *param_attrs;
|
||||
const char *version;
|
||||
const char *srcversion;
|
||||
|
||||
/* Exported symbols */
|
||||
const struct kernel_symbol *syms;
|
||||
|
|
|
@ -12,7 +12,6 @@ struct namespace {
|
|||
struct rw_semaphore sem;
|
||||
};
|
||||
|
||||
extern void umount_tree(struct vfsmount *);
|
||||
extern int copy_namespace(int, struct task_struct *);
|
||||
extern void __put_namespace(struct namespace *namespace);
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
struct divert_blk;
|
||||
struct vlan_group;
|
||||
struct ethtool_ops;
|
||||
struct netpoll;
|
||||
struct netpoll_info;
|
||||
/* source back-compat hooks */
|
||||
#define SET_ETHTOOL_OPS(netdev,ops) \
|
||||
( (netdev)->ethtool_ops = (ops) )
|
||||
|
@ -164,12 +164,6 @@ struct netif_rx_stats
|
|||
unsigned total;
|
||||
unsigned dropped;
|
||||
unsigned time_squeeze;
|
||||
unsigned throttled;
|
||||
unsigned fastroute_hit;
|
||||
unsigned fastroute_success;
|
||||
unsigned fastroute_defer;
|
||||
unsigned fastroute_deferred_out;
|
||||
unsigned fastroute_latency_reduction;
|
||||
unsigned cpu_collision;
|
||||
};
|
||||
|
||||
|
@ -468,7 +462,7 @@ struct net_device
|
|||
unsigned char *haddr);
|
||||
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
|
||||
#ifdef CONFIG_NETPOLL
|
||||
struct netpoll *np;
|
||||
struct netpoll_info *npinfo;
|
||||
#endif
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
void (*poll_controller)(struct net_device *dev);
|
||||
|
@ -562,12 +556,9 @@ static inline int unregister_gifconf(unsigned int family)
|
|||
|
||||
struct softnet_data
|
||||
{
|
||||
int throttle;
|
||||
int cng_level;
|
||||
int avg_blog;
|
||||
struct net_device *output_queue;
|
||||
struct sk_buff_head input_pkt_queue;
|
||||
struct list_head poll_list;
|
||||
struct net_device *output_queue;
|
||||
struct sk_buff *completion_queue;
|
||||
|
||||
struct net_device backlog_dev; /* Sorry. 8) */
|
||||
|
@ -925,10 +916,6 @@ extern int skb_checksum_help(struct sk_buff *skb, int inward);
|
|||
extern void net_enable_timestamp(void);
|
||||
extern void net_disable_timestamp(void);
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
extern char *net_sysctl_strdup(const char *s);
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _LINUX_DEV_H */
|
||||
|
|
|
@ -75,12 +75,6 @@ enum nf_ip_hook_priorities {
|
|||
#define SO_ORIGINAL_DST 80
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifdef CONFIG_NETFILTER_DEBUG
|
||||
void nf_debug_ip_local_deliver(struct sk_buff *skb);
|
||||
void nf_debug_ip_loopback_xmit(struct sk_buff *newskb);
|
||||
void nf_debug_ip_finish_output2(struct sk_buff *skb);
|
||||
#endif /*CONFIG_NETFILTER_DEBUG*/
|
||||
|
||||
extern int ip_route_me_harder(struct sk_buff **pskb);
|
||||
|
||||
/* Call this before modifying an existing IP packet: ensures it is
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
#ifndef _IP_CONNTRACK_CORE_H
|
||||
#define _IP_CONNTRACK_CORE_H
|
||||
#include <linux/netfilter.h>
|
||||
#include <linux/netfilter_ipv4/lockhelp.h>
|
||||
|
||||
/* This header is used to share core functionality between the
|
||||
standalone connection tracking module, and the compatibility layer's use
|
||||
|
@ -47,6 +46,6 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb)
|
|||
|
||||
extern struct list_head *ip_conntrack_hash;
|
||||
extern struct list_head ip_conntrack_expect_list;
|
||||
DECLARE_RWLOCK_EXTERN(ip_conntrack_lock);
|
||||
extern rwlock_t ip_conntrack_lock;
|
||||
#endif /* _IP_CONNTRACK_CORE_H */
|
||||
|
||||
|
|
|
@ -50,10 +50,9 @@ struct ip_nat_multi_range_compat
|
|||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/list.h>
|
||||
#include <linux/netfilter_ipv4/lockhelp.h>
|
||||
|
||||
/* Protects NAT hash tables, and NAT-private part of conntracks. */
|
||||
DECLARE_RWLOCK_EXTERN(ip_nat_lock);
|
||||
extern rwlock_t ip_nat_lock;
|
||||
|
||||
/* The structure embedded in the conntrack structure. */
|
||||
struct ip_nat_info
|
||||
|
|
|
@ -18,7 +18,6 @@ struct clusterip_config;
|
|||
struct ipt_clusterip_tgt_info {
|
||||
|
||||
u_int32_t flags;
|
||||
struct clusterip_config *config;
|
||||
|
||||
/* only relevant for new ones */
|
||||
u_int8_t clustermac[6];
|
||||
|
@ -27,6 +26,8 @@ struct ipt_clusterip_tgt_info {
|
|||
u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
|
||||
enum clusterip_hashmode hash_mode;
|
||||
u_int32_t hash_initval;
|
||||
|
||||
struct clusterip_config *config;
|
||||
};
|
||||
|
||||
#endif /*_IPT_CLUSTERIP_H_target*/
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
#define _LISTHELP_H
|
||||
#include <linux/config.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/netfilter_ipv4/lockhelp.h>
|
||||
|
||||
/* Header to do more comprehensive job than linux/list.h; assume list
|
||||
is first entry in structure. */
|
||||
|
|
|
@ -1,129 +0,0 @@
|
|||
#ifndef _LOCKHELP_H
|
||||
#define _LOCKHELP_H
|
||||
#include <linux/config.h>
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
/* Header to do help in lock debugging. */
|
||||
|
||||
#ifdef CONFIG_NETFILTER_DEBUG
|
||||
struct spinlock_debug
|
||||
{
|
||||
spinlock_t l;
|
||||
atomic_t locked_by;
|
||||
};
|
||||
|
||||
struct rwlock_debug
|
||||
{
|
||||
rwlock_t l;
|
||||
long read_locked_map;
|
||||
long write_locked_map;
|
||||
};
|
||||
|
||||
#define DECLARE_LOCK(l) \
|
||||
struct spinlock_debug l = { SPIN_LOCK_UNLOCKED, ATOMIC_INIT(-1) }
|
||||
#define DECLARE_LOCK_EXTERN(l) \
|
||||
extern struct spinlock_debug l
|
||||
#define DECLARE_RWLOCK(l) \
|
||||
struct rwlock_debug l = { RW_LOCK_UNLOCKED, 0, 0 }
|
||||
#define DECLARE_RWLOCK_EXTERN(l) \
|
||||
extern struct rwlock_debug l
|
||||
|
||||
#define MUST_BE_LOCKED(l) \
|
||||
do { if (atomic_read(&(l)->locked_by) != smp_processor_id()) \
|
||||
printk("ASSERT %s:%u %s unlocked\n", __FILE__, __LINE__, #l); \
|
||||
} while(0)
|
||||
|
||||
#define MUST_BE_UNLOCKED(l) \
|
||||
do { if (atomic_read(&(l)->locked_by) == smp_processor_id()) \
|
||||
printk("ASSERT %s:%u %s locked\n", __FILE__, __LINE__, #l); \
|
||||
} while(0)
|
||||
|
||||
/* Write locked OK as well. */
|
||||
#define MUST_BE_READ_LOCKED(l) \
|
||||
do { if (!((l)->read_locked_map & (1UL << smp_processor_id())) \
|
||||
&& !((l)->write_locked_map & (1UL << smp_processor_id()))) \
|
||||
printk("ASSERT %s:%u %s not readlocked\n", __FILE__, __LINE__, #l); \
|
||||
} while(0)
|
||||
|
||||
#define MUST_BE_WRITE_LOCKED(l) \
|
||||
do { if (!((l)->write_locked_map & (1UL << smp_processor_id()))) \
|
||||
printk("ASSERT %s:%u %s not writelocked\n", __FILE__, __LINE__, #l); \
|
||||
} while(0)
|
||||
|
||||
#define MUST_BE_READ_WRITE_UNLOCKED(l) \
|
||||
do { if ((l)->read_locked_map & (1UL << smp_processor_id())) \
|
||||
printk("ASSERT %s:%u %s readlocked\n", __FILE__, __LINE__, #l); \
|
||||
else if ((l)->write_locked_map & (1UL << smp_processor_id())) \
|
||||
printk("ASSERT %s:%u %s writelocked\n", __FILE__, __LINE__, #l); \
|
||||
} while(0)
|
||||
|
||||
#define LOCK_BH(lk) \
|
||||
do { \
|
||||
MUST_BE_UNLOCKED(lk); \
|
||||
spin_lock_bh(&(lk)->l); \
|
||||
atomic_set(&(lk)->locked_by, smp_processor_id()); \
|
||||
} while(0)
|
||||
|
||||
#define UNLOCK_BH(lk) \
|
||||
do { \
|
||||
MUST_BE_LOCKED(lk); \
|
||||
atomic_set(&(lk)->locked_by, -1); \
|
||||
spin_unlock_bh(&(lk)->l); \
|
||||
} while(0)
|
||||
|
||||
#define READ_LOCK(lk) \
|
||||
do { \
|
||||
MUST_BE_READ_WRITE_UNLOCKED(lk); \
|
||||
read_lock_bh(&(lk)->l); \
|
||||
set_bit(smp_processor_id(), &(lk)->read_locked_map); \
|
||||
} while(0)
|
||||
|
||||
#define WRITE_LOCK(lk) \
|
||||
do { \
|
||||
MUST_BE_READ_WRITE_UNLOCKED(lk); \
|
||||
write_lock_bh(&(lk)->l); \
|
||||
set_bit(smp_processor_id(), &(lk)->write_locked_map); \
|
||||
} while(0)
|
||||
|
||||
#define READ_UNLOCK(lk) \
|
||||
do { \
|
||||
if (!((lk)->read_locked_map & (1UL << smp_processor_id()))) \
|
||||
printk("ASSERT: %s:%u %s not readlocked\n", \
|
||||
__FILE__, __LINE__, #lk); \
|
||||
clear_bit(smp_processor_id(), &(lk)->read_locked_map); \
|
||||
read_unlock_bh(&(lk)->l); \
|
||||
} while(0)
|
||||
|
||||
#define WRITE_UNLOCK(lk) \
|
||||
do { \
|
||||
MUST_BE_WRITE_LOCKED(lk); \
|
||||
clear_bit(smp_processor_id(), &(lk)->write_locked_map); \
|
||||
write_unlock_bh(&(lk)->l); \
|
||||
} while(0)
|
||||
|
||||
#else
|
||||
#define DECLARE_LOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
|
||||
#define DECLARE_LOCK_EXTERN(l) extern spinlock_t l
|
||||
#define DECLARE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
|
||||
#define DECLARE_RWLOCK_EXTERN(l) extern rwlock_t l
|
||||
|
||||
#define MUST_BE_LOCKED(l)
|
||||
#define MUST_BE_UNLOCKED(l)
|
||||
#define MUST_BE_READ_LOCKED(l)
|
||||
#define MUST_BE_WRITE_LOCKED(l)
|
||||
#define MUST_BE_READ_WRITE_UNLOCKED(l)
|
||||
|
||||
#define LOCK_BH(l) spin_lock_bh(l)
|
||||
#define UNLOCK_BH(l) spin_unlock_bh(l)
|
||||
|
||||
#define READ_LOCK(l) read_lock_bh(l)
|
||||
#define WRITE_LOCK(l) write_lock_bh(l)
|
||||
#define READ_UNLOCK(l) read_unlock_bh(l)
|
||||
#define WRITE_UNLOCK(l) write_unlock_bh(l)
|
||||
#endif /*CONFIG_NETFILTER_DEBUG*/
|
||||
|
||||
#endif /* _LOCKHELP_H */
|
|
@ -14,6 +14,7 @@
|
|||
#define NETLINK_SELINUX 7 /* SELinux event notifications */
|
||||
#define NETLINK_ARPD 8
|
||||
#define NETLINK_AUDIT 9 /* auditing */
|
||||
#define NETLINK_FIB_LOOKUP 10
|
||||
#define NETLINK_ROUTE6 11 /* af_inet6 route comm channel */
|
||||
#define NETLINK_IP6_FW 13
|
||||
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
|
||||
|
@ -146,7 +147,7 @@ struct netlink_callback
|
|||
int (*dump)(struct sk_buff * skb, struct netlink_callback *cb);
|
||||
int (*done)(struct netlink_callback *cb);
|
||||
int family;
|
||||
long args[4];
|
||||
long args[5];
|
||||
};
|
||||
|
||||
struct netlink_notify
|
||||
|
@ -167,6 +168,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
|
|||
nlh->nlmsg_flags = flags;
|
||||
nlh->nlmsg_pid = pid;
|
||||
nlh->nlmsg_seq = seq;
|
||||
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
|
||||
return nlh;
|
||||
}
|
||||
|
||||
|
|
|
@ -16,14 +16,19 @@ struct netpoll;
|
|||
struct netpoll {
|
||||
struct net_device *dev;
|
||||
char dev_name[16], *name;
|
||||
int rx_flags;
|
||||
void (*rx_hook)(struct netpoll *, int, char *, int);
|
||||
void (*drop)(struct sk_buff *skb);
|
||||
u32 local_ip, remote_ip;
|
||||
u16 local_port, remote_port;
|
||||
unsigned char local_mac[6], remote_mac[6];
|
||||
};
|
||||
|
||||
struct netpoll_info {
|
||||
spinlock_t poll_lock;
|
||||
int poll_owner;
|
||||
int rx_flags;
|
||||
spinlock_t rx_lock;
|
||||
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
|
||||
};
|
||||
|
||||
void netpoll_poll(struct netpoll *np);
|
||||
|
@ -39,22 +44,35 @@ void netpoll_queue(struct sk_buff *skb);
|
|||
#ifdef CONFIG_NETPOLL
|
||||
static inline int netpoll_rx(struct sk_buff *skb)
|
||||
{
|
||||
return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
|
||||
struct netpoll_info *npinfo = skb->dev->npinfo;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||
/* check rx_flags again with the lock held */
|
||||
if (npinfo->rx_flags && __netpoll_rx(skb))
|
||||
ret = 1;
|
||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void netpoll_poll_lock(struct net_device *dev)
|
||||
{
|
||||
if (dev->np) {
|
||||
spin_lock(&dev->np->poll_lock);
|
||||
dev->np->poll_owner = smp_processor_id();
|
||||
if (dev->npinfo) {
|
||||
spin_lock(&dev->npinfo->poll_lock);
|
||||
dev->npinfo->poll_owner = smp_processor_id();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void netpoll_poll_unlock(struct net_device *dev)
|
||||
{
|
||||
if (dev->np) {
|
||||
spin_unlock(&dev->np->poll_lock);
|
||||
dev->np->poll_owner = -1;
|
||||
if (dev->npinfo) {
|
||||
dev->npinfo->poll_owner = -1;
|
||||
spin_unlock(&dev->npinfo->poll_lock);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#define NFS4_ACCESS_DELETE 0x0010
|
||||
#define NFS4_ACCESS_EXECUTE 0x0020
|
||||
|
||||
#define NFS4_FH_PERISTENT 0x0000
|
||||
#define NFS4_FH_PERSISTENT 0x0000
|
||||
#define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001
|
||||
#define NFS4_FH_VOLATILE_ANY 0x0002
|
||||
#define NFS4_FH_VOL_MIGRATION 0x0004
|
||||
|
@ -382,6 +382,8 @@ enum {
|
|||
NFSPROC4_CLNT_READDIR,
|
||||
NFSPROC4_CLNT_SERVER_CAPS,
|
||||
NFSPROC4_CLNT_DELEGRETURN,
|
||||
NFSPROC4_CLNT_GETACL,
|
||||
NFSPROC4_CLNT_SETACL,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#include <linux/pagemap.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/uio.h>
|
||||
|
||||
#include <linux/nfs_fs_sb.h>
|
||||
|
||||
|
@ -29,7 +28,6 @@
|
|||
#include <linux/nfs4.h>
|
||||
#include <linux/nfs_xdr.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/mempool.h>
|
||||
|
||||
/*
|
||||
|
@ -43,13 +41,6 @@
|
|||
#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
|
||||
#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
|
||||
|
||||
/*
|
||||
* The upper limit on timeouts for the exponential backoff algorithm.
|
||||
*/
|
||||
#define NFS_WRITEBACK_DELAY (5*HZ)
|
||||
#define NFS_WRITEBACK_LOCKDELAY (60*HZ)
|
||||
#define NFS_COMMIT_DELAY (5*HZ)
|
||||
|
||||
/*
|
||||
* superblock magic number for NFS
|
||||
*/
|
||||
|
@ -60,9 +51,6 @@
|
|||
*/
|
||||
#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
|
||||
|
||||
#define NFS_RW_SYNC 0x0001 /* O_SYNC handling */
|
||||
#define NFS_RW_SWAP 0x0002 /* This is a swap request */
|
||||
|
||||
/*
|
||||
* When flushing a cluster of dirty pages, there can be different
|
||||
* strategies:
|
||||
|
@ -96,7 +84,8 @@ struct nfs_open_context {
|
|||
int error;
|
||||
|
||||
struct list_head list;
|
||||
wait_queue_head_t waitq;
|
||||
|
||||
__u64 dir_cookie;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -104,6 +93,8 @@ struct nfs_open_context {
|
|||
*/
|
||||
struct nfs_delegation;
|
||||
|
||||
struct posix_acl;
|
||||
|
||||
/*
|
||||
* nfs fs inode data in memory
|
||||
*/
|
||||
|
@ -140,7 +131,6 @@ struct nfs_inode {
|
|||
*
|
||||
* mtime != read_cache_mtime
|
||||
*/
|
||||
unsigned long readdir_timestamp;
|
||||
unsigned long read_cache_jiffies;
|
||||
unsigned long attrtimeo;
|
||||
unsigned long attrtimeo_timestamp;
|
||||
|
@ -158,6 +148,10 @@ struct nfs_inode {
|
|||
atomic_t data_updates;
|
||||
|
||||
struct nfs_access_entry cache_access;
|
||||
#ifdef CONFIG_NFS_V3_ACL
|
||||
struct posix_acl *acl_access;
|
||||
struct posix_acl *acl_default;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This is the cookie verifier used for NFSv3 readdir
|
||||
|
@ -183,13 +177,13 @@ struct nfs_inode {
|
|||
wait_queue_head_t nfs_i_wait;
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
struct nfs4_cached_acl *nfs4_acl;
|
||||
/* NFSv4 state */
|
||||
struct list_head open_states;
|
||||
struct nfs_delegation *delegation;
|
||||
int delegation_state;
|
||||
struct rw_semaphore rwsem;
|
||||
#endif /* CONFIG_NFS_V4*/
|
||||
|
||||
struct inode vfs_inode;
|
||||
};
|
||||
|
||||
|
@ -203,6 +197,8 @@ struct nfs_inode {
|
|||
#define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */
|
||||
#define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */
|
||||
#define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */
|
||||
#define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */
|
||||
#define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */
|
||||
|
||||
static inline struct nfs_inode *NFS_I(struct inode *inode)
|
||||
{
|
||||
|
@ -294,12 +290,12 @@ extern int nfs_release(struct inode *, struct file *);
|
|||
extern int nfs_attribute_timeout(struct inode *inode);
|
||||
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
|
||||
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
|
||||
extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
|
||||
extern int nfs_setattr(struct dentry *, struct iattr *);
|
||||
extern void nfs_begin_attr_update(struct inode *);
|
||||
extern void nfs_end_attr_update(struct inode *);
|
||||
extern void nfs_begin_data_update(struct inode *);
|
||||
extern void nfs_end_data_update(struct inode *);
|
||||
extern void nfs_end_data_update_defer(struct inode *);
|
||||
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred);
|
||||
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
|
||||
extern void put_nfs_open_context(struct nfs_open_context *ctx);
|
||||
|
@ -314,6 +310,9 @@ extern u32 root_nfs_parse_addr(char *name); /*__init*/
|
|||
* linux/fs/nfs/file.c
|
||||
*/
|
||||
extern struct inode_operations nfs_file_inode_operations;
|
||||
#ifdef CONFIG_NFS_V3
|
||||
extern struct inode_operations nfs3_file_inode_operations;
|
||||
#endif /* CONFIG_NFS_V3 */
|
||||
extern struct file_operations nfs_file_operations;
|
||||
extern struct address_space_operations nfs_file_aops;
|
||||
|
||||
|
@ -328,6 +327,22 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/xattr.c
|
||||
*/
|
||||
#ifdef CONFIG_NFS_V3_ACL
|
||||
extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
|
||||
extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
|
||||
extern int nfs3_setxattr(struct dentry *, const char *,
|
||||
const void *, size_t, int);
|
||||
extern int nfs3_removexattr (struct dentry *, const char *name);
|
||||
#else
|
||||
# define nfs3_listxattr NULL
|
||||
# define nfs3_getxattr NULL
|
||||
# define nfs3_setxattr NULL
|
||||
# define nfs3_removexattr NULL
|
||||
#endif
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/direct.c
|
||||
*/
|
||||
|
@ -342,6 +357,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf,
|
|||
* linux/fs/nfs/dir.c
|
||||
*/
|
||||
extern struct inode_operations nfs_dir_inode_operations;
|
||||
#ifdef CONFIG_NFS_V3
|
||||
extern struct inode_operations nfs3_dir_inode_operations;
|
||||
#endif /* CONFIG_NFS_V3 */
|
||||
extern struct file_operations nfs_dir_operations;
|
||||
extern struct dentry_operations nfs_dentry_operations;
|
||||
|
||||
|
@ -377,10 +395,10 @@ extern void nfs_commit_done(struct rpc_task *);
|
|||
*/
|
||||
extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int);
|
||||
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
|
||||
extern int nfs_commit_inode(struct inode *, unsigned long, unsigned int, int);
|
||||
extern int nfs_commit_inode(struct inode *, int);
|
||||
#else
|
||||
static inline int
|
||||
nfs_commit_inode(struct inode *inode, unsigned long idx_start, unsigned int npages, int how)
|
||||
nfs_commit_inode(struct inode *inode, int how)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -434,11 +452,6 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
|
|||
mempool_free(p, nfs_wdata_mempool);
|
||||
}
|
||||
|
||||
/* Hack for future NFS swap support */
|
||||
#ifndef IS_SWAPFILE
|
||||
# define IS_SWAPFILE(inode) (0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/read.c
|
||||
*/
|
||||
|
@ -467,6 +480,29 @@ static inline void nfs_readdata_free(struct nfs_read_data *p)
|
|||
|
||||
extern void nfs_readdata_release(struct rpc_task *task);
|
||||
|
||||
/*
|
||||
* linux/fs/nfs3proc.c
|
||||
*/
|
||||
#ifdef CONFIG_NFS_V3_ACL
|
||||
extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type);
|
||||
extern int nfs3_proc_setacl(struct inode *inode, int type,
|
||||
struct posix_acl *acl);
|
||||
extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
|
||||
mode_t mode);
|
||||
extern void nfs3_forget_cached_acls(struct inode *inode);
|
||||
#else
|
||||
static inline int nfs3_proc_set_default_acl(struct inode *dir,
|
||||
struct inode *inode,
|
||||
mode_t mode)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nfs3_forget_cached_acls(struct inode *inode)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_NFS_V3_ACL */
|
||||
|
||||
/*
|
||||
* linux/fs/mount_clnt.c
|
||||
* (Used only by nfsroot module)
|
||||
|
@ -515,230 +551,6 @@ extern void * nfs_root_data(void);
|
|||
|
||||
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
|
||||
struct idmap;
|
||||
|
||||
/*
|
||||
* In a seqid-mutating op, this macro controls which error return
|
||||
* values trigger incrementation of the seqid.
|
||||
*
|
||||
* from rfc 3010:
|
||||
* The client MUST monotonically increment the sequence number for the
|
||||
* CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
|
||||
* operations. This is true even in the event that the previous
|
||||
* operation that used the sequence number received an error. The only
|
||||
* exception to this rule is if the previous operation received one of
|
||||
* the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
|
||||
* NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
|
||||
* NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
|
||||
*
|
||||
*/
|
||||
#define seqid_mutating_err(err) \
|
||||
(((err) != NFSERR_STALE_CLIENTID) && \
|
||||
((err) != NFSERR_STALE_STATEID) && \
|
||||
((err) != NFSERR_BAD_STATEID) && \
|
||||
((err) != NFSERR_BAD_SEQID) && \
|
||||
((err) != NFSERR_BAD_XDR) && \
|
||||
((err) != NFSERR_RESOURCE) && \
|
||||
((err) != NFSERR_NOFILEHANDLE))
|
||||
|
||||
enum nfs4_client_state {
|
||||
NFS4CLNT_OK = 0,
|
||||
};
|
||||
|
||||
/*
|
||||
* The nfs4_client identifies our client state to the server.
|
||||
*/
|
||||
struct nfs4_client {
|
||||
struct list_head cl_servers; /* Global list of servers */
|
||||
struct in_addr cl_addr; /* Server identifier */
|
||||
u64 cl_clientid; /* constant */
|
||||
nfs4_verifier cl_confirm;
|
||||
unsigned long cl_state;
|
||||
|
||||
u32 cl_lockowner_id;
|
||||
|
||||
/*
|
||||
* The following rwsem ensures exclusive access to the server
|
||||
* while we recover the state following a lease expiration.
|
||||
*/
|
||||
struct rw_semaphore cl_sem;
|
||||
|
||||
struct list_head cl_delegations;
|
||||
struct list_head cl_state_owners;
|
||||
struct list_head cl_unused;
|
||||
int cl_nunused;
|
||||
spinlock_t cl_lock;
|
||||
atomic_t cl_count;
|
||||
|
||||
struct rpc_clnt * cl_rpcclient;
|
||||
struct rpc_cred * cl_cred;
|
||||
|
||||
struct list_head cl_superblocks; /* List of nfs_server structs */
|
||||
|
||||
unsigned long cl_lease_time;
|
||||
unsigned long cl_last_renewal;
|
||||
struct work_struct cl_renewd;
|
||||
struct work_struct cl_recoverd;
|
||||
|
||||
wait_queue_head_t cl_waitq;
|
||||
struct rpc_wait_queue cl_rpcwaitq;
|
||||
|
||||
/* used for the setclientid verifier */
|
||||
struct timespec cl_boot_time;
|
||||
|
||||
/* idmapper */
|
||||
struct idmap * cl_idmap;
|
||||
|
||||
/* Our own IP address, as a null-terminated string.
|
||||
* This is used to generate the clientid, and the callback address.
|
||||
*/
|
||||
char cl_ipaddr[16];
|
||||
unsigned char cl_id_uniquifier;
|
||||
};
|
||||
|
||||
/*
|
||||
* NFS4 state_owners and lock_owners are simply labels for ordered
|
||||
* sequences of RPC calls. Their sole purpose is to provide once-only
|
||||
* semantics by allowing the server to identify replayed requests.
|
||||
*
|
||||
* The ->so_sema is held during all state_owner seqid-mutating operations:
|
||||
* OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
|
||||
* so_seqid.
|
||||
*/
|
||||
struct nfs4_state_owner {
|
||||
struct list_head so_list; /* per-clientid list of state_owners */
|
||||
struct nfs4_client *so_client;
|
||||
u32 so_id; /* 32-bit identifier, unique */
|
||||
struct semaphore so_sema;
|
||||
u32 so_seqid; /* protected by so_sema */
|
||||
atomic_t so_count;
|
||||
|
||||
struct rpc_cred *so_cred; /* Associated cred */
|
||||
struct list_head so_states;
|
||||
struct list_head so_delegations;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct nfs4_state maintains the client-side state for a given
|
||||
* (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
|
||||
*
|
||||
* OPEN:
|
||||
* In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
|
||||
* we need to know how many files are open for reading or writing on a
|
||||
* given inode. This information too is stored here.
|
||||
*
|
||||
* LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
|
||||
*/
|
||||
|
||||
struct nfs4_lock_state {
|
||||
struct list_head ls_locks; /* Other lock stateids */
|
||||
fl_owner_t ls_owner; /* POSIX lock owner */
|
||||
#define NFS_LOCK_INITIALIZED 1
|
||||
int ls_flags;
|
||||
u32 ls_seqid;
|
||||
u32 ls_id;
|
||||
nfs4_stateid ls_stateid;
|
||||
atomic_t ls_count;
|
||||
};
|
||||
|
||||
/* bits for nfs4_state->flags */
|
||||
enum {
|
||||
LK_STATE_IN_USE,
|
||||
NFS_DELEGATED_STATE,
|
||||
};
|
||||
|
||||
struct nfs4_state {
|
||||
struct list_head open_states; /* List of states for the same state_owner */
|
||||
struct list_head inode_states; /* List of states for the same inode */
|
||||
struct list_head lock_states; /* List of subservient lock stateids */
|
||||
|
||||
struct nfs4_state_owner *owner; /* Pointer to the open owner */
|
||||
struct inode *inode; /* Pointer to the inode */
|
||||
|
||||
unsigned long flags; /* Do we hold any locks? */
|
||||
struct semaphore lock_sema; /* Serializes file locking operations */
|
||||
rwlock_t state_lock; /* Protects the lock_states list */
|
||||
|
||||
nfs4_stateid stateid;
|
||||
|
||||
unsigned int nreaders;
|
||||
unsigned int nwriters;
|
||||
int state; /* State on the server (R,W, or RW) */
|
||||
atomic_t count;
|
||||
};
|
||||
|
||||
|
||||
struct nfs4_exception {
|
||||
long timeout;
|
||||
int retry;
|
||||
};
|
||||
|
||||
struct nfs4_state_recovery_ops {
|
||||
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
|
||||
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
|
||||
};
|
||||
|
||||
extern struct dentry_operations nfs4_dentry_operations;
|
||||
extern struct inode_operations nfs4_dir_inode_operations;
|
||||
|
||||
/* nfs4proc.c */
|
||||
extern int nfs4_map_errors(int err);
|
||||
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
|
||||
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
|
||||
extern int nfs4_proc_async_renew(struct nfs4_client *);
|
||||
extern int nfs4_proc_renew(struct nfs4_client *);
|
||||
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
|
||||
extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
|
||||
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
|
||||
|
||||
extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
|
||||
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
|
||||
|
||||
/* nfs4renewd.c */
|
||||
extern void nfs4_schedule_state_renewal(struct nfs4_client *);
|
||||
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
|
||||
extern void nfs4_kill_renewd(struct nfs4_client *);
|
||||
|
||||
/* nfs4state.c */
|
||||
extern void init_nfsv4_state(struct nfs_server *);
|
||||
extern void destroy_nfsv4_state(struct nfs_server *);
|
||||
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
|
||||
extern void nfs4_put_client(struct nfs4_client *clp);
|
||||
extern int nfs4_init_client(struct nfs4_client *clp);
|
||||
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
|
||||
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
|
||||
|
||||
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
|
||||
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
|
||||
extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
|
||||
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
|
||||
extern void nfs4_put_open_state(struct nfs4_state *);
|
||||
extern void nfs4_close_state(struct nfs4_state *, mode_t);
|
||||
extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
|
||||
extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
|
||||
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
|
||||
extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t);
|
||||
extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t);
|
||||
extern void nfs4_put_lock_state(struct nfs4_lock_state *state);
|
||||
extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
|
||||
extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
|
||||
extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
|
||||
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
|
||||
|
||||
|
||||
|
||||
struct nfs4_mount_data;
|
||||
#else
|
||||
#define init_nfsv4_state(server) do { } while (0)
|
||||
#define destroy_nfsv4_state(server) do { } while (0)
|
||||
#define nfs4_put_state_owner(inode, owner) do { } while (0)
|
||||
#define nfs4_put_open_state(state) do { } while (0)
|
||||
#define nfs4_close_state(a, b) do { } while (0)
|
||||
#define nfs4_renewd_prepare_shutdown(server) do { } while (0)
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/*
|
||||
|
|
|
@ -16,6 +16,11 @@ struct nfs_lock_info {
|
|||
struct nlm_lockowner *owner;
|
||||
};
|
||||
|
||||
struct nfs4_lock_state;
|
||||
struct nfs4_lock_info {
|
||||
struct nfs4_lock_state *owner;
|
||||
};
|
||||
|
||||
/*
|
||||
* Lock flag values
|
||||
*/
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
struct nfs_server {
|
||||
struct rpc_clnt * client; /* RPC client handle */
|
||||
struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */
|
||||
struct rpc_clnt * client_acl; /* ACL RPC client handle */
|
||||
struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */
|
||||
struct backing_dev_info backing_dev_info;
|
||||
int flags; /* various flags */
|
||||
|
|
|
@ -58,6 +58,7 @@ struct nfs_mount_data {
|
|||
#define NFS_MOUNT_KERBEROS 0x0100 /* 3 */
|
||||
#define NFS_MOUNT_NONLM 0x0200 /* 3 */
|
||||
#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */
|
||||
#define NFS_MOUNT_NOACL 0x0800 /* 4 */
|
||||
#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */
|
||||
#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
|
||||
#define NFS_MOUNT_FLAGMASK 0xFFFF
|
||||
|
|
|
@ -19,6 +19,12 @@

#include <asm/atomic.h>

/*
 * Valid flags for the radix tree
 */
#define NFS_PAGE_TAG_DIRTY 0
#define NFS_PAGE_TAG_WRITEBACK 1

/*
 * Valid flags for a dirty buffer
 */

@ -26,6 +32,7 @@
#define PG_NEED_COMMIT 1
#define PG_NEED_RESCHED 2

struct nfs_inode;
struct nfs_page {
struct list_head wb_list, /* Defines state of page: */
*wb_list_head; /* read/write/commit */

@ -54,14 +61,17 @@ extern void nfs_clear_request(struct nfs_page *req);
extern void nfs_release_request(struct nfs_page *req);

extern void nfs_list_add_request(struct nfs_page *, struct list_head *);

extern int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
unsigned long idx_start, unsigned int npages);
extern int nfs_scan_list(struct list_head *, struct list_head *,
unsigned long, unsigned int);
extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
unsigned int);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_writeback_locked(struct nfs_page *req);
extern void nfs_clear_page_writeback(struct nfs_page *req);

/*
 * Lock the page of an asynchronous request without incrementing the wb_count

@ -86,6 +96,18 @@ nfs_lock_request(struct nfs_page *req)
return 1;
}

/**
 * nfs_list_add_request - Insert a request into a list
 * @req: request
 * @head: head of list into which to insert the request.
 */
static inline void
nfs_list_add_request(struct nfs_page *req, struct list_head *head)
{
list_add_tail(&req->wb_list, head);
req->wb_list_head = head;
}

/**
 * nfs_list_remove_request - Remove a request from its wb_list

@ -96,10 +118,6 @@ nfs_list_remove_request(struct nfs_page *req)
{
if (list_empty(&req->wb_list))
return;
if (!NFS_WBACK_BUSY(req)) {
printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n");
BUG();
}
list_del_init(&req->wb_list);
req->wb_list_head = NULL;
}

@ -2,6 +2,7 @@
#define _LINUX_NFS_XDR_H

#include <linux/sunrpc/xprt.h>
#include <linux/nfsacl.h>

struct nfs4_fsid {
__u64 major;

@ -326,6 +327,20 @@ struct nfs_setattrargs {
const u32 * bitmask;
};

struct nfs_setaclargs {
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
};

struct nfs_getaclargs {
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
};

struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;

@ -354,6 +369,20 @@ struct nfs_readdirargs {
struct page ** pages;
};

struct nfs3_getaclargs {
struct nfs_fh * fh;
int mask;
struct page ** pages;
};

struct nfs3_setaclargs {
struct inode * inode;
int mask;
struct posix_acl * acl_access;
struct posix_acl * acl_default;
struct page ** pages;
};

struct nfs_diropok {
struct nfs_fh * fh;
struct nfs_fattr * fattr;

@ -477,6 +506,15 @@ struct nfs3_readdirres {
int plus;
};

struct nfs3_getaclres {
struct nfs_fattr * fattr;
int mask;
unsigned int acl_access_count;
unsigned int acl_default_count;
struct posix_acl * acl_access;
struct posix_acl * acl_default;
};

#ifdef CONFIG_NFS_V4

typedef u64 clientid4;

@ -667,6 +705,7 @@ struct nfs_rpc_ops {
int version; /* Protocol version */
struct dentry_operations *dentry_ops;
struct inode_operations *dir_inode_ops;
struct inode_operations *file_inode_ops;

int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);

@ -713,6 +752,7 @@ struct nfs_rpc_ops {
int (*file_open) (struct inode *, struct file *);
int (*file_release) (struct inode *, struct file *);
int (*lock)(struct file *, int, struct file_lock *);
void (*clear_acl_cache)(struct inode *);
};

/*

@ -732,4 +772,7 @@ extern struct rpc_version nfs_version2;
extern struct rpc_version nfs_version3;
extern struct rpc_version nfs_version4;

extern struct rpc_version nfsacl_version3;
extern struct rpc_program nfsacl_program;

#endif

58
include/linux/nfsacl.h
Normal file

@ -0,0 +1,58 @@
/*
 * File: linux/nfsacl.h
 *
 * (C) 2003 Andreas Gruenbacher <agruen@suse.de>
 */
#ifndef __LINUX_NFSACL_H
#define __LINUX_NFSACL_H

#define NFS_ACL_PROGRAM 100227

#define ACLPROC2_GETACL 1
#define ACLPROC2_SETACL 2
#define ACLPROC2_GETATTR 3
#define ACLPROC2_ACCESS 4

#define ACLPROC3_GETACL 1
#define ACLPROC3_SETACL 2

/* Flags for the getacl/setacl mode */
#define NFS_ACL 0x0001
#define NFS_ACLCNT 0x0002
#define NFS_DFACL 0x0004
#define NFS_DFACLCNT 0x0008

/* Flag for Default ACL entries */
#define NFS_ACL_DEFAULT 0x1000

#ifdef __KERNEL__

#include <linux/posix_acl.h>

/* Maximum number of ACL entries over NFS */
#define NFS_ACL_MAX_ENTRIES 1024

#define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES))
#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \
>> PAGE_SHIFT)

static inline unsigned int
nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default)
{
unsigned int w = 16;
w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12;
if (acl_default)
w += max((int)acl_default->a_count, 4) * 12;
return w;
}

extern unsigned int
nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
struct posix_acl *acl, int encode_entries, int typeflag);
extern unsigned int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);

#endif /* __KERNEL__ */
#endif /* __LINUX_NFSACL_H */

@ -15,6 +15,7 @@
#include <linux/unistd.h>
#include <linux/dirent.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/mount.h>

#include <linux/nfsd/debug.h>

@ -124,20 +125,39 @@ int nfsd_statfs(struct svc_rqst *, struct svc_fh *,
int nfsd_notify_change(struct inode *, struct iattr *);
int nfsd_permission(struct svc_export *, struct dentry *, int);

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
extern struct svc_version nfsd_acl_version2;
#else
#define nfsd_acl_version2 NULL
#endif
#ifdef CONFIG_NFSD_V3_ACL
extern struct svc_version nfsd_acl_version3;
#else
#define nfsd_acl_version3 NULL
#endif
struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
#endif

/*
 * NFSv4 State
 */
#ifdef CONFIG_NFSD_V4
int nfs4_state_init(void);
void nfs4_state_init(void);
int nfs4_state_start(void);
void nfs4_state_shutdown(void);
time_t nfs4_lease_time(void);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
#else
static inline int nfs4_state_init(void){return 0;}
static inline void nfs4_state_init(void){};
static inline int nfs4_state_start(void){return 0;}
static inline void nfs4_state_shutdown(void){}
static inline time_t nfs4_lease_time(void){return 0;}
static inline void nfs4_reset_lease(time_t leasetime){}
static inline int nfs4_reset_recoverydir(char *recdir) {return 0;}
#endif

/*

@ -61,11 +61,6 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid

extern stateid_t zerostateid;
extern stateid_t onestateid;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

struct nfs4_cb_recall {
u32 cbr_ident;

@ -77,8 +72,8 @@ struct nfs4_cb_recall {
};

struct nfs4_delegation {
struct list_head dl_del_perfile; /* nfs4_file->fi_del_perfile */
struct list_head dl_del_perclnt; /* nfs4_client->cl_del_perclnt*/
struct list_head dl_perfile;
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
atomic_t dl_count; /* ref count */
struct nfs4_client *dl_client;

@ -97,7 +92,6 @@ struct nfs4_delegation {
/* client delegation callback info */
struct nfs4_callback {
/* SETCLIENTID info */
u32 cb_parsed; /* addr parsed */
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;

@ -109,6 +103,8 @@ struct nfs4_callback {
struct rpc_clnt * cb_client;
};

#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */

/*
 * struct nfs4_client - one per client. Clientids live here.
 * o Each nfs4_client is hashed by clientid.

@ -122,10 +118,11 @@ struct nfs4_callback {
struct nfs4_client {
struct list_head cl_idhash; /* hash by cl_clientid.id */
struct list_head cl_strhash; /* hash by cl_name */
struct list_head cl_perclient; /* list: stateowners */
struct list_head cl_del_perclnt; /* list: delegations */
struct list_head cl_openowners;
struct list_head cl_delegations;
struct list_head cl_lru; /* tail queue */
struct xdr_netobj cl_name; /* id generated by client */
char cl_recdir[HEXDIR_LEN]; /* recovery dir */
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
u32 cl_addr; /* client ipaddress */

@ -134,6 +131,7 @@ struct nfs4_client {
nfs4_verifier cl_confirm; /* generated by server */
struct nfs4_callback cl_callback; /* callback info */
atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
};

/* struct nfs4_client_reset

@ -143,7 +141,7 @@ struct nfs4_client {
 */
struct nfs4_client_reclaim {
struct list_head cr_strhash; /* hash by cr_name */
struct xdr_netobj cr_name; /* id generated by client */
char cr_recdir[HEXDIR_LEN]; /* recover dir */
};

static inline void

@ -197,9 +195,9 @@ struct nfs4_stateowner {
struct kref so_ref;
struct list_head so_idhash; /* hash by so_id */
struct list_head so_strhash; /* hash by op_name */
struct list_head so_perclient; /* nfs4_client->cl_perclient */
struct list_head so_perfilestate; /* list: nfs4_stateid */
struct list_head so_perlockowner; /* nfs4_stateid->st_perlockowner */
struct list_head so_perclient;
struct list_head so_stateids;
struct list_head so_perstateid; /* for lockowners only */
struct list_head so_close_lru; /* tail queue */
time_t so_time; /* time of placement on so_close_lru */
int so_is_open_owner; /* 1=openowner,0=lockowner */

@ -217,9 +215,10 @@ struct nfs4_stateowner {
 * share_acces, share_deny on the file.
 */
struct nfs4_file {
struct kref fi_ref;
struct list_head fi_hash; /* hash by "struct inode *" */
struct list_head fi_perfile; /* list: nfs4_stateid */
struct list_head fi_del_perfile; /* list: nfs4_delegation */
struct list_head fi_stateids;
struct list_head fi_delegations;
struct inode *fi_inode;
u32 fi_id; /* used with stateowner->so_id
 * for stateid_hashtbl hash */

@ -241,8 +240,8 @@ struct nfs4_file {
struct nfs4_stateid {
struct list_head st_hash;
struct list_head st_perfile;
struct list_head st_perfilestate;
struct list_head st_perlockowner;
struct list_head st_perstateowner;
struct list_head st_lockowners;
struct nfs4_stateowner * st_stateowner;
struct nfs4_file * st_file;
stateid_t st_stateid;

@ -267,12 +266,9 @@ struct nfs4_stateid {
((err) != nfserr_stale_stateid) && \
((err) != nfserr_bad_stateid))

extern time_t nfs4_laundromat(void);
extern int nfsd4_renew(clientid_t *clid);
extern int nfs4_preprocess_stateid_op(struct svc_fh *current_fh,
stateid_t *stateid, int flags, struct file **filp);
extern int nfs4_share_conflict(struct svc_fh *current_fh,
unsigned int deny_type);
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);

@ -282,6 +278,15 @@ extern void nfs4_free_stateowner(struct kref *kref);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern int nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
extern int nfsd4_recdir_load(void);
extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name);
extern int nfs4_has_reclaimed_state(const char *name);
extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);

static inline void
nfs4_put_stateowner(struct nfs4_stateowner *so)

@ -169,4 +169,8 @@ int nfssvc_encode_entry(struct readdir_cd *, const char *name,

int nfssvc_release_fhandle(struct svc_rqst *, u32 *, struct nfsd_fhandle *);

/* Helper functions for NFSv2 ACL code */
u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp);
u32 *nfs2svc_decode_fh(u32 *p, struct svc_fh *fhp);

#endif /* LINUX_NFSD_H */

@ -110,6 +110,19 @@ struct nfsd3_commitargs {
__u32 count;
};

struct nfsd3_getaclargs {
struct svc_fh fh;
int mask;
};

struct posix_acl;
struct nfsd3_setaclargs {
struct svc_fh fh;
int mask;
struct posix_acl *acl_access;
struct posix_acl *acl_default;
};

struct nfsd3_attrstat {
__u32 status;
struct svc_fh fh;

@ -209,6 +222,14 @@ struct nfsd3_commitres {
struct svc_fh fh;
};

struct nfsd3_getaclres {
__u32 status;
struct svc_fh fh;
int mask;
struct posix_acl *acl_access;
struct posix_acl *acl_default;
};

/* dummy type for release */
struct nfsd3_fhandle_pair {
__u32 dummy;

@ -241,6 +262,7 @@ union nfsd3_xdrstore {
struct nfsd3_fsinfores fsinfores;
struct nfsd3_pathconfres pathconfres;
struct nfsd3_commitres commitres;
struct nfsd3_getaclres getaclres;
};

#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)

@ -316,6 +338,10 @@ int nfs3svc_encode_entry(struct readdir_cd *, const char *name,
int nfs3svc_encode_entry_plus(struct readdir_cd *, const char *name,
int namlen, loff_t offset, ino_t ino,
unsigned int);
/* Helper functions for NFSv3 ACL code */
u32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, u32 *p,
struct svc_fh *fhp);
u32 *nfs3svc_decode_fh(u32 *p, struct svc_fh *fhp);

#endif /* _LINUX_NFSD_XDR3_H */

@ -210,6 +210,7 @@ struct nfsd4_open {
u32 op_share_access; /* request */
u32 op_share_deny; /* request */
stateid_t op_stateid; /* response */
u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
u32 op_rflags; /* response */
int op_truncate; /* used during processing */

@ -43,8 +43,13 @@
/* XXX from linux/nfs_idmap.h */
#define IDMAP_NAMESZ 128

#ifdef CONFIG_NFSD_V4
void nfsd_idmap_init(void);
void nfsd_idmap_shutdown(void);
#else
static inline void nfsd_idmap_init(void) {};
static inline void nfsd_idmap_shutdown(void) {};
#endif

int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);

@ -27,6 +27,7 @@ struct node {
};

extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);

#define to_node(sys_device) container_of(sys_device, struct node, sysdev)

@ -3,7 +3,7 @@

#include <linux/config.h>

#ifdef CONFIG_DISCONTIGMEM
#ifndef CONFIG_FLATMEM
#include <asm/numnodes.h>
#endif

@ -20,8 +20,6 @@ extern void __nvram_write_byte(unsigned char c, int i);
extern void nvram_write_byte(unsigned char c, int i);
extern int __nvram_check_checksum(void);
extern int nvram_check_checksum(void);
extern void __nvram_set_checksum(void);
extern void nvram_set_checksum(void);
#endif

#endif /* _LINUX_NVRAM_H */

@ -61,21 +61,20 @@
#define PG_active 6
#define PG_slab 7 /* slab debug (Suparna wants this) */

#define PG_highmem 8
#define PG_checked 9 /* kill me in 2.5.<early>. */
#define PG_arch_1 10
#define PG_reserved 11
#define PG_checked 8 /* kill me in 2.5.<early>. */
#define PG_arch_1 9
#define PG_reserved 10
#define PG_private 11 /* Has something at ->private */

#define PG_private 12 /* Has something at ->private */
#define PG_writeback 13 /* Page is under writeback */
#define PG_nosave 14 /* Used for system suspend/resume */
#define PG_compound 15 /* Part of a compound page */
#define PG_writeback 12 /* Page is under writeback */
#define PG_nosave 13 /* Used for system suspend/resume */
#define PG_compound 14 /* Part of a compound page */
#define PG_swapcache 15 /* Swap page: swp_entry_t in private */

#define PG_swapcache 16 /* Swap page: swp_entry_t in private */
#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
#define PG_reclaim 18 /* To be reclaimed asap */
#define PG_nosave_free 19 /* Free, should not be written */
#define PG_uncached 20 /* Page has been mapped as uncached */
#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
#define PG_reclaim 17 /* To be reclaimed asap */
#define PG_nosave_free 18 /* Free, should not be written */
#define PG_uncached 19 /* Page has been mapped as uncached */

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are

@ -136,8 +135,8 @@ struct page_state {

extern void get_page_state(struct page_state *ret);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long __read_page_state(unsigned offset);
extern void __mod_page_state(unsigned offset, unsigned long delta);
extern unsigned long __read_page_state(unsigned long offset);
extern void __mod_page_state(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
__read_page_state(offsetof(struct page_state, member))

@ -215,7 +214,7 @@ extern void __mod_page_state(unsigned offset, unsigned long delta);
#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
#define PageHighMem(page) is_highmem(page_zone(page))
#else
#define PageHighMem(page) 0 /* needed to optimize away at compile time */
#endif

@ -52,12 +52,12 @@ void release_pages(struct page **pages, int nr, int cold);

static inline struct page *page_cache_alloc(struct address_space *x)
{
return alloc_pages(mapping_gfp_mask(x), 0);
return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
}

typedef int filler_t(void *, struct page *);

@ -734,16 +734,20 @@ void pcibios_update_irq(struct pci_dev *, int irq);
/* Generic PCI functions used internally */

extern struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
{
return pci_scan_bus_parented(NULL, bus, ops, sysdata);
struct pci_bus *root_bus;
root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
if (root_bus)
pci_bus_add_devices(root_bus);
return root_bus;
}
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_bus_add_devices(struct pci_bus *bus);
void pci_name_device(struct pci_dev *dev);
char *pci_class_name(u32 class);
void pci_read_bridge_bases(struct pci_bus *child);

@ -870,6 +874,15 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)

enum pci_dma_burst_strategy {
PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
strategy_parameter is N/A */
PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
byte boundaries */
PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
strategy_parameter byte boundaries */
};

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
extern struct pci_dev *isa_bridge;
#endif

@ -972,6 +985,8 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif

#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)

#endif /* !CONFIG_PCI */

/* these helpers provide future and backwards compatibility

@ -1016,6 +1031,20 @@ static inline char *pci_name(struct pci_dev *pdev)
#define pci_pretty_name(dev) ""
#endif

/* Some archs don't want to expose struct resource to userland as-is
 * in sysfs and /proc
 */
#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc, u64 *start, u64 *end)
{
*start = rsrc->start;
*end = rsrc->end;
}
#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */

/*
 * The world is not perfect and supplies us with broken PCI devices.
 * For at least a part of these bugs we need a work-around, so both

Some files were not shown because too many files have changed in this diff.