Merge branch 'master'; commit 'v2.6.38-rc7' into next
commit 1cc26bada9
5774 changed files with 275913 additions and 122378 deletions
@@ -1,5 +1,6 @@
header-y += byteorder/
header-y += can/
header-y += caif/
header-y += dvb/
header-y += hdlc/
header-y += isdn/

@@ -20,15 +21,18 @@ header-y += wimax/
objhdr-y += version.h
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
$(srctree)/include/asm-$(SRCARCH)/a.out.h),)
$(srctree)/include/asm-$(SRCARCH)/a.out.h \
$(INSTALL_HDR_PATH)/include/asm-*/a.out.h),)
header-y += a.out.h
endif
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
$(srctree)/include/asm-$(SRCARCH)/kvm.h),)
$(srctree)/include/asm-$(SRCARCH)/kvm.h \
$(INSTALL_HDR_PATH)/include/asm-*/kvm.h),)
header-y += kvm.h
endif
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
$(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
$(srctree)/include/asm-$(SRCARCH)/kvm_para.h \
$(INSTALL_HDR_PATH)/include/asm-*/kvm_para.h),)
header-y += kvm_para.h
endif

@@ -155,6 +159,7 @@ header-y += icmpv6.h
header-y += if.h
header-y += if_addr.h
header-y += if_addrlabel.h
header-y += if_alg.h
header-y += if_arcnet.h
header-y += if_arp.h
header-y += if_bonding.h
@@ -306,9 +306,6 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
extern void acpi_early_init(void);
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
#else /* !CONFIG_ACPI */
#define acpi_disabled 1

@@ -352,4 +349,14 @@ static inline int acpi_table_parse(char *id,
return -1;
}
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_SLEEP
int suspend_nvs_register(unsigned long start, unsigned long size);
#else
static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
return 0;
}
#endif
#endif /*_LINUX_ACPI_H*/
include/linux/acpi_io.h (new file, 16 lines added)
@@ -0,0 +1,16 @@
#ifndef _ACPI_IO_H_
#define _ACPI_IO_H_
#include <linux/io.h>
#include <acpi/acpi.h>
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
return ioremap_cache(phys, size);
}
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
#endif
@@ -102,10 +102,8 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
extern int agp_bind_memory(struct agp_memory *, off_t);
extern int agp_unbind_memory(struct agp_memory *);
extern int agp_rebind_memory(void);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
extern void agp_flush_chipset(struct agp_bridge_data *);
#endif /* _AGP_BACKEND_H */
@@ -12,7 +12,6 @@
*
* Please credit ARM.com
* Documentation: ARM DDI 0196D
*
*/
#ifndef AMBA_PL08X_H

@@ -22,6 +21,15 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
struct pl08x_lli;
struct pl08x_driver_data;
/* Bitmasks for selecting AHB ports for DMA transfers */
enum {
PL08X_AHB1 = (1 << 0),
PL08X_AHB2 = (1 << 1)
};
/**
* struct pl08x_channel_data - data structure to pass info between
* platform and PL08x driver regarding channel configuration

@@ -46,8 +54,10 @@
* @circular_buffer: whether the buffer passed in is circular and
* shall simply be looped round round (like a record baby round
* round round round)
* @single: the device connected to this channel will request single
* DMA transfers, not bursts. (Bursts are default.)
* @single: the device connected to this channel will request single DMA
* transfers, not bursts. (Bursts are default.)
* @periph_buses: the device connected to this channel is accessible via
* these buses (use PL08X_AHB1 | PL08X_AHB2).
*/
struct pl08x_channel_data {
char *bus_id;

@@ -55,10 +65,10 @@ struct pl08x_channel_data {
int max_signal;
u32 muxval;
u32 cctl;
u32 ccfg;
dma_addr_t addr;
bool circular_buffer;
bool single;
u8 periph_buses;
};
/**

@@ -67,24 +77,23 @@ struct pl08x_channel_data {
* @addr: current address
* @maxwidth: the maximum width of a transfer on this bus
* @buswidth: the width of this bus in bytes: 1, 2 or 4
* @fill_bytes: bytes required to fill to the next bus memory
* boundary
* @fill_bytes: bytes required to fill to the next bus memory boundary
*/
struct pl08x_bus_data {
dma_addr_t addr;
u8 maxwidth;
u8 buswidth;
u32 fill_bytes;
size_t fill_bytes;
};
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
* @lock: a lock to use when altering an instance of this struct
* @signal: the physical signal (aka channel) serving this
* physical channel right now
* @serving: the virtual channel currently being served by this
* physical channel
* @signal: the physical signal (aka channel) serving this physical channel
* right now
* @serving: the virtual channel currently being served by this physical
* channel
*/
struct pl08x_phy_chan {
unsigned int id;

@@ -92,11 +101,6 @@ struct pl08x_phy_chan {
spinlock_t lock;
int signal;
struct pl08x_dma_chan *serving;
u32 csrc;
u32 cdst;
u32 clli;
u32 cctl;
u32 ccfg;
};
/**

@@ -108,26 +112,23 @@ struct pl08x_txd {
struct dma_async_tx_descriptor tx;
struct list_head node;
enum dma_data_direction direction;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
int len;
dma_addr_t src_addr;
dma_addr_t dst_addr;
size_t len;
dma_addr_t llis_bus;
void *llis_va;
struct pl08x_channel_data *cd;
bool active;
struct pl08x_lli *llis_va;
/* Default cctl value for LLIs */
u32 cctl;
/*
* Settings to be put into the physical channel when we
* trigger this txd
* trigger this txd. Other registers are in llis_va[0].
*/
u32 csrc;
u32 cdst;
u32 clli;
u32 cctl;
u32 ccfg;
};
/**
* struct pl08x_dma_chan_state - holds the PL08x specific virtual
* channel states
* struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
* states
* @PL08X_CHAN_IDLE: the channel is idle
* @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
* channel and is running a transfer on it

@@ -147,6 +148,8 @@ enum pl08x_dma_chan_state {
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
* @chan: wrappped abstract channel
* @phychan: the physical channel utilized by this channel, if there is one
* @phychan_hold: if non-zero, hold on to the physical channel even if we
* have no pending entries
* @tasklet: tasklet scheduled by the IRQ to handle actual work etc
* @name: name of channel
* @cd: channel platform data
@@ -154,53 +157,49 @@ enum pl08x_dma_chan_state {
* @runtime_direction: current direction of this channel according to
* runtime config
* @lc: last completed transaction on this channel
* @desc_list: queued transactions pending on this channel
* @pend_list: queued transactions pending on this channel
* @at: active transaction on this channel
* @lockflags: sometimes we let a lock last between two function calls,
* especially prep/submit, and then we need to store the IRQ flags
* in the channel state, here
* @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
* @waiting: a TX descriptor on this channel which is waiting for
* a physical channel to become available
* @waiting: a TX descriptor on this channel which is waiting for a physical
* channel to become available
*/
struct pl08x_dma_chan {
struct dma_chan chan;
struct pl08x_phy_chan *phychan;
int phychan_hold;
struct tasklet_struct tasklet;
char *name;
struct pl08x_channel_data *cd;
dma_addr_t runtime_addr;
enum dma_data_direction runtime_direction;
atomic_t last_issued;
dma_cookie_t lc;
struct list_head desc_list;
struct list_head pend_list;
struct pl08x_txd *at;
unsigned long lockflags;
spinlock_t lock;
void *host;
struct pl08x_driver_data *host;
enum pl08x_dma_chan_state state;
bool slave;
struct pl08x_txd *waiting;
};
/**
* struct pl08x_platform_data - the platform configuration for the
* PL08x PrimeCells.
* struct pl08x_platform_data - the platform configuration for the PL08x
* PrimeCells.
* @slave_channels: the channels defined for the different devices on the
* platform, all inclusive, including multiplexed channels. The available
* physical channels will be multiplexed around these signals as they
* are requested, just enumerate all possible channels.
* @get_signal: request a physical signal to be used for a DMA
* transfer immediately: if there is some multiplexing or similar blocking
* the use of the channel the transfer can be denied by returning
* less than zero, else it returns the allocated signal number
* physical channels will be multiplexed around these signals as they are
* requested, just enumerate all possible channels.
* @get_signal: request a physical signal to be used for a DMA transfer
* immediately: if there is some multiplexing or similar blocking the use
* of the channel the transfer can be denied by returning less than zero,
* else it returns the allocated signal number
* @put_signal: indicate to the platform that this physical signal is not
* running any DMA transfer and multiplexing can be recycled
* @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
* LLI addresses are on 0/1 Master 1/2.
* @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
*/
struct pl08x_platform_data {
struct pl08x_channel_data *slave_channels;

@@ -208,6 +207,8 @@ struct pl08x_platform_data {
struct pl08x_channel_data memcpy_channel;
int (*get_signal)(struct pl08x_dma_chan *);
void (*put_signal)(struct pl08x_dma_chan *);
u8 lli_buses;
u8 mem_buses;
};
#ifdef CONFIG_AMBA_PL08X
@@ -372,6 +372,7 @@ struct audit_buffer;
struct audit_context;
struct inode;
struct netlink_skb_parms;
struct path;
struct linux_binprm;
struct mq_attr;
struct mqstat;
@@ -24,7 +24,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
#define AUTOFS_PROTO_SUBVERSION 1
#define AUTOFS_PROTO_SUBVERSION 2
/* Mask for expire behaviour */
#define AUTOFS_EXP_IMMEDIATE 1
@@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data {
const unsigned short *mac_peripherals;
int phy_mode;
unsigned int phy_mask;
unsigned short vlan1_mask, vlan2_mask;
};
#endif
@@ -25,7 +25,7 @@ struct pt_regs;
/*
* This structure is used to hold the arguments that are used when loading binaries.
*/
struct linux_binprm{
struct linux_binprm {
char buf[BINPRM_BUF_SIZE];
#ifdef CONFIG_MMU
struct vm_area_struct *vma;

@@ -93,7 +93,6 @@ struct linux_binfmt {
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
int hasvdso;
};
extern int __register_binfmt(struct linux_binfmt *fmt, int insert);

@@ -113,7 +112,7 @@ extern void unregister_binfmt(struct linux_binfmt *);
extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int search_binary_handler(struct linux_binprm *, struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
@@ -115,6 +115,7 @@ struct request {
void *elevator_private3;
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
unsigned long long start_time_ns;

@@ -646,7 +647,6 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);

@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,

@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*

@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
extern void throtl_shutdown_timer_wq(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)

@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
#endif /* CONFIG_BLK_DEV_THROTTLING */

@@ -1256,6 +1253,9 @@ struct block_device_operations {
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*direct_access) (struct block_device *, sector_t,
void **, unsigned long *);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
int (*media_changed) (struct gendisk *);
void (*unlock_native_capacity) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
extern void blk_dump_cmd(char *buf, struct request *rq);
extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
include/linux/caif/Kbuild (new file, 2 lines added)
@@ -0,0 +1,2 @@
header-y += caif_socket.h
header-y += if_caif.h
@@ -28,8 +28,6 @@ int cdev_add(struct cdev *, dev_t, unsigned);
void cdev_del(struct cdev *);
int cdev_index(struct inode *inode);
void cd_forget(struct inode *);
extern struct backing_dev_info directly_mappable_cdev_bdi;
@@ -946,6 +946,8 @@ struct cdrom_device_info {
/* device-related storage */
unsigned int options : 30; /* options flags */
unsigned mc_flags : 2; /* media change buffer flags */
unsigned int vfs_events; /* cached events for vfs path */
unsigned int ioctl_events; /* cached events for ioctl path */
int use_count; /* number of times device opened */
char name[20]; /* name of the device type */
/* per-device flags */

@@ -965,6 +967,8 @@ struct cdrom_device_ops {
int (*open) (struct cdrom_device_info *, int);
void (*release) (struct cdrom_device_info *);
int (*drive_status) (struct cdrom_device_info *, int);
unsigned int (*check_events) (struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
int (*media_changed) (struct cdrom_device_info *, int);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);

@@ -993,6 +997,8 @@ extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct cdrom_device_info *cdi);
@@ -43,6 +43,10 @@
#define CEPH_FEATURE_NOSRCADDR (1<<1)
#define CEPH_FEATURE_MONCLOCKCHECK (1<<2)
#define CEPH_FEATURE_FLOCK (1<<3)
#define CEPH_FEATURE_SUBSCRIBE2 (1<<4)
#define CEPH_FEATURE_MONNAMES (1<<5)
#define CEPH_FEATURE_RECONNECT_SEQ (1<<6)
#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
/*

@@ -55,10 +59,10 @@ struct ceph_file_layout {
__le32 fl_stripe_count; /* over this many objects */
__le32 fl_object_size; /* until objects are this big, then move to
new objects */
__le32 fl_cas_hash; /* 0 = none; 1 = sha256 */
__le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */
/* pg -> disk layout */
__le32 fl_object_stripe_unit; /* for per-object parity, if any */
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */

@@ -69,6 +73,12 @@ struct ceph_file_layout {
int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
struct ceph_dir_layout {
__u8 dl_dir_hash; /* see ceph_hash.h for ids */
__u8 dl_unused1;
__u16 dl_unused2;
__u32 dl_unused3;
} __attribute__ ((packed));
/* crypto algorithms */
#define CEPH_CRYPTO_NONE 0x0

@@ -457,7 +467,7 @@ struct ceph_mds_reply_inode {
struct ceph_timespec rctime;
struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */
} __attribute__ ((packed));
/* followed by frag array, then symlink string, then xattr blob */
/* followed by frag array, symlink string, dir layout, xattr blob */
/* reply_lease follows dname, and reply_inode */
struct ceph_mds_reply_lease {
@@ -110,17 +110,12 @@ struct ceph_msg_pos {
/*
* ceph_connection state bit flags
*
* QUEUED and BUSY are used together to ensure that only a single
* thread is currently opening, reading or writing data to the socket.
*/
#define LOSSYTX 0 /* we can close channel or drop messages on errors */
#define CONNECTING 1
#define NEGOTIATING 2
#define KEEPALIVE_PENDING 3
#define WRITE_PENDING 4 /* we have data ready to send */
#define QUEUED 5 /* there is work queued on this connection */
#define BUSY 6 /* work is being done */
#define STANDBY 8 /* no outgoing messages, socket closed. we keep
* the ceph_connection around to maintain shared
* state with the peer. */

@@ -128,6 +123,7 @@ struct ceph_msg_pos {
#define SOCK_CLOSED 11 /* socket state changed to closed */
#define OPENING 13 /* open connection w/ (possibly new) peer */
#define DEAD 14 /* dead, about to kfree */
#define BACKOFF 15
/*
* A single connection with another host.

@@ -165,7 +161,6 @@ struct ceph_connection {
struct list_head out_queue;
struct list_head out_sent; /* sending or sent but unacked */
u64 out_seq; /* last message queued for send */
bool out_keepalive_pending;
u64 in_seq, in_seq_acked; /* last message received, acked */
@@ -564,7 +564,7 @@ struct cgroup_iter {
/*
* To iterate across the tasks in a cgroup:
*
* 1) call cgroup_iter_start to intialize an iterator
* 1) call cgroup_iter_start to initialize an iterator
*
* 2) call cgroup_iter_next() to retrieve member tasks until it
* returns NULL or until you want to end the iteration
@@ -1,22 +0,0 @@
/* Coda filesystem -- Linux Minicache
*
* Copyright (C) 1989 - 1997 Carnegie Mellon University
*
* Carnegie Mellon University encourages users of this software to
* contribute improvements to the Coda project. Contact Peter Braam
* <coda@cs.cmu.edu>
*/
#ifndef _CFSNC_HEADER_
#define _CFSNC_HEADER_
/* credential cache */
void coda_cache_enter(struct inode *inode, int mask);
void coda_cache_clear_inode(struct inode *);
void coda_cache_clear_all(struct super_block *sb);
int coda_cache_check(struct inode *inode, int mask);
/* for downcalls and attributes and lookups */
void coda_flag_inode_children(struct inode *inode, int flag);
#endif /* _CFSNC_HEADER_ */
@@ -1,58 +0,0 @@
/*
* coda_fs_i.h
*
* Copyright (C) 1998 Carnegie Mellon University
*
*/
#ifndef _LINUX_CODA_FS_I
#define _LINUX_CODA_FS_I
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/coda.h>
/*
* coda fs inode data
* c_lock protects accesses to c_flags, c_mapcount, c_cached_epoch, c_uid and
* c_cached_perm.
* vfs_inode is set only when the inode is created and never changes.
* c_fid is set when the inode is created and should be considered immutable.
*/
struct coda_inode_info {
struct CodaFid c_fid; /* Coda identifier */
u_short c_flags; /* flags (see below) */
unsigned int c_mapcount; /* nr of times this inode is mapped */
unsigned int c_cached_epoch; /* epoch for cached permissions */
vuid_t c_uid; /* fsuid for cached permissions */
unsigned int c_cached_perm; /* cached access permissions */
spinlock_t c_lock;
struct inode vfs_inode;
};
/*
* coda fs file private data
*/
#define CODA_MAGIC 0xC0DAC0DA
struct coda_file_info {
int cfi_magic; /* magic number */
struct file *cfi_container; /* container file for this cnode */
unsigned int cfi_mapcount; /* nr of times this file is mapped */
};
#define CODA_FTOC(file) ((struct coda_file_info *)((file)->private_data))
/* flags */
#define C_VATTR 0x1 /* Validity of vattr in inode */
#define C_FLUSH 0x2 /* used after a flush */
#define C_DYING 0x4 /* from venus (which died) */
#define C_PURGE 0x8
int coda_cnode_make(struct inode **, struct CodaFid *, struct super_block *);
struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr);
int coda_cnode_makectl(struct inode **inode, struct super_block *sb);
struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb);
void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *);
#endif
@@ -1,99 +0,0 @@
/*
* Coda File System, Linux Kernel module
*
* Original version, adapted from cfs_mach.c, (C) Carnegie Mellon University
* Linux modifications (C) 1996, Peter J. Braam
* Rewritten for Linux 2.1 (C) 1997 Carnegie Mellon University
*
* Carnegie Mellon University encourages users of this software to
* contribute improvements to the Coda project.
*/
#ifndef _LINUX_CODA_FS
#define _LINUX_CODA_FS
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/coda_fs_i.h>
/* operations */
extern const struct inode_operations coda_dir_inode_operations;
extern const struct inode_operations coda_file_inode_operations;
extern const struct inode_operations coda_ioctl_inode_operations;
extern const struct address_space_operations coda_file_aops;
extern const struct address_space_operations coda_symlink_aops;
extern const struct file_operations coda_dir_operations;
extern const struct file_operations coda_file_operations;
extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
int coda_release(struct inode *i, struct file *f);
int coda_permission(struct inode *inode, int mask, unsigned int flags);
int coda_revalidate_inode(struct dentry *);
int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int coda_setattr(struct dentry *, struct iattr *);
/* this file: heloers */
char *coda_f2s(struct CodaFid *f);
int coda_isroot(struct inode *i);
int coda_iscontrol(const char *name, size_t length);
void coda_vattr_to_iattr(struct inode *, struct coda_vattr *);
void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *);
unsigned short coda_flags_to_cflags(unsigned short);
/* sysctl.h */
void coda_sysctl_init(void);
void coda_sysctl_clean(void);
#define CODA_ALLOC(ptr, cast, size) do { \
if (size < PAGE_SIZE) \
ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
else \
ptr = (cast)vmalloc((unsigned long) size); \
if (!ptr) \
printk("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \
else memset( ptr, 0, size ); \
} while (0)
#define CODA_FREE(ptr,size) \
do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0)
/* inode to cnode access functions */
static inline struct coda_inode_info *ITOC(struct inode *inode)
{
return list_entry(inode, struct coda_inode_info, vfs_inode);
}
static __inline__ struct CodaFid *coda_i2f(struct inode *inode)
{
return &(ITOC(inode)->c_fid);
}
static __inline__ char *coda_i2s(struct inode *inode)
{
return coda_f2s(&(ITOC(inode)->c_fid));
}
/* this will not zap the inode away */
static __inline__ void coda_flag_inode(struct inode *inode, int flag)
{
struct coda_inode_info *cii = ITOC(inode);
spin_lock(&cii->c_lock);
cii->c_flags |= flag;
spin_unlock(&cii->c_lock);
}
#endif
@@ -11,6 +11,9 @@
/* The full zone was compacted */
#define COMPACT_COMPLETE 3
#define COMPACT_MODE_DIRECT_RECLAIM 0
#define COMPACT_MODE_KSWAPD 1
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,

@@ -21,7 +24,12 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask);
int order, gfp_t gfp_mask, nodemask_t *mask,
bool sync);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, bool sync,
int compact_mode);
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

@@ -54,7 +62,20 @@ static inline bool compaction_deferred(struct zone *zone)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask)
int order, gfp_t gfp_mask, nodemask_t *nodemask,
bool sync)
{
return COMPACT_CONTINUE;
}
static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
return COMPACT_SKIPPED;
}
static inline unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, bool sync,
int compact_mode)
{
return COMPACT_CONTINUE;
}
@@ -139,9 +139,9 @@ extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_n
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern struct console *console_drivers;
extern void acquire_console_sem(void);
extern int try_acquire_console_sem(void);
extern void release_console_sem(void);
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
extern struct tty_driver *console_device(int *);
@@ -39,10 +39,12 @@
* Severity difinition for error_severity in struct cper_record_header
* and section_severity in struct cper_section_descriptor
*/
#define CPER_SEV_RECOVERABLE 0x0
#define CPER_SEV_FATAL 0x1
#define CPER_SEV_CORRECTED 0x2
#define CPER_SEV_INFORMATIONAL 0x3
enum {
CPER_SEV_RECOVERABLE,
CPER_SEV_FATAL,
CPER_SEV_CORRECTED,
CPER_SEV_INFORMATIONAL,
};
/*
* Validation bits difinition for validation_bits in struct

@@ -201,6 +203,47 @@
UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
0xDF, 0xAA, 0x84, 0xEC)
#define CPER_PROC_VALID_TYPE 0x0001
#define CPER_PROC_VALID_ISA 0x0002
#define CPER_PROC_VALID_ERROR_TYPE 0x0004
#define CPER_PROC_VALID_OPERATION 0x0008
#define CPER_PROC_VALID_FLAGS 0x0010
#define CPER_PROC_VALID_LEVEL 0x0020
#define CPER_PROC_VALID_VERSION 0x0040
#define CPER_PROC_VALID_BRAND_INFO 0x0080
#define CPER_PROC_VALID_ID 0x0100
#define CPER_PROC_VALID_TARGET_ADDRESS 0x0200
#define CPER_PROC_VALID_REQUESTOR_ID 0x0400
#define CPER_PROC_VALID_RESPONDER_ID 0x0800
#define CPER_PROC_VALID_IP 0x1000
#define CPER_MEM_VALID_ERROR_STATUS 0x0001
#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002
#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004
#define CPER_MEM_VALID_NODE 0x0008
#define CPER_MEM_VALID_CARD 0x0010
#define CPER_MEM_VALID_MODULE 0x0020
#define CPER_MEM_VALID_BANK 0x0040
#define CPER_MEM_VALID_DEVICE 0x0080
#define CPER_MEM_VALID_ROW 0x0100
#define CPER_MEM_VALID_COLUMN 0x0200
#define CPER_MEM_VALID_BIT_POSITION 0x0400
#define CPER_MEM_VALID_REQUESTOR_ID 0x0800
#define CPER_MEM_VALID_RESPONDER_ID 0x1000
#define CPER_MEM_VALID_TARGET_ID 0x2000
#define CPER_MEM_VALID_ERROR_TYPE 0x4000
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
#define CPER_PCIE_VALID_COMMAND_STATUS 0x0004
#define CPER_PCIE_VALID_DEVICE_ID 0x0008
#define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010
#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020
#define CPER_PCIE_VALID_CAPABILITY 0x0040
#define CPER_PCIE_VALID_AER_INFO 0x0080
#define CPER_PCIE_SLOT_SHIFT 3
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS

@@ -306,6 +349,41 @@ struct cper_sec_mem_err {
__u8 error_type;
};
struct cper_sec_pcie {
__u64 validation_bits;
__u32 port_type;
struct {
__u8 minor;
__u8 major;
__u8 reserved[2];
} version;
__u16 command;
__u16 status;
__u32 reserved;
struct {
__u16 vendor_id;
__u16 device_id;
__u8 class_code[3];
__u8 function;
__u8 device;
__u16 segment;
__u8 bus;
__u8 secondary_bus;
__u16 slot;
__u8 reserved;
} device_id;
struct {
__u32 lower;
__u32 upper;
} serial_number;
struct {
__u16 secondary_status;
__u16 control;
} bridge;
__u8 capability[60];
__u8 aer_info[96];
};
/* Reset to default packing */
#pragma pack()
@@ -47,13 +47,7 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */
#define CPUIDLE_FLAG_POLL (0x10) /* no latency, no savings */
#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */
#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -84,9 +84,11 @@ struct cramfs_super {
| CRAMFS_FLAG_WRONG_SIGNATURE \
| CRAMFS_FLAG_SHIFTED_ROOT_OFFSET )
#ifdef __KERNEL__
/* Uncompression interfaces to the underlying zlib */
int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
int cramfs_uncompress_init(void);
void cramfs_uncompress_exit(void);
#endif /* __KERNEL__ */
#endif
@@ -103,14 +103,20 @@ static inline int cs5535_has_vsa2(void)
#define GPIO_POSITIVE_EDGE_STS 0x48
#define GPIO_NEGATIVE_EDGE_STS 0x4C
#define GPIO_FLTR7_AMOUNT 0xD8
#define GPIO_MAP_X 0xE0
#define GPIO_MAP_Y 0xE4
#define GPIO_MAP_Z 0xE8
#define GPIO_MAP_W 0xEC
#define GPIO_FE7_SEL 0xF7
void cs5535_gpio_set(unsigned offset, unsigned int reg);
void cs5535_gpio_clear(unsigned offset, unsigned int reg);
int cs5535_gpio_isset(unsigned offset, unsigned int reg);
int cs5535_gpio_set_irq(unsigned group, unsigned irq);
void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);
/* MFGPTs */
@@ -167,6 +167,8 @@ struct dentry_operations {
void (*d_release)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool, bool);
} ____cacheline_aligned;
/*

@@ -205,13 +207,18 @@ struct dentry_operations {
#define DCACHE_CANT_MOUNT 0x0100
#define DCACHE_GENOCIDE 0x0200
#define DCACHE_MOUNTED 0x0400 /* is a mountpoint */
#define DCACHE_OP_HASH 0x1000
#define DCACHE_OP_COMPARE 0x2000
#define DCACHE_OP_REVALIDATE 0x4000
#define DCACHE_OP_DELETE 0x8000
#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */
#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */
#define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
extern seqlock_t rename_lock;
static inline int dname_external(struct dentry *dentry)

@@ -307,7 +314,7 @@ extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
* __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
* @dentry: dentry to take a ref on
* @seq: seqcount to verify against
* @Returns: 0 on failure, else 1.
* Returns: 0 on failure, else 1.
*
* __d_rcu_to_refcount operates on a dentry,seq pair that was returned
* by __d_lookup_rcu, to get a reference on an rcu-walk dentry.

@@ -399,7 +406,12 @@ static inline void dont_mount(struct dentry *dentry)
extern void dput(struct dentry *);
static inline int d_mountpoint(struct dentry *dentry)
static inline bool d_managed(struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MANAGED_DENTRY;
}
static inline bool d_mountpoint(struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MOUNTED;
}
@@ -101,8 +101,8 @@ struct ieee_pfc {
*/
struct dcb_app {
__u8 selector;
__u32 protocol;
__u8 priority;
__u16 protocol;
};
struct dcbmsg {
@@ -13,10 +13,10 @@
#ifdef CONFIG_PROFILING
#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/types.h>
struct dcookie_user;
struct path;
/**
* dcookie_register - register a user of dcookies
@@ -1,9 +1,6 @@
#ifndef INFLATE_H
#define INFLATE_H
/* Other housekeeping constants */
#define INBUFSIZ 4096
int gunzip(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
@@ -61,8 +61,6 @@ static void free(void *where)
#define large_malloc(a) malloc(a)
#define large_free(a) free(a)
#define set_error_fn(x)
#define INIT
#else /* STATIC */

@@ -72,6 +70,7 @@ static void free(void *where)
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/* Use defines rather than static inline in order to avoid spurious

@@ -84,9 +83,6 @@ static void free(void *where)
#define large_malloc(a) vmalloc(a)
#define large_free(a) vfree(a)
static void(*error)(char *m);
#define set_error_fn(x) error = x;
#define INIT __init
#define STATIC
include/linux/decompress/unxz.h (new file, 19 lines added)
@@ -0,0 +1,19 @@
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
#define DECOMPRESS_UNXZ_H
int unxz(unsigned char *in, int in_size,
int (*fill)(void *dest, unsigned int size),
int (*flush)(void *src, unsigned int size),
unsigned char *out, int *in_used,
void (*error)(char *x));
#endif
@@ -193,6 +193,13 @@ struct dm_target {
char *error;
};
/* Each target can link one of these into the table */
struct dm_target_callbacks {
struct list_head list;
int (*congested_fn) (struct dm_target_callbacks *, int);
void (*unplug_fn)(struct dm_target_callbacks *);
};
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

@@ -268,6 +275,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
* Target_ctr should call this if it needs to add any callbacks.
*/
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
/*
* Finally call this to make the table ready for use.
*/
@@ -30,9 +30,8 @@ struct device_private;
struct device_driver;
struct driver_private;
struct class;
struct class_private;
struct subsys_private;
struct bus_type;
struct bus_type_private;
struct device_node;
struct bus_attribute {

@@ -65,7 +64,7 @@ struct bus_type {
const struct dev_pm_ops *pm;
struct bus_type_private *p;
struct subsys_private *p;
};
extern int __must_check bus_register(struct bus_type *bus);

@@ -197,6 +196,7 @@ struct class {
struct class_attribute *class_attrs;
struct device_attribute *dev_attrs;
struct bin_attribute *dev_bin_attrs;
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);

@@ -213,7 +213,7 @@ struct class {
const struct dev_pm_ops *pm;
struct class_private *p;
struct subsys_private *p;
};
struct class_dev_iter {

@@ -508,13 +508,13 @@ static inline int device_is_registered(struct device *dev)
static inline void device_enable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
if (!dev->power.in_suspend)
dev->power.async_suspend = true;
}
static inline void device_disable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
if (!dev->power.in_suspend)
dev->power.async_suspend = false;
}
@@ -44,7 +44,7 @@
* Remove a device, destroy any tables.
*
* DM_DEV_RENAME:
* Rename a device.
* Rename a device or set its uuid if none was previously supplied.
*
* DM_SUSPEND:
* This performs both suspend and resume, depending which flag is

@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 18
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2010-06-29)"
#define DM_VERSION_MINOR 19
#define DM_VERSION_PATCHLEVEL 1
#define DM_VERSION_EXTRA "-ioctl (2011-01-07)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */

@@ -322,4 +322,10 @@ enum {
*/
#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */
/*
* If set, rename changes the uuid not the name. Only permitted
* if no uuid was previously supplied: an existing uuid cannot be changed.
*/
#define DM_UUID_FLAG (1 << 14) /* In */
#endif /* _LINUX_DM_IOCTL_H */
@@ -370,6 +370,16 @@
#define DM_ULOG_REQUEST_TYPE(request_type) \
(DM_ULOG_REQUEST_MASK & (request_type))
/*
* DM_ULOG_REQUEST_VERSION is incremented when there is a
* change to the way information is passed between kernel
* and userspace. This could be a structure change of
* dm_ulog_request or a change in the way requests are
* issued/handled. Changes are outlined here:
* version 1: Initial implementation
*/
#define DM_ULOG_REQUEST_VERSION 1
struct dm_ulog_request {
/*
* The local unique identifier (luid) and the universally unique

@@ -383,8 +393,9 @@ struct dm_ulog_request {
*/
uint64_t luid;
char uuid[DM_UUID_LEN];
char padding[7]; /* Padding because DM_UUID_LEN = 129 */
char padding[3]; /* Padding because DM_UUID_LEN = 129 */
uint32_t version; /* See DM_ULOG_REQUEST_VERSION */
int32_t error; /* Used to report back processing errors */
uint32_t seq; /* Sequence number for request */
@@ -532,7 +532,7 @@ static inline int dmaengine_resume(struct dma_chan *chan)
return dmaengine_device_control(chan, DMA_RESUME, 0);
}
static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
return desc->tx_submit(desc);
}
@@ -44,34 +44,24 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
extern int ddebug_remove_module(const char *mod_name);
#define dynamic_pr_debug(fmt, ...) do { \
__label__ do_printk; \
__label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
_DPRINTK_FLAGS_DEFAULT }; \
JUMP_LABEL(&descriptor.enabled, do_printk); \
goto out; \
do_printk: \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
out: ; \
if (unlikely(descriptor.enabled)) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) do { \
__label__ do_printk; \
__label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
_DPRINTK_FLAGS_DEFAULT }; \
JUMP_LABEL(&descriptor.enabled, do_printk); \
goto out; \
do_printk: \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
out: ; \
if (unlikely(descriptor.enabled)) \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev);
extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
/**
* is_zero_ether_addr - Determine if give Ethernet address is all zeros.

@@ -96,6 +98,17 @@ static inline int is_broadcast_ether_addr(const u8 *addr)
return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
}
/**
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return true if the address is a unicast address.
*/
static inline int is_unicast_ether_addr(const u8 *addr)
{
return !is_multicast_ether_addr(addr);
}
/**
* is_valid_ether_addr - Determine if the given Ethernet address is valid
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -724,21 +724,30 @@ struct ext3_dir_entry_2 {
~EXT3_DIR_ROUND)
#define EXT3_MAX_REC_LEN ((1<<16)-1)
/*
* Tests against MAX_REC_LEN etc were put in place for 64k block
* sizes; if that is not possible on this arch, we can skip
* those tests and speed things up.
*/
static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
#if (PAGE_CACHE_SIZE >= 65536)
if (len == EXT3_MAX_REC_LEN)
return 1 << 16;
#endif
return len;
}
static inline __le16 ext3_rec_len_to_disk(unsigned len)
{
#if (PAGE_CACHE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT3_MAX_REC_LEN);
else if (len > (1 << 16))
BUG();
#endif
return cpu_to_le16(len);
}

@@ -856,6 +865,7 @@ extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
extern void ext3_init_block_alloc_info(struct inode *);
extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
/* dir.c */
extern int ext3_check_dir_entry(const char *, struct inode *,
@@ -2,6 +2,7 @@
#define _FALLOC_H_
#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
#define FALLOC_FL_PUNCH_HOLE 0x02 /* de-allocates range */
#ifdef __KERNEL__
@@ -45,6 +45,7 @@
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
#ifdef __KERNEL__
@@ -3,6 +3,8 @@
* Copyright (c) 2009 Orex Computed Radiography
* Baruch Siach <baruch@tkos.co.il>
*
* Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* Header file for the FEC platform data
*
* This program is free software; you can redistribute it and/or modify

@@ -16,6 +18,7 @@
struct fec_platform_data {
phy_interface_t phy;
unsigned char mac[ETH_ALEN];
};
#endif
@@ -23,7 +23,7 @@ extern struct file *alloc_file(struct path *, fmode_t mode,
static inline void fput_light(struct file *file, int fput_needed)
{
if (unlikely(fput_needed))
if (fput_needed)
fput(file);
}
@@ -273,7 +273,7 @@ struct fw_cdev_event_iso_interrupt {
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
* @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
* @completed: Offset into the receive buffer; data before this offest is valid
* @completed: Offset into the receive buffer; data before this offset is valid
*
* This event is sent in multichannel contexts (context type
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
@@ -302,9 +302,9 @@ struct fw_packet {
struct fw_transaction {
int node_id; /* The generation is implied; it is always the current. */
int tlabel;
int timestamp;
struct list_head link;
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
struct fw_packet packet;
@@ -17,7 +17,6 @@
#define _LINUX_FIRMWARE_MAP_H
#include <linux/list.h>
#include <linux/kobject.h>
/*
* provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
}
/*
* Check if the task should be counted as freezeable by the freezer
* Check if the task should be counted as freezable by the freezer
*/
static inline int freezer_should_skip(struct task_struct *p)
{
@@ -242,6 +242,7 @@ struct inodes_stat_t {
#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
#define S_PRIVATE 512 /* Inode is fs-internal */
#define S_IMA 1024 /* Inode has an associated IMA struct */
#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system

@@ -277,6 +278,7 @@ struct inodes_stat_t {
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */

@@ -382,7 +384,6 @@ struct inodes_stat_t {
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/prio_tree.h>

@@ -402,6 +403,7 @@ struct hd_geometry;
struct iovec;
struct nameidata;
struct kiocb;
struct kobject;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;

@@ -647,6 +649,7 @@ struct address_space {
spinlock_t private_lock; /* for use by the address_space */
struct list_head private_list; /* ditto */
struct address_space *assoc_mapping; /* ditto */
struct mutex unmap_mutex; /* to protect unmapping */
} __attribute__((aligned(sizeof(long))));
/*
* On most architectures that alignment is already the case; but

@@ -664,8 +667,9 @@ struct block_device {
void * bd_claiming;
void * bd_holder;
int bd_holders;
bool bd_write_holder;
#ifdef CONFIG_SYSFS
struct list_head bd_holder_list;
struct list_head bd_holder_disks;
#endif
struct block_device * bd_contains;
unsigned bd_block_size;
@@ -1064,7 +1068,6 @@ struct lock_manager_operations {
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *);
int (*fl_mylease)(struct file_lock *, struct file_lock *);
int (*fl_change)(struct file_lock **, int);
};

@@ -1422,6 +1425,7 @@ struct super_block {
* generic_show_options()
*/
char __rcu *s_options;
const struct dentry_operations *s_d_op; /* default d_op for dentries */
};
extern struct timespec current_fs_time(struct super_block *sb);

@@ -1479,8 +1483,8 @@ struct fiemap_extent_info {
unsigned int fi_flags; /* Flags as passed from user */
unsigned int fi_extents_mapped; /* Number of mapped extents */
unsigned int fi_extents_max; /* Size of fiemap_extent array */
struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
* array */
struct fiemap_extent __user *fi_extents_start; /* Start of
fiemap_extent array */
};
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);

@@ -1548,6 +1552,8 @@ struct file_operations {
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
};
#define IPERM_FLAG_RCU 0x0001

@@ -1578,8 +1584,6 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset,
loff_t len);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
} ____cacheline_aligned;

@@ -1833,7 +1837,9 @@ struct super_block *sget(struct file_system_type *type,
int (*set)(struct super_block *,void *),
void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
const struct super_operations *ops, unsigned long);
const struct super_operations *ops,
const struct dentry_operations *dops,
unsigned long);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
static inline void sb_mark_dirty(struct super_block *sb)

@@ -2015,7 +2021,6 @@ extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);

@@ -2046,16 +2051,26 @@ extern const struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, fmode_t);
extern int blkdev_put(struct block_device *, fmode_t);
extern int bd_claim(struct block_device *, void *);
|
||||
extern void bd_release(struct block_device *);
|
||||
extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
|
||||
extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
|
||||
void *holder);
|
||||
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
|
||||
void *holder);
|
||||
extern int blkdev_put(struct block_device *bdev, fmode_t mode);
|
||||
#ifdef CONFIG_SYSFS
|
||||
extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
|
||||
extern void bd_release_from_disk(struct block_device *, struct gendisk *);
|
||||
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
|
||||
extern void bd_unlink_disk_holder(struct block_device *bdev,
|
||||
struct gendisk *disk);
|
||||
#else
|
||||
#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder)
|
||||
#define bd_release_from_disk(bdev, disk) bd_release(bdev)
|
||||
static inline int bd_link_disk_holder(struct block_device *bdev,
|
||||
struct gendisk *disk)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void bd_unlink_disk_holder(struct block_device *bdev,
|
||||
struct gendisk *disk)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -2091,8 +2106,6 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
|
|||
extern const char *__bdevname(dev_t, char *buffer);
|
||||
extern const char *bdevname(struct block_device *bdev, char *buffer);
|
||||
extern struct block_device *lookup_bdev(const char *);
|
||||
extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
|
||||
extern void close_bdev_exclusive(struct block_device *, fmode_t);
|
||||
extern void blkdev_show(struct seq_file *,off_t);
|
||||
|
||||
#else
|
||||
|
@ -2126,7 +2139,7 @@ extern void check_disk_size_change(struct gendisk *disk,
|
|||
struct block_device *bdev);
|
||||
extern int revalidate_disk(struct gendisk *);
|
||||
extern int check_disk_change(struct block_device *);
|
||||
extern int __invalidate_device(struct block_device *);
|
||||
extern int __invalidate_device(struct block_device *, bool);
|
||||
extern int invalidate_partition(struct gendisk *, int);
|
||||
#endif
|
||||
unsigned long invalidate_mapping_pages(struct address_space *mapping,
|
||||
|
@ -2232,6 +2245,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
|
|||
|
||||
extern int inode_init_always(struct super_block *, struct inode *);
|
||||
extern void inode_init_once(struct inode *);
|
||||
extern void address_space_init_once(struct address_space *mapping);
|
||||
extern void ihold(struct inode * inode);
|
||||
extern void iput(struct inode *);
|
||||
extern struct inode * igrab(struct inode *);
|
||||
|
@ -2562,9 +2576,12 @@ int proc_nr_inodes(struct ctl_table *table, int write,
|
|||
void __user *buffer, size_t *lenp, loff_t *ppos);
|
||||
int __init get_filesystem_list(char *buf);
|
||||
|
||||
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
|
||||
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
|
||||
|
||||
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
|
||||
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
|
||||
(flag & FMODE_NONOTIFY)))
|
||||
(flag & __FMODE_NONOTIFY)))
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_FS_H */
|
||||
|
|
|
@ -41,6 +41,12 @@
|
|||
* 7.15
|
||||
* - add store notify
|
||||
* - add retrieve notify
|
||||
*
|
||||
* 7.16
|
||||
* - add BATCH_FORGET request
|
||||
* - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct
|
||||
* fuse_ioctl_iovec' instead of ambiguous 'struct iovec'
|
||||
* - add FUSE_IOCTL_32BIT flag
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_FUSE_H
|
||||
|
@ -72,7 +78,7 @@
|
|||
#define FUSE_KERNEL_VERSION 7
|
||||
|
||||
/** Minor version number of this interface */
|
||||
#define FUSE_KERNEL_MINOR_VERSION 15
|
||||
#define FUSE_KERNEL_MINOR_VERSION 16
|
||||
|
||||
/** The node ID of the root inode */
|
||||
#define FUSE_ROOT_ID 1
|
||||
|
@ -200,12 +206,14 @@ struct fuse_file_lock {
|
|||
* FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine
|
||||
* FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed
|
||||
* FUSE_IOCTL_RETRY: retry with new iovecs
|
||||
* FUSE_IOCTL_32BIT: 32bit ioctl
|
||||
*
|
||||
* FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs
|
||||
*/
|
||||
#define FUSE_IOCTL_COMPAT (1 << 0)
|
||||
#define FUSE_IOCTL_UNRESTRICTED (1 << 1)
|
||||
#define FUSE_IOCTL_RETRY (1 << 2)
|
||||
#define FUSE_IOCTL_32BIT (1 << 3)
|
||||
|
||||
#define FUSE_IOCTL_MAX_IOV 256
|
||||
|
||||
|
@ -256,6 +264,7 @@ enum fuse_opcode {
|
|||
FUSE_IOCTL = 39,
|
||||
FUSE_POLL = 40,
|
||||
FUSE_NOTIFY_REPLY = 41,
|
||||
FUSE_BATCH_FORGET = 42,
|
||||
|
||||
/* CUSE specific operations */
|
||||
CUSE_INIT = 4096,
|
||||
|
@ -290,6 +299,16 @@ struct fuse_forget_in {
|
|||
__u64 nlookup;
|
||||
};
|
||||
|
||||
struct fuse_forget_one {
|
||||
__u64 nodeid;
|
||||
__u64 nlookup;
|
||||
};
|
||||
|
||||
struct fuse_batch_forget_in {
|
||||
__u32 count;
|
||||
__u32 dummy;
|
||||
};
|
||||
|
||||
struct fuse_getattr_in {
|
||||
__u32 getattr_flags;
|
||||
__u32 dummy;
|
||||
|
@ -510,6 +529,11 @@ struct fuse_ioctl_in {
|
|||
__u32 out_size;
|
||||
};
|
||||
|
||||
struct fuse_ioctl_iovec {
|
||||
__u64 base;
|
||||
__u64 len;
|
||||
};
|
||||
|
||||
struct fuse_ioctl_out {
|
||||
__s32 result;
|
||||
__u32 flags;
|
||||
|
|
|
@ -115,6 +115,7 @@ struct hd_struct {
|
|||
#else
|
||||
struct disk_stats dkstats;
|
||||
#endif
|
||||
atomic_t ref;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
|
@ -127,6 +128,11 @@ struct hd_struct {
|
|||
#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
|
||||
#define GENHD_FL_NATIVE_CAPACITY 128
|
||||
|
||||
enum {
|
||||
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
|
||||
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
|
||||
};
|
||||
|
||||
#define BLK_SCSI_MAX_CMDS (256)
|
||||
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
|
||||
|
||||
|
@ -143,6 +149,8 @@ struct disk_part_tbl {
|
|||
struct hd_struct __rcu *part[];
|
||||
};
|
||||
|
||||
struct disk_events;
|
||||
|
||||
struct gendisk {
|
||||
/* major, first_minor and minors are input parameters only,
|
||||
* don't use directly. Use disk_devt() and disk_max_parts().
|
||||
|
@ -154,6 +162,10 @@ struct gendisk {
|
|||
|
||||
char disk_name[DISK_NAME_LEN]; /* name of major driver */
|
||||
char *(*devnode)(struct gendisk *gd, mode_t *mode);
|
||||
|
||||
unsigned int events; /* supported events */
|
||||
unsigned int async_events; /* async events, subset of all */
|
||||
|
||||
/* Array of pointers to partitions indexed by partno.
|
||||
* Protected with matching bdev lock but stat and other
|
||||
* non-critical accesses use RCU. Always access through
|
||||
|
@ -171,9 +183,8 @@ struct gendisk {
|
|||
struct kobject *slave_dir;
|
||||
|
||||
struct timer_rand_state *random;
|
||||
|
||||
atomic_t sync_io; /* RAID */
|
||||
struct work_struct async_notify;
|
||||
struct disk_events *ev;
|
||||
#ifdef CONFIG_BLK_DEV_INTEGRITY
|
||||
struct blk_integrity *integrity;
|
||||
#endif
|
||||
|
@ -395,7 +406,6 @@ extern void part_round_stats(int cpu, struct hd_struct *part);
|
|||
/* block/genhd.c */
|
||||
extern void add_disk(struct gendisk *disk);
|
||||
extern void del_gendisk(struct gendisk *gp);
|
||||
extern void unlink_gendisk(struct gendisk *gp);
|
||||
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
|
||||
extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
|
||||
|
||||
|
@ -407,6 +417,11 @@ static inline int get_disk_ro(struct gendisk *disk)
|
|||
return disk->part0.policy;
|
||||
}
|
||||
|
||||
extern void disk_block_events(struct gendisk *disk);
|
||||
extern void disk_unblock_events(struct gendisk *disk);
|
||||
extern void disk_check_events(struct gendisk *disk);
|
||||
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
|
||||
|
||||
/* drivers/char/random.c */
|
||||
extern void add_disk_randomness(struct gendisk *disk);
|
||||
extern void rand_initialize_disk(struct gendisk *disk);
|
||||
|
@ -583,6 +598,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
|
|||
sector_t len, int flags,
|
||||
struct partition_meta_info
|
||||
*info);
|
||||
extern void __delete_partition(struct hd_struct *);
|
||||
extern void delete_partition(struct gendisk *, int);
|
||||
extern void printk_all_partitions(void);
|
||||
|
||||
|
@ -611,6 +627,29 @@ extern ssize_t part_fail_store(struct device *dev,
|
|||
const char *buf, size_t count);
|
||||
#endif /* CONFIG_FAIL_MAKE_REQUEST */
|
||||
|
||||
static inline void hd_ref_init(struct hd_struct *part)
|
||||
{
|
||||
atomic_set(&part->ref, 1);
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
static inline void hd_struct_get(struct hd_struct *part)
|
||||
{
|
||||
atomic_inc(&part->ref);
|
||||
smp_mb__after_atomic_inc();
|
||||
}
|
||||
|
||||
static inline int hd_struct_try_get(struct hd_struct *part)
|
||||
{
|
||||
return atomic_inc_not_zero(&part->ref);
|
||||
}
|
||||
|
||||
static inline void hd_struct_put(struct hd_struct *part)
|
||||
{
|
||||
if (atomic_dec_and_test(&part->ref))
|
||||
__delete_partition(part);
|
||||
}
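The hd_ref_init()/hd_struct_get()/hd_struct_try_get()/hd_struct_put() helpers above wrap the new atomic_t partition reference count. A minimal sketch of the intended pattern, with a hypothetical caller that wants to pin a partition across a non-atomic section:
/* Illustrative only: the function and its caller are invented. */
static void example_inspect_part(struct hd_struct *part)
{
	if (!hd_struct_try_get(part))	/* refcount already dropped to zero */
		return;

	/* ... safe to dereference 'part' here ... */

	hd_struct_put(part);		/* the final put ends up in __delete_partition() */
}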
|
||||
|
||||
#else /* CONFIG_BLOCK */
|
||||
|
||||
static inline void printk_all_partitions(void) { }
|
||||
|
|
|
@ -34,6 +34,7 @@ struct vm_area_struct;
|
|||
#else
|
||||
#define ___GFP_NOTRACK 0
|
||||
#endif
|
||||
#define ___GFP_NO_KSWAPD 0x400000u
|
||||
|
||||
/*
|
||||
* GFP bitmasks..
|
||||
|
@ -81,13 +82,15 @@ struct vm_area_struct;
|
|||
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
|
||||
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
|
||||
|
||||
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
|
||||
|
||||
/*
|
||||
* This may seem redundant, but it's a way of annotating false positives vs.
|
||||
* allocations that simply cannot be supported (e.g. page tables).
|
||||
*/
|
||||
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
|
||||
|
||||
#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
|
||||
#define __GFP_BITS_SHIFT 23 /* Room for 23 __GFP_FOO bits */
|
||||
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
|
||||
|
||||
/* This equals 0, but use constants in case they ever change */
|
||||
|
@ -106,6 +109,9 @@ struct vm_area_struct;
|
|||
__GFP_HARDWALL | __GFP_HIGHMEM | \
|
||||
__GFP_MOVABLE)
|
||||
#define GFP_IOFS (__GFP_IO | __GFP_FS)
|
||||
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
|
||||
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
|
||||
__GFP_NO_KSWAPD)
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
|
||||
|
@ -243,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
|
|||
((1 << ZONES_SHIFT) - 1);
|
||||
|
||||
if (__builtin_constant_p(bit))
|
||||
MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
|
||||
BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
|
||||
else {
|
||||
#ifdef CONFIG_DEBUG_VM
|
||||
BUG_ON((GFP_ZONE_BAD >> bit) & 1);
|
||||
|
@ -325,14 +331,20 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
|
|||
{
|
||||
return alloc_pages_current(gfp_mask, order);
|
||||
}
|
||||
extern struct page *alloc_page_vma(gfp_t gfp_mask,
|
||||
struct vm_area_struct *vma, unsigned long addr);
|
||||
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
|
||||
struct vm_area_struct *vma, unsigned long addr,
|
||||
int node);
|
||||
#else
|
||||
#define alloc_pages(gfp_mask, order) \
|
||||
alloc_pages_node(numa_node_id(), gfp_mask, order)
|
||||
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
|
||||
#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
|
||||
alloc_pages(gfp_mask, order)
|
||||
#endif
|
||||
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
|
||||
#define alloc_page_vma(gfp_mask, vma, addr) \
|
||||
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
|
||||
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
|
||||
alloc_pages_vma(gfp_mask, 0, vma, addr, node)
|
||||
|
||||
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
|
||||
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
|
||||
|
|
38
include/linux/gpio-i2cmux.h
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* gpio-i2cmux interface to platform code
|
||||
*
|
||||
* Peter Korsgaard <peter.korsgaard@barco.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_GPIO_I2CMUX_H
|
||||
#define _LINUX_GPIO_I2CMUX_H
|
||||
|
||||
/* MUX has no specific idle mode */
|
||||
#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1)
|
||||
|
||||
/**
|
||||
* struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
|
||||
* @parent: Parent I2C bus adapter number
|
||||
* @base_nr: Base I2C bus number to number adapters from or zero for dynamic
|
||||
* @values: Array of bitmasks of GPIO settings (low/high) for each
|
||||
* position
|
||||
* @n_values: Number of multiplexer positions (busses to instantiate)
|
||||
* @gpios: Array of GPIO numbers used to control MUX
|
||||
* @n_gpios: Number of GPIOs used to control MUX
|
||||
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
|
||||
*/
|
||||
struct gpio_i2cmux_platform_data {
|
||||
int parent;
|
||||
int base_nr;
|
||||
const unsigned *values;
|
||||
int n_values;
|
||||
const unsigned *gpios;
|
||||
int n_gpios;
|
||||
unsigned idle;
|
||||
};
|
||||
|
||||
#endif /* _LINUX_GPIO_I2CMUX_H */
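For a rough idea of how this platform data is consumed, here is a hedged board-code sketch; the GPIO numbers, the channel values and the "gpio-i2cmux" platform-device name are assumptions, not taken from this header:
#include <linux/gpio-i2cmux.h>
#include <linux/platform_device.h>

static const unsigned mux_gpios[]  = { 21, 22 };	/* assumed mux control GPIOs */
static const unsigned mux_values[] = { 0, 1, 2, 3 };	/* GPIO pattern per child bus */

static struct gpio_i2cmux_platform_data mux_pdata = {
	.parent   = 1,				/* adapter number of the upstream bus */
	.base_nr  = 0,				/* 0 = number child buses dynamically */
	.values   = mux_values,
	.n_values = ARRAY_SIZE(mux_values),
	.gpios    = mux_gpios,
	.n_gpios  = ARRAY_SIZE(mux_gpios),
	.idle     = GPIO_I2CMUX_NO_IDLE,
};

static struct platform_device mux_device = {
	.name = "gpio-i2cmux",			/* assumed to match the mux driver */
	.id   = 0,
	.dev  = { .platform_data = &mux_pdata },
};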
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/errno.h>
|
||||
|
||||
struct device;
|
||||
struct gpio;
|
||||
struct gpio_chip;
|
||||
|
||||
/*
|
||||
|
@ -34,6 +35,17 @@ static inline int gpio_request(unsigned gpio, const char *label)
|
|||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int gpio_request_one(unsigned gpio,
|
||||
unsigned long flags, const char *label)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int gpio_request_array(struct gpio *array, size_t num)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline void gpio_free(unsigned gpio)
|
||||
{
|
||||
might_sleep();
|
||||
|
@ -42,6 +54,14 @@ static inline void gpio_free(unsigned gpio)
|
|||
WARN_ON(1);
|
||||
}
|
||||
|
||||
static inline void gpio_free_array(struct gpio *array, size_t num)
|
||||
{
|
||||
might_sleep();
|
||||
|
||||
/* GPIO can never have been requested */
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
static inline int gpio_direction_input(unsigned gpio)
|
||||
{
|
||||
return -ENOSYS;
|
||||
|
|
|
@ -402,7 +402,7 @@ struct hid_field {
|
|||
__u16 dpad; /* dpad input code */
|
||||
};
|
||||
|
||||
#define HID_MAX_FIELDS 64
|
||||
#define HID_MAX_FIELDS 128
|
||||
|
||||
struct hid_report {
|
||||
struct list_head list;
|
||||
|
@ -593,6 +593,7 @@ struct hid_usage_id {
|
|||
* @report_fixup: called before report descriptor parsing (NULL means nop)
|
||||
* @input_mapping: invoked on input registering before mapping an usage
|
||||
* @input_mapped: invoked on input registering after mapping an usage
|
||||
* @feature_mapping: invoked on feature registering
|
||||
* @suspend: invoked on suspend (NULL means nop)
|
||||
* @resume: invoked on resume if device was not reset (NULL means nop)
|
||||
* @reset_resume: invoked on resume if device was reset (NULL means nop)
|
||||
|
@ -636,6 +637,9 @@ struct hid_driver {
|
|||
int (*input_mapped)(struct hid_device *hdev,
|
||||
struct hid_input *hidinput, struct hid_field *field,
|
||||
struct hid_usage *usage, unsigned long **bit, int *max);
|
||||
void (*feature_mapping)(struct hid_device *hdev,
|
||||
struct hid_input *hidinput, struct hid_field *field,
|
||||
struct hid_usage *usage);
|
||||
#ifdef CONFIG_PM
|
||||
int (*suspend)(struct hid_device *hdev, pm_message_t message);
|
||||
int (*resume)(struct hid_device *hdev);
|
||||
|
@ -820,6 +824,49 @@ static inline void hid_hw_stop(struct hid_device *hdev)
|
|||
hdev->ll_driver->stop(hdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* hid_hw_open - signal underlaying HW to start delivering events
|
||||
*
|
||||
* @hdev: hid device
|
||||
*
|
||||
* Tell underlying HW to start delivering events from the device.
|
||||
* This function should be called sometime after successful call
|
||||
* to hid_hiw_start().
|
||||
*/
|
||||
static inline int __must_check hid_hw_open(struct hid_device *hdev)
|
||||
{
|
||||
return hdev->ll_driver->open(hdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* hid_hw_close - signal underlaying HW to stop delivering events
|
||||
*
|
||||
* @hdev: hid device
|
||||
*
|
||||
* This function indicates that we are not interested in the events
|
||||
* from this device anymore. Delivery of events may or may not stop,
|
||||
* depending on the number of users still outstanding.
|
||||
*/
|
||||
static inline void hid_hw_close(struct hid_device *hdev)
|
||||
{
|
||||
hdev->ll_driver->close(hdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* hid_hw_power - requests underlying HW to go into given power mode
|
||||
*
|
||||
* @hdev: hid device
|
||||
* @level: requested power level (one of %PM_HINT_* defines)
|
||||
*
|
||||
* This function requests underlying hardware to enter requested power
|
||||
* mode.
|
||||
*/
|
||||
|
||||
static inline int hid_hw_power(struct hid_device *hdev, int level)
|
||||
{
|
||||
return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
|
||||
}
|
||||
|
||||
void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
|
||||
int interrupt);
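The open/close helpers documented above only hint the transport layer about interest in events; a short sketch of the expected call order, assuming hid_hw_start() already succeeded in probe (the function itself is hypothetical):
static int example_enable_events(struct hid_device *hdev)
{
	int ret;

	ret = hid_hw_open(hdev);	/* ask the transport to start delivering reports */
	if (ret)
		return ret;

	/* ... reports now reach the driver's event callbacks ... */

	hid_hw_close(hdev);		/* drop our interest; delivery may continue for others */
	return 0;
}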
|
||||
|
||||
|
@ -838,12 +885,32 @@ int hid_pidff_init(struct hid_device *hid);
|
|||
#define hid_pidff_init NULL
|
||||
#endif
|
||||
|
||||
#define dbg_hid(format, arg...) if (hid_debug) \
|
||||
printk(KERN_DEBUG "%s: " format ,\
|
||||
__FILE__ , ## arg)
|
||||
#define err_hid(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
|
||||
__FILE__ , ## arg)
|
||||
#endif /* HID_FF */
|
||||
#define dbg_hid(format, arg...) \
|
||||
do { \
|
||||
if (hid_debug) \
|
||||
printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \
|
||||
} while (0)
|
||||
|
||||
#define hid_printk(level, hid, fmt, arg...) \
|
||||
dev_printk(level, &(hid)->dev, fmt, ##arg)
|
||||
#define hid_emerg(hid, fmt, arg...) \
|
||||
dev_emerg(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_crit(hid, fmt, arg...) \
|
||||
dev_crit(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_alert(hid, fmt, arg...) \
|
||||
dev_alert(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_err(hid, fmt, arg...) \
|
||||
dev_err(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_notice(hid, fmt, arg...) \
|
||||
dev_notice(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_warn(hid, fmt, arg...) \
|
||||
dev_warn(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_info(hid, fmt, arg...) \
|
||||
dev_info(&(hid)->dev, fmt, ##arg)
|
||||
#define hid_dbg(hid, fmt, arg...) \
|
||||
dev_dbg(&(hid)->dev, fmt, ##arg)
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -131,7 +131,6 @@ struct hrtimer_sleeper {
|
|||
* @index: clock type index for per_cpu support when moving a
|
||||
* timer to a base on another cpu.
|
||||
* @active: red black tree root node for the active timers
|
||||
* @first: pointer to the timer node which expires first
|
||||
* @resolution: the resolution of the clock, in nanoseconds
|
||||
* @get_time: function to retrieve the current time of the clock
|
||||
* @softirq_time: the time when running the hrtimer queue in the softirq
|
||||
|
|
180
include/linux/huge_mm.h
Normal file
|
@ -0,0 +1,180 @@
|
|||
#ifndef _LINUX_HUGE_MM_H
|
||||
#define _LINUX_HUGE_MM_H
|
||||
|
||||
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmd,
|
||||
unsigned int flags);
|
||||
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
|
||||
struct vm_area_struct *vma);
|
||||
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmd,
|
||||
pmd_t orig_pmd);
|
||||
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
|
||||
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
pmd_t *pmd,
|
||||
unsigned int flags);
|
||||
extern int zap_huge_pmd(struct mmu_gather *tlb,
|
||||
struct vm_area_struct *vma,
|
||||
pmd_t *pmd);
|
||||
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, unsigned long end,
|
||||
unsigned char *vec);
|
||||
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, pgprot_t newprot);
|
||||
|
||||
enum transparent_hugepage_flag {
|
||||
TRANSPARENT_HUGEPAGE_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
|
||||
#ifdef CONFIG_DEBUG_VM
|
||||
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
|
||||
#endif
|
||||
};
|
||||
|
||||
enum page_check_address_pmd_flag {
|
||||
PAGE_CHECK_ADDRESS_PMD_FLAG,
|
||||
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
|
||||
PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
|
||||
};
|
||||
extern pmd_t *page_check_address_pmd(struct page *page,
|
||||
struct mm_struct *mm,
|
||||
unsigned long address,
|
||||
enum page_check_address_pmd_flag flag);
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
|
||||
#define HPAGE_PMD_MASK HPAGE_MASK
|
||||
#define HPAGE_PMD_SIZE HPAGE_SIZE
|
||||
|
||||
#define transparent_hugepage_enabled(__vma) \
|
||||
((transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_FLAG) || \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
|
||||
((__vma)->vm_flags & VM_HUGEPAGE))) && \
|
||||
!((__vma)->vm_flags & VM_NOHUGEPAGE) && \
|
||||
!is_vma_temporary_stack(__vma))
|
||||
#define transparent_hugepage_defrag(__vma) \
|
||||
((transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
|
||||
(__vma)->vm_flags & VM_HUGEPAGE))
|
||||
#ifdef CONFIG_DEBUG_VM
|
||||
#define transparent_hugepage_debug_cow() \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
|
||||
#else /* CONFIG_DEBUG_VM */
|
||||
#define transparent_hugepage_debug_cow() 0
|
||||
#endif /* CONFIG_DEBUG_VM */
|
||||
|
||||
extern unsigned long transparent_hugepage_flags;
|
||||
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pmd_t *dst_pmd, pmd_t *src_pmd,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long end);
|
||||
extern int handle_pte_fault(struct mm_struct *mm,
|
||||
struct vm_area_struct *vma, unsigned long address,
|
||||
pte_t *pte, pmd_t *pmd, unsigned int flags);
|
||||
extern int split_huge_page(struct page *page);
|
||||
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
|
||||
#define split_huge_page_pmd(__mm, __pmd) \
|
||||
do { \
|
||||
pmd_t *____pmd = (__pmd); \
|
||||
if (unlikely(pmd_trans_huge(*____pmd))) \
|
||||
__split_huge_page_pmd(__mm, ____pmd); \
|
||||
} while (0)
|
||||
#define wait_split_huge_page(__anon_vma, __pmd) \
|
||||
do { \
|
||||
pmd_t *____pmd = (__pmd); \
|
||||
spin_unlock_wait(&(__anon_vma)->root->lock); \
|
||||
/* \
|
||||
* spin_unlock_wait() is just a loop in C and so the \
|
||||
* CPU can reorder anything around it. \
|
||||
*/ \
|
||||
smp_mb(); \
|
||||
BUG_ON(pmd_trans_splitting(*____pmd) || \
|
||||
pmd_trans_huge(*____pmd)); \
|
||||
} while (0)
|
||||
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
|
||||
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
|
||||
#if HPAGE_PMD_ORDER > MAX_ORDER
|
||||
#error "hugepages can't be allocated by the buddy allocator"
|
||||
#endif
|
||||
extern int hugepage_madvise(struct vm_area_struct *vma,
|
||||
unsigned long *vm_flags, int advice);
|
||||
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
long adjust_next);
|
||||
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
long adjust_next)
|
||||
{
|
||||
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
|
||||
return;
|
||||
__vma_adjust_trans_huge(vma, start, end, adjust_next);
|
||||
}
|
||||
static inline int hpage_nr_pages(struct page *page)
|
||||
{
|
||||
if (unlikely(PageTransHuge(page)))
|
||||
return HPAGE_PMD_NR;
|
||||
return 1;
|
||||
}
|
||||
static inline struct page *compound_trans_head(struct page *page)
|
||||
{
|
||||
if (PageTail(page)) {
|
||||
struct page *head;
|
||||
head = page->first_page;
|
||||
smp_rmb();
|
||||
/*
|
||||
* head may be a dangling pointer.
|
||||
* __split_huge_page_refcount clears PageTail before
|
||||
* overwriting first_page, so if PageTail is still
|
||||
* there it means the head pointer isn't dangling.
|
||||
*/
|
||||
if (PageTail(page))
|
||||
return head;
|
||||
}
|
||||
return page;
|
||||
}
|
||||
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
|
||||
#define HPAGE_PMD_MASK ({ BUG(); 0; })
|
||||
#define HPAGE_PMD_SIZE ({ BUG(); 0; })
|
||||
|
||||
#define hpage_nr_pages(x) 1
|
||||
|
||||
#define transparent_hugepage_enabled(__vma) 0
|
||||
|
||||
#define transparent_hugepage_flags 0UL
|
||||
static inline int split_huge_page(struct page *page)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#define split_huge_page_pmd(__mm, __pmd) \
|
||||
do { } while (0)
|
||||
#define wait_split_huge_page(__anon_vma, __pmd) \
|
||||
do { } while (0)
|
||||
#define compound_trans_head(page) compound_head(page)
|
||||
static inline int hugepage_madvise(struct vm_area_struct *vma,
|
||||
unsigned long *vm_flags, int advice)
|
||||
{
|
||||
BUG();
|
||||
return 0;
|
||||
}
|
||||
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
long adjust_next)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
||||
#endif /* _LINUX_HUGE_MM_H */
|
|
@ -57,9 +57,10 @@ struct i2c_board_info;
|
|||
* transmit an arbitrary number of messages without interruption.
|
||||
* @count must be be less than 64k since msg.len is u16.
|
||||
*/
|
||||
extern int i2c_master_send(struct i2c_client *client, const char *buf,
|
||||
extern int i2c_master_send(const struct i2c_client *client, const char *buf,
|
||||
int count);
|
||||
extern int i2c_master_recv(const struct i2c_client *client, char *buf,
|
||||
int count);
|
||||
extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
|
||||
|
||||
/* Transfer num messages.
|
||||
*/
|
||||
|
@ -78,23 +79,25 @@ extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
|
|||
/* Now follow the 'nice' access routines. These also document the calling
|
||||
conventions of i2c_smbus_xfer. */
|
||||
|
||||
extern s32 i2c_smbus_read_byte(struct i2c_client *client);
|
||||
extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
|
||||
extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
|
||||
extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
|
||||
extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
|
||||
extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
|
||||
u8 command);
|
||||
extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
|
||||
u8 command, u8 value);
|
||||
extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
|
||||
extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
|
||||
u8 command);
|
||||
extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
|
||||
u8 command, u16 value);
|
||||
/* Returns the number of read bytes */
|
||||
extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
|
||||
u8 command, u8 *values);
|
||||
extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
|
||||
u8 command, u8 length, const u8 *values);
|
||||
/* Returns the number of read bytes */
|
||||
extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
|
||||
u8 command, u8 length, u8 *values);
|
||||
extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
|
||||
extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
|
||||
u8 command, u8 length,
|
||||
const u8 *values);
|
||||
#endif /* I2C */
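The hunk above only constifies the client argument, so existing callers are unaffected; a minimal sketch of a read-modify-write using the new signatures (the register address and bit are made up):
static int example_set_enable_bit(const struct i2c_client *client)
{
	s32 val = i2c_smbus_read_byte_data(client, 0x01);	/* 0x01: assumed register */

	if (val < 0)
		return val;		/* negative errno from the SMBus layer */
	return i2c_smbus_write_byte_data(client, 0x01, val | 0x80);
}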
|
||||
|
|
21
include/linux/i2c/ds620.h
Normal file
|
@ -0,0 +1,21 @@
|
|||
#ifndef _LINUX_DS620_H
|
||||
#define _LINUX_DS620_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/i2c.h>
|
||||
|
||||
/* platform data for the DS620 temperature sensor and thermostat */
|
||||
|
||||
struct ds620_platform_data {
|
||||
/*
|
||||
* Thermostat output pin PO mode:
|
||||
* 0 = always low (default)
|
||||
* 1 = PO_LOW
|
||||
* 2 = PO_HIGH
|
||||
*
|
||||
* (see Documentation/hwmon/ds620)
|
||||
*/
|
||||
int pomode;
|
||||
};
|
||||
|
||||
#endif /* _LINUX_DS620_H */
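Platform data like this is normally attached from board code; a hedged sketch, where the 0x48 address and the "ds620" device name are assumptions:
#include <linux/i2c.h>
#include <linux/i2c/ds620.h>

static struct ds620_platform_data ds620_pdata = {
	.pomode = 1,			/* 1 = PO_LOW, per the comment above */
};

static struct i2c_board_info ds620_info __initdata = {
	I2C_BOARD_INFO("ds620", 0x48),	/* name and address assumed */
	.platform_data = &ds620_pdata,
};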
|
|
@ -959,7 +959,7 @@ struct ieee80211_ht_info {
|
|||
/* block-ack parameters */
|
||||
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
|
||||
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
|
||||
#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
|
||||
#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
|
||||
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
|
||||
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
|
||||
|
||||
|
|
40
include/linux/if_alg.h
Normal file
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* if_alg: User-space algorithm interface
|
||||
*
|
||||
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IF_ALG_H
|
||||
#define _LINUX_IF_ALG_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct sockaddr_alg {
|
||||
__u16 salg_family;
|
||||
__u8 salg_type[14];
|
||||
__u32 salg_feat;
|
||||
__u32 salg_mask;
|
||||
__u8 salg_name[64];
|
||||
};
|
||||
|
||||
struct af_alg_iv {
|
||||
__u32 ivlen;
|
||||
__u8 iv[0];
|
||||
};
|
||||
|
||||
/* Socket options */
|
||||
#define ALG_SET_KEY 1
|
||||
#define ALG_SET_IV 2
|
||||
#define ALG_SET_OP 3
|
||||
|
||||
/* Operations */
|
||||
#define ALG_OP_DECRYPT 0
|
||||
#define ALG_OP_ENCRYPT 1
|
||||
|
||||
#endif /* _LINUX_IF_ALG_H */
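From user space the new AF_ALG family is driven purely through sockets; a hedged sketch of hashing a buffer with it, assuming the kernel exposes a "sha1" hash and that AF_ALG is 38 (its value in linux/socket.h):
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38		/* older libc headers may not define it yet */
#endif

static int example_sha1(const void *buf, size_t len, unsigned char digest[20])
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	int op, ret = -1;
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfm < 0)
		return -1;
	if (bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) == 0 &&
	    (op = accept(tfm, NULL, NULL)) >= 0) {
		if (write(op, buf, len) == (ssize_t)len &&
		    read(op, digest, 20) == 20)
			ret = 0;
		close(op);
	}
	close(tfm);
	return ret;
}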
|
|
@ -103,7 +103,7 @@ struct __fdb_entry {
|
|||
|
||||
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
|
||||
|
||||
typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
|
||||
typedef int br_should_route_hook_t(struct sk_buff *skb);
|
||||
extern br_should_route_hook_t __rcu *br_should_route_hook;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -802,6 +802,7 @@ struct input_keymap_entry {
|
|||
#define SW_CAMERA_LENS_COVER 0x09 /* set = lens covered */
|
||||
#define SW_KEYPAD_SLIDE 0x0a /* set = keypad slide out */
|
||||
#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */
|
||||
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
|
||||
#define SW_MAX 0x0f
|
||||
#define SW_CNT (SW_MAX+1)
|
||||
|
||||
|
|
20
include/linux/input/as5011.h
Normal file
|
@ -0,0 +1,20 @@
|
|||
#ifndef _AS5011_H
|
||||
#define _AS5011_H
|
||||
|
||||
/*
|
||||
* Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*/
|
||||
|
||||
struct as5011_platform_data {
|
||||
unsigned int button_gpio;
|
||||
unsigned int axis_irq; /* irq number */
|
||||
unsigned long axis_irqflags;
|
||||
char xp, xn; /* threshold for x axis */
|
||||
char yp, yn; /* threshold for y axis */
|
||||
};
|
||||
|
||||
#endif /* _AS5011_H */
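A hedged board-code sketch of filling this in; every number below (GPIO, IRQ, I2C address, thresholds) is invented for illustration:
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input/as5011.h>

static struct as5011_platform_data as5011_pdata = {
	.button_gpio   = 108,
	.axis_irq      = 52,
	.axis_irqflags = IRQF_TRIGGER_LOW,
	.xp = 80,  .xn = -80,		/* x axis thresholds */
	.yp = 80,  .yn = -80,		/* y axis thresholds */
};

static struct i2c_board_info as5011_info __initdata = {
	I2C_BOARD_INFO("as5011", 0x40),
	.platform_data = &as5011_pdata,
};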
|
|
@ -12,8 +12,6 @@
|
|||
* @cs_en: pointer to the cs enable function
|
||||
* @cs_dis: pointer to the cs disable function
|
||||
* @irq_read_val: pointer to read the pen irq value function
|
||||
* @x_max_res: xmax resolution
|
||||
* @y_max_res: ymax resolution
|
||||
* @touch_x_max: touch x max
|
||||
* @touch_y_max: touch y max
|
||||
* @cs_pin: chip select pin
|
||||
|
@ -29,8 +27,6 @@ struct bu21013_platform_device {
|
|||
int (*cs_en)(int reset_pin);
|
||||
int (*cs_dis)(int reset_pin);
|
||||
int (*irq_read_val)(void);
|
||||
int x_max_res;
|
||||
int y_max_res;
|
||||
int touch_x_max;
|
||||
int touch_y_max;
|
||||
unsigned int cs_pin;
|
||||
|
|
|
@ -4,8 +4,8 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/input.h>
|
||||
|
||||
#define MATRIX_MAX_ROWS 16
|
||||
#define MATRIX_MAX_COLS 16
|
||||
#define MATRIX_MAX_ROWS 32
|
||||
#define MATRIX_MAX_COLS 32
|
||||
|
||||
#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
|
||||
(((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
/*
|
||||
* Common Intel AGPGART and GTT definitions.
|
||||
*/
|
||||
#ifndef _INTEL_GTT_H
|
||||
#define _INTEL_GTT_H
|
||||
|
||||
#include <linux/agp_backend.h>
|
||||
|
||||
/* This is for Intel only GTT controls.
|
||||
*
|
||||
* Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
|
||||
*/
|
||||
|
||||
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
|
||||
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
|
||||
|
||||
/* flag for GFDT type */
|
||||
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
|
||||
|
||||
#endif
|
|
@ -454,6 +454,44 @@ unsigned int ipmi_addr_length(int addr_type);
|
|||
/* Validate that the given IPMI address is valid. */
|
||||
int ipmi_validate_addr(struct ipmi_addr *addr, int len);
|
||||
|
||||
/*
|
||||
* How did the IPMI driver find out about the device?
|
||||
*/
|
||||
enum ipmi_addr_src {
|
||||
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
|
||||
SI_PCI, SI_DEVICETREE, SI_DEFAULT
|
||||
};
|
||||
|
||||
union ipmi_smi_info_union {
|
||||
/*
|
||||
* the acpi_info element is defined for the SI_ACPI
|
||||
* address type
|
||||
*/
|
||||
struct {
|
||||
void *acpi_handle;
|
||||
} acpi_info;
|
||||
};
|
||||
|
||||
struct ipmi_smi_info {
|
||||
enum ipmi_addr_src addr_src;
|
||||
|
||||
/*
|
||||
* Base device for the interface. Don't forget to put this when
|
||||
* you are done.
|
||||
*/
|
||||
struct device *dev;
|
||||
|
||||
/*
|
||||
* The addr_info provides more detailed info for some IPMI
|
||||
* devices, depending on the addr_src. Currently only SI_ACPI
|
||||
* info is provided.
|
||||
*/
|
||||
union ipmi_smi_info_union addr_info;
|
||||
};
|
||||
|
||||
/* This is to get the private info of ipmi_smi_t */
|
||||
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
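A short sketch of the expected consumer side, following the 'put this when you are done' note above; the interface number and the use_acpi_handle() helper are hypothetical:
static void example_probe_ipmi_acpi(void)
{
	struct ipmi_smi_info info;

	if (ipmi_get_smi_info(0, &info))
		return;

	if (info.addr_src == SI_ACPI)
		use_acpi_handle(info.addr_info.acpi_info.acpi_handle);

	put_device(info.dev);		/* drop the reference taken on our behalf */
}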
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ipmi.h>
|
||||
|
||||
/* This files describes the interface for IPMI system management interface
|
||||
drivers to bind into the IPMI message handler. */
|
||||
|
@ -86,6 +87,13 @@ struct ipmi_smi_handlers {
|
|||
int (*start_processing)(void *send_info,
|
||||
ipmi_smi_t new_intf);
|
||||
|
||||
/*
|
||||
* Get the detailed private info of the low level interface and store
|
||||
* it into the structure of ipmi_smi_data. For example: the
|
||||
* ACPI device handle will be returned for the pnp_acpi IPMI device.
|
||||
*/
|
||||
int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
|
||||
|
||||
/* Called to enqueue an SMI message to be sent. This
|
||||
operation is not allowed to fail. If an error occurs, it
|
||||
should report back the error in a received message. It may
|
||||
|
|
|
@ -74,7 +74,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
|
|||
|
||||
#define IRQF_MODIFY_MASK \
|
||||
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
|
||||
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
|
||||
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
|
||||
IRQ_PER_CPU)
|
||||
|
||||
#ifdef CONFIG_IRQ_PER_CPU
|
||||
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
|
||||
|
|
|
@ -57,7 +57,7 @@ struct irq_desc {
|
|||
#endif
|
||||
|
||||
struct timer_rand_state *timer_rand_state;
|
||||
unsigned int *kstat_irqs;
|
||||
unsigned int __percpu *kstat_irqs;
|
||||
irq_flow_handler_t handle_irq;
|
||||
struct irqaction *action; /* IRQ action list */
|
||||
unsigned int status; /* IRQ status */
|
||||
|
@ -100,13 +100,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
|
|||
#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
|
||||
#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
|
||||
|
||||
/*
|
||||
* Monolithic do_IRQ implementation.
|
||||
*/
|
||||
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
extern unsigned int __do_IRQ(unsigned int irq);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Architectures call this to let the generic IRQ layer
|
||||
* handle an interrupt. If the descriptor is attached to an
|
||||
|
@ -115,14 +108,7 @@ extern unsigned int __do_IRQ(unsigned int irq);
|
|||
*/
|
||||
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
desc->handle_irq(irq, desc);
|
||||
#else
|
||||
if (likely(desc->handle_irq))
|
||||
desc->handle_irq(irq, desc);
|
||||
else
|
||||
__do_IRQ(irq);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void generic_handle_irq(unsigned int irq)
|
||||
|
|
|
@ -94,7 +94,7 @@ extern void jbd2_free(void *ptr, size_t size);
|
|||
*
|
||||
* This is an opaque datatype.
|
||||
**/
|
||||
typedef struct handle_s handle_t; /* Atomic operation type */
|
||||
typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */
|
||||
|
||||
|
||||
/**
|
||||
|
@ -416,7 +416,7 @@ struct jbd2_revoke_table_s;
|
|||
* in so it can be fixed later.
|
||||
*/
|
||||
|
||||
struct handle_s
|
||||
struct jbd2_journal_handle
|
||||
{
|
||||
/* Which compound transaction is this update a part of? */
|
||||
transaction_t *h_transaction;
|
||||
|
@ -1158,6 +1158,22 @@ static inline void jbd2_free_handle(handle_t *handle)
|
|||
kmem_cache_free(jbd2_handle_cache, handle);
|
||||
}
|
||||
|
||||
/*
|
||||
* jbd2_inode management (optional, for those file systems that want to use
|
||||
* dynamically allocated jbd2_inode structures)
|
||||
*/
|
||||
extern struct kmem_cache *jbd2_inode_cache;
|
||||
|
||||
static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
|
||||
{
|
||||
return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
|
||||
}
|
||||
|
||||
static inline void jbd2_free_inode(struct jbd2_inode *jinode)
|
||||
{
|
||||
kmem_cache_free(jbd2_inode_cache, jinode);
|
||||
}
|
||||
|
||||
/* Primary revoke support */
|
||||
#define JOURNAL_REVOKE_DEFAULT_HASH 256
|
||||
extern int jbd2_journal_init_revoke(journal_t *, int);
|
||||
|
|
|
@ -143,9 +143,22 @@ extern int _cond_resched(void);
|
|||
|
||||
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
|
||||
|
||||
#define abs(x) ({ \
|
||||
long __x = (x); \
|
||||
(__x < 0) ? -__x : __x; \
|
||||
/*
|
||||
* abs() handles unsigned and signed longs, ints, shorts and chars. For all
|
||||
* input types abs() returns a signed long.
|
||||
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
|
||||
* for those.
|
||||
*/
|
||||
#define abs(x) ({ \
|
||||
long ret; \
|
||||
if (sizeof(x) == sizeof(long)) { \
|
||||
long __x = (x); \
|
||||
ret = (__x < 0) ? -__x : __x; \
|
||||
} else { \
|
||||
int __x = (x); \
|
||||
ret = (__x < 0) ? -__x : __x; \
|
||||
} \
|
||||
ret; \
|
||||
})
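A few hedged usage lines to go with the comment above: the result is always a signed long regardless of the input width, and 64-bit values must go through abs64(), declared just below:
static void example_abs_usage(void)
{
	long a = abs(-5);		/* 5 */
	long b = abs((short)-7);	/* 7: takes the int branch of the macro */
	s64  c = abs64(-(1LL << 40));	/* 64-bit inputs need abs64(), not abs() */

	(void)a; (void)b; (void)c;
}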
|
||||
|
||||
#define abs64(x) ({ \
|
||||
|
@ -230,6 +243,8 @@ extern int test_taint(unsigned flag);
|
|||
extern unsigned long get_taint(void);
|
||||
extern int root_mountflags;
|
||||
|
||||
extern bool early_boot_irqs_disabled;
|
||||
|
||||
/* Values used for system_state */
|
||||
extern enum system_states {
|
||||
SYSTEM_BOOTING,
|
||||
|
@ -560,12 +575,6 @@ struct sysinfo {
|
|||
char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
|
||||
};
|
||||
|
||||
/* Force a compilation error if condition is true */
|
||||
#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
|
||||
|
||||
/* Force a compilation error if condition is constant and true */
|
||||
#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
|
||||
|
||||
/* Force a compilation error if a constant expression is not a power of 2 */
|
||||
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
|
||||
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
|
||||
|
@ -577,6 +586,32 @@ struct sysinfo {
|
|||
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
|
||||
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
|
||||
|
||||
/**
|
||||
* BUILD_BUG_ON - break compile if a condition is true.
|
||||
* @condition: the condition which the compiler should know is false.
|
||||
*
|
||||
* If you have some code which relies on certain constants being equal, or
|
||||
* other compile-time-evaluated condition, you should use BUILD_BUG_ON to
|
||||
* detect if someone changes it.
|
||||
*
|
||||
* The implementation uses gcc's reluctance to create a negative array, but
|
||||
* gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
|
||||
* to inline functions). So as a fallback we use the optimizer; if it can't
|
||||
* prove the condition is false, it will cause a link error on the undefined
|
||||
* "__build_bug_on_failed". This error message can be harder to track down
|
||||
* though, hence the two different methods.
|
||||
*/
|
||||
#ifndef __OPTIMIZE__
|
||||
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
|
||||
#else
|
||||
extern int __build_bug_on_failed;
|
||||
#define BUILD_BUG_ON(condition) \
|
||||
do { \
|
||||
((void)sizeof(char[1 - 2*!!(condition)])); \
|
||||
if (condition) __build_bug_on_failed = 1; \
|
||||
} while(0)
|
||||
#endif
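A typical use, matching the kernel-doc above; the struct and its 16-byte limit are invented for the example. With the optimizer-based fallback the macro expands to a statement, so it has to sit inside a function:
struct example_wire_hdr {		/* hypothetical fixed-layout structure */
	__u8 bytes[16];
};

static int example_check(void)
{
	BUILD_BUG_ON(sizeof(struct example_wire_hdr) != 16);	/* breaks the build if it drifts */
	return 0;
}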
|
||||
|
||||
/* Trap pasters of __FUNCTION__ at compile-time */
|
||||
#define __FUNCTION__ (__func__)
|
||||
|
||||
|
@ -587,6 +622,13 @@ struct sysinfo {
|
|||
#define NUMA_BUILD 0
|
||||
#endif
|
||||
|
||||
/* This helps us avoid #ifdef CONFIG_COMPACTION */
|
||||
#ifdef CONFIG_COMPACTION
|
||||
#define COMPACTION_BUILD 1
|
||||
#else
|
||||
#define COMPACTION_BUILD 0
|
||||
#endif
|
||||
|
||||
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
|
||||
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
|
||||
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
|
||||
|
|
|
@ -46,16 +46,14 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
|
|||
extern unsigned long long nr_context_switches(void);
|
||||
|
||||
#ifndef CONFIG_GENERIC_HARDIRQS
|
||||
#define kstat_irqs_this_cpu(irq) \
|
||||
(this_cpu_read(kstat.irqs[irq])
|
||||
|
||||
struct irq_desc;
|
||||
|
||||
static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
{
|
||||
kstat_this_cpu.irqs[irq]++;
|
||||
kstat_this_cpu.irqs_sum++;
|
||||
__this_cpu_inc(kstat.irqs[irq]);
|
||||
__this_cpu_inc(kstat.irqs_sum);
|
||||
}
|
||||
|
||||
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
||||
|
@ -65,17 +63,18 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
|||
#else
|
||||
#include <linux/irq.h>
|
||||
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
|
||||
#define kstat_irqs_this_cpu(DESC) \
|
||||
((DESC)->kstat_irqs[smp_processor_id()])
|
||||
#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
|
||||
((DESC)->kstat_irqs[smp_processor_id()]++);\
|
||||
kstat_this_cpu.irqs_sum++; } while (0)
|
||||
|
||||
#define kstat_incr_irqs_this_cpu(irqno, DESC) \
|
||||
do { \
|
||||
__this_cpu_inc(*(DESC)->kstat_irqs); \
|
||||
__this_cpu_inc(kstat.irqs_sum); \
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
|
||||
{
|
||||
kstat_this_cpu.softirqs[irq]++;
|
||||
__this_cpu_inc(kstat.softirqs[irq]);
|
||||
}
|
||||
|
||||
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
|
||||
|
|
67
include/linux/khugepaged.h
Normal file
|
@ -0,0 +1,67 @@
|
|||
#ifndef _LINUX_KHUGEPAGED_H
|
||||
#define _LINUX_KHUGEPAGED_H
|
||||
|
||||
#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
extern int __khugepaged_enter(struct mm_struct *mm);
|
||||
extern void __khugepaged_exit(struct mm_struct *mm);
|
||||
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
|
||||
|
||||
#define khugepaged_enabled() \
|
||||
(transparent_hugepage_flags & \
|
||||
((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
|
||||
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
|
||||
#define khugepaged_always() \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_FLAG))
|
||||
#define khugepaged_req_madv() \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
|
||||
#define khugepaged_defrag() \
|
||||
(transparent_hugepage_flags & \
|
||||
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
|
||||
|
||||
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
{
|
||||
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
|
||||
return __khugepaged_enter(mm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void khugepaged_exit(struct mm_struct *mm)
|
||||
{
|
||||
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
|
||||
__khugepaged_exit(mm);
|
||||
}
|
||||
|
||||
static inline int khugepaged_enter(struct vm_area_struct *vma)
|
||||
{
|
||||
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
|
||||
if ((khugepaged_always() ||
|
||||
(khugepaged_req_madv() &&
|
||||
vma->vm_flags & VM_HUGEPAGE)) &&
|
||||
!(vma->vm_flags & VM_NOHUGEPAGE))
|
||||
if (__khugepaged_enter(vma->vm_mm))
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void khugepaged_exit(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
static inline int khugepaged_enter(struct vm_area_struct *vma)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
||||
#endif /* _LINUX_KHUGEPAGED_H */
|
|
@ -22,7 +22,7 @@ struct klist {
|
|||
struct list_head k_list;
|
||||
void (*get)(struct klist_node *);
|
||||
void (*put)(struct klist_node *);
|
||||
} __attribute__ ((aligned (4)));
|
||||
} __attribute__ ((aligned (sizeof(void *))));
|
||||
|
||||
#define KLIST_INIT(_name, _get, _put) \
|
||||
{ .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
|
||||
|
|
|
@ -76,7 +76,7 @@ bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
|
|||
\
|
||||
_n = (long) &((ptr)->name##_end) \
|
||||
- (long) &((ptr)->name##_begin); \
|
||||
MAYBE_BUILD_BUG_ON(_n < 0); \
|
||||
BUILD_BUG_ON(_n < 0); \
|
||||
\
|
||||
kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
|
||||
} while (0)
|
||||
|
|
|
@ -18,6 +18,10 @@ enum kmsg_dump_reason {
|
|||
KMSG_DUMP_OOPS,
|
||||
KMSG_DUMP_PANIC,
|
||||
KMSG_DUMP_KEXEC,
|
||||
KMSG_DUMP_RESTART,
|
||||
KMSG_DUMP_HALT,
|
||||
KMSG_DUMP_POWEROFF,
|
||||
KMSG_DUMP_EMERG,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -24,5 +24,7 @@ struct kref {
|
|||
void kref_init(struct kref *kref);
|
||||
void kref_get(struct kref *kref);
|
||||
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
|
||||
int kref_sub(struct kref *kref, unsigned int count,
|
||||
void (*release) (struct kref *kref));
|
||||
|
||||
#endif /* _KREF_H_ */
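kref_sub() added above is the batched form of kref_put(); a hedged sketch where the object type and its release function are invented:
struct example_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	/* container_of(kref, struct example_obj, refcount) and free it */
}

static void example_drop_two(struct example_obj *obj)
{
	/* gives back two references at once; equivalent to two kref_put() calls */
	kref_sub(&obj->refcount, 2, example_release);
}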
|
||||
|
|
|
@ -540,6 +540,7 @@ struct kvm_ppc_pvinfo {
|
|||
#endif
|
||||
#define KVM_CAP_PPC_GET_PVINFO 57
|
||||
#define KVM_CAP_PPC_IRQ_LEVEL 58
|
||||
#define KVM_CAP_ASYNC_PF 59
|
||||
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <asm/signal.h>
|
||||
|
||||
#include <linux/kvm.h>
|
||||
|
@ -40,6 +42,7 @@
|
|||
#define KVM_REQ_KICK 9
#define KVM_REQ_DEACTIVATE_FPU 10
#define KVM_REQ_EVENT 11
#define KVM_REQ_APF_HALT 12

#define KVM_USERSPACE_IRQ_SOURCE_ID 0

@ -74,6 +77,27 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
struct work_struct work;
struct list_head link;
struct list_head queue;
struct kvm_vcpu *vcpu;
struct mm_struct *mm;
gva_t gva;
unsigned long addr;
struct kvm_arch_async_pf arch;
struct page *page;
bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

struct kvm_vcpu {
struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS

@ -104,6 +128,15 @@ struct kvm_vcpu {
gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
struct {
u32 queued;
struct list_head queue;
struct list_head done;
spinlock_t lock;
} async_pf;
#endif

struct kvm_vcpu_arch arch;
};

@ -113,16 +146,19 @@ struct kvm_vcpu {
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
unsigned long rmap_pde;
int write_count;
};

struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
struct {
unsigned long rmap_pde;
int write_count;
} *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long *dirty_bitmap_head;
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr;
int user_alloc;
int id;

@ -169,6 +205,7 @@ struct kvm_irq_routing_table {};

struct kvm_memslots {
int nmemslots;
u64 generation;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
};

@ -206,6 +243,10 @@ struct kvm {

struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
/*
* Update side is protected by irq_lock and,
* if configured, irqfds.lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
struct hlist_head mask_notifier_list;
struct hlist_head irq_ack_notifier_list;

@ -216,6 +257,7 @@ struct kvm {
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
#endif
long tlbs_dirty;
};

/* The guest did something we don't support. */

@ -302,7 +344,11 @@ void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
int memslot_id(struct kvm *kvm, gfn_t gfn);

@ -321,18 +367,25 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

@ -398,7 +451,19 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

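The default kvm_arch_alloc_vm()/kvm_arch_free_vm() pair above simply kzalloc()s and kfree()s struct kvm; an architecture can opt out by defining __KVM_HAVE_ARCH_VM_ALLOC and supplying its own allocator. A minimal sketch of such an override, assuming a hypothetical arch header and the use of vzalloc()/vfree() from <linux/vmalloc.h>; none of this is taken from the diff itself:

/* Hypothetical arch header, e.g. asm/kvm_host.h -- a sketch, not from this diff. */
#define __KVM_HAVE_ARCH_VM_ALLOC

#include <linux/vmalloc.h>

/* An arch whose struct kvm is too large for kzalloc() might prefer vzalloc(). */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return vzalloc(sizeof(struct kvm));
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        vfree(kvm);
}
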
@ -414,16 +479,8 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING 0x1
struct kvm_guest_msix_entry {
u32 vector;
u16 entry;
u16 flags;
};

struct kvm_assigned_dev_kernel {
struct kvm_irq_ack_notifier ack_notifier;
struct work_struct interrupt_work;
struct list_head list;
int assigned_dev_id;
int host_segnr;

@ -434,13 +491,14 @@ struct kvm_assigned_dev_kernel {
bool host_irq_disabled;
struct msix_entry *host_msix_entries;
int guest_irq;
struct kvm_guest_msix_entry *guest_msix_entries;
struct msix_entry *guest_msix_entries;
unsigned long irq_requested_type;
int irq_source_id;
int flags;
struct pci_dev *dev;
struct kvm *kvm;
spinlock_t assigned_dev_lock;
spinlock_t intx_lock;
char irq_name[32];
};

struct kvm_irq_mask_notifier {

@ -462,6 +520,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);

@ -603,17 +663,28 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
struct kvm_irq_routing_table *irq_rt)
{
rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
return -ENOSYS;

@ -67,4 +67,11 @@ struct kvm_lapic_irq {
u32 dest_id;
};

struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
unsigned long hva;
struct kvm_memory_slot *memslot;
};

#endif /* __KVM_TYPES_H__ */

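struct gfn_to_hva_cache above backs the kvm_gfn_to_hva_cache_init()/kvm_write_guest_cached() pair declared in the kvm_host.h hunk earlier: the gpa-to-hva translation and memslot generation are resolved once and reused until the memslots change. A minimal sketch of the intended call pattern; the function name, gpa source and error handling are illustrative, not taken from this diff:

static int example_publish_u64(struct kvm_vcpu *vcpu, gpa_t gpa, u64 val)
{
        struct gfn_to_hva_cache ghc;

        /* Resolve gpa -> hva once and record the memslot generation. */
        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &ghc, gpa))
                return -EFAULT;

        /* Later writes reuse the cached translation instead of a memslot walk. */
        return kvm_write_guest_cached(vcpu->kvm, &ghc, &val, sizeof(val));
}
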
@ -42,6 +42,7 @@ struct lp5521_platform_data {
int (*setup_resources)(void);
void (*release_resources)(void);
void (*enable)(bool state);
const char *label;
};

#endif /* __LINUX_LP5521_H */

@ -42,6 +42,7 @@ struct lp5523_platform_data {
int (*setup_resources)(void);
void (*release_resources)(void);
void (*enable)(bool state);
const char *label;
};

#endif /* __LINUX_LP5523_H */

@ -996,8 +996,7 @@ extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);

@ -1040,8 +1039,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);

@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
* in an undefined state.
*/
#ifndef CONFIG_DEBUG_LIST
static inline void __list_del_entry(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}

static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);

@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry)
entry->prev = LIST_POISON2;
}
#else
extern void __list_del_entry(struct list_head *entry);
extern void list_del(struct list_head *entry);
#endif

@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old,
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
__list_del_entry(entry);
INIT_LIST_HEAD(entry);
}

@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry)
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
__list_del_entry(list);
list_add(list, head);
}

@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head)
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
__list_del_entry(list);
list_add_tail(list, head);
}

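With these hunks, list_del_init(), list_move() and list_move_tail() all unlink through the new __list_del_entry() helper, so a CONFIG_DEBUG_LIST build can centralize its corruption checks in one place. A short usage sketch with an illustrative item type; the struct and list names are not from this diff:

#include <linux/list.h>

struct item {
        struct list_head node;
        int value;
};

static LIST_HEAD(pending);
static LIST_HEAD(done);

static void complete_first_pending(void)
{
        struct item *it;

        if (list_empty(&pending))
                return;

        it = list_first_entry(&pending, struct item, node);
        /* list_move() now unlinks via __list_del_entry() and re-adds to 'done'. */
        list_move(&it->node, &done);
}
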
@ -62,7 +62,8 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
LIST_BL_LOCKMASK);
h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}

@ -44,14 +44,4 @@
#define NLMDBG_XDR 0x0100
#define NLMDBG_ALL 0x7fff


/*
* Support for printing NLM cookies in dprintk()
*/
#ifdef RPC_DEBUG
struct nlm_cookie;
/* Call this function with the BKL held (it uses a static buffer) */
extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
#endif

#endif /* LINUX_LOCKD_DEBUG_H */

@ -202,9 +202,9 @@ extern u32 nsm_local_state;
* Lockd client functions
*/
struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
void nlm_release_call(struct nlm_rqst *);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
void nlmclnt_finish_block(struct nlm_wait *block);
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);

@ -223,13 +223,14 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const u32 version,
const char *hostname,
int noresvport);
void nlmclnt_release_host(struct nlm_host *);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
const size_t hostname_len);
void nlmsvc_release_host(struct nlm_host *);
struct rpc_clnt * nlm_bind_host(struct nlm_host *);
void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *);
void nlm_release_host(struct nlm_host *);
void nlm_shutdown_hosts(void);
void nlm_host_rebooted(const struct nlm_reboot *);

@ -267,6 +268,7 @@ unsigned long nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
void nlmsvc_release_call(struct nlm_rqst *);

/*
* File handling for the server personality

@ -436,16 +436,8 @@ do { \
#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}

@ -522,12 +514,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l) do { } while (0)
# define lock_map_acquire_read(l) do { } while (0)
# define lock_map_release(l) do { } while (0)
#endif

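lock_map_acquire_read() gives code that only waits on a pseudo-lock (a lockdep_map that is not a real lock) a read-style annotation, so lockdep can record the dependency without treating the waiter as a full acquirer. A sketch only, assuming CONFIG_DEBUG_LOCK_ALLOC is enabled; the map and function names are illustrative and not part of this diff:

#include <linux/lockdep.h>

static struct lockdep_map example_map =
        STATIC_LOCKDEP_MAP_INIT("example_map", &example_map);

static void example_run_work(void)
{
        lock_map_acquire(&example_map);         /* the side that does the work */
        /* ... run the work item ... */
        lock_map_release(&example_map);
}

static void example_flush_work(void)
{
        lock_map_acquire_read(&example_map);    /* the side that only waits */
        lock_map_release(&example_map);
        /* ... wait for example_run_work() to complete ... */
}
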
@ -16,6 +16,7 @@
#define TMPFS_MAGIC 0x01021994
#define HUGETLBFS_MAGIC 0x958458f6 /* some random number */
#define SQUASHFS_MAGIC 0x73717368
#define ECRYPTFS_SUPER_MAGIC 0xf15f
#define EFS_SUPER_MAGIC 0x414A53
#define EXT2_SUPER_MAGIC 0xEF53
#define EXT3_SUPER_MAGIC 0xEF53

@ -18,6 +18,17 @@ struct mb_cache_entry {
} e_index;
};

struct mb_cache {
struct list_head c_cache_list;
const char *c_name;
atomic_t c_entry_count;
int c_max_entries;
int c_bucket_bits;
struct kmem_cache *c_entry_cache;
struct list_head *c_block_hash;
struct list_head *c_index_hash;
};

/* Functions on caches */

struct mb_cache *mb_cache_create(const char *, int);

@ -25,6 +25,11 @@ struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct list_head *dst,
unsigned long *scanned, int order,

@ -93,7 +98,7 @@ extern int
mem_cgroup_prepare_migration(struct page *page,
struct page *newpage, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
struct page *oldpage, struct page *newpage);
struct page *oldpage, struct page *newpage, bool migration_ok);

/*
* For memory reclaim.

@ -121,11 +126,30 @@ static inline bool mem_cgroup_disabled(void)
return false;
}

void mem_cgroup_update_file_mapped(struct page *page, int val);
void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx,
int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx)
{
mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx)
{
mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

@ -231,8 +255,7 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
struct page *oldpage,
struct page *newpage)
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

@ -293,8 +316,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_update_file_mapped(struct page *page,
int val)
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx)
{
}

@ -311,6 +339,11 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
struct page *tail)
{
}

#endif /* CONFIG_CGROUP_MEM_CONT */

#endif /* _LINUX_MEMCONTROL_H */

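The mem_cgroup_inc_page_stat()/mem_cgroup_dec_page_stat() helpers added above replace the old mem_cgroup_update_file_mapped() call, with the statistic selected by enum mem_cgroup_page_stat_item. A sketch of how a caller converts; the wrapper function below is illustrative, not taken from this diff:

static void example_account_file_mapped(struct page *page, bool mapped)
{
        if (mapped)
                mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
        else
                mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}
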
@ -13,12 +13,16 @@ struct mem_section;
#ifdef CONFIG_MEMORY_HOTPLUG

/*
* Types for free bootmem.
* The normal smallest mapcount is -1. Here is smaller value than it.
* Types for free bootmem stored in page->lru.next. These have to be in
* some random range in unsigned long space for debugging purposes.
*/
#define SECTION_INFO (-1 - 1)
#define MIX_SECTION_INFO (-1 - 2)
#define NODE_INFO (-1 - 3)
enum {
MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
MIX_SECTION_INFO,
NODE_INFO,
MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/*
* pgdat resizing functions

@ -161,6 +165,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

/*
* Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
* notifier will be called under this. 2) offline/online/add/remove memory
* will not run simultaneously.
*/

void lock_memory_hotplug(void);
void unlock_memory_hotplug(void);

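Per the comment above, lock_memory_hotplug()/unlock_memory_hotplug() serialize against online/offline/add/remove operations and against hotplug notifier callbacks. A minimal sketch of a caller that needs a stable view of memory while it walks sections; the function body is illustrative, not from this diff:

static void example_walk_stable_memory(void)
{
        lock_memory_hotplug();
        /* ... walk zones/sections; no memory can come or go while held ... */
        unlock_memory_hotplug();
}
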
@ -74,32 +74,37 @@
#define AB8500_INT_ACC_DETECT_21DB_F 37
#define AB8500_INT_ACC_DETECT_21DB_R 38
#define AB8500_INT_GP_SW_ADC_CONV_END 39
#define AB8500_INT_BTEMP_LOW 72
#define AB8500_INT_BTEMP_LOW_MEDIUM 73
#define AB8500_INT_BTEMP_MEDIUM_HIGH 74
#define AB8500_INT_BTEMP_HIGH 75
#define AB8500_INT_USB_CHARGER_NOT_OK 81
#define AB8500_INT_ID_WAKEUP_R 82
#define AB8500_INT_ID_DET_R1R 84
#define AB8500_INT_ID_DET_R2R 85
#define AB8500_INT_ID_DET_R3R 86
#define AB8500_INT_ID_DET_R4R 87
#define AB8500_INT_ID_WAKEUP_F 88
#define AB8500_INT_ID_DET_R1F 90
#define AB8500_INT_ID_DET_R2F 91
#define AB8500_INT_ID_DET_R3F 92
#define AB8500_INT_ID_DET_R4F 93
#define AB8500_INT_USB_CHG_DET_DONE 94
#define AB8500_INT_USB_CH_TH_PROT_F 96
#define AB8500_INT_USB_CH_TH_PROP_R 97
#define AB8500_INT_MAIN_CH_TH_PROP_F 98
#define AB8500_INT_MAIN_CH_TH_PROT_R 99
#define AB8500_INT_USB_CHARGER_NOT_OKF 103
#define AB8500_INT_ADP_SOURCE_ERROR 72
#define AB8500_INT_ADP_SINK_ERROR 73
#define AB8500_INT_ADP_PROBE_PLUG 74
#define AB8500_INT_ADP_PROBE_UNPLUG 75
#define AB8500_INT_ADP_SENSE_OFF 76
#define AB8500_INT_USB_PHY_POWER_ERR 78
#define AB8500_INT_USB_LINK_STATUS 79
#define AB8500_INT_BTEMP_LOW 80
#define AB8500_INT_BTEMP_LOW_MEDIUM 81
#define AB8500_INT_BTEMP_MEDIUM_HIGH 82
#define AB8500_INT_BTEMP_HIGH 83
#define AB8500_INT_USB_CHARGER_NOT_OK 89
#define AB8500_INT_ID_WAKEUP_R 90
#define AB8500_INT_ID_DET_R1R 92
#define AB8500_INT_ID_DET_R2R 93
#define AB8500_INT_ID_DET_R3R 94
#define AB8500_INT_ID_DET_R4R 95
#define AB8500_INT_ID_WAKEUP_F 96
#define AB8500_INT_ID_DET_R1F 98
#define AB8500_INT_ID_DET_R2F 99
#define AB8500_INT_ID_DET_R3F 100
#define AB8500_INT_ID_DET_R4F 101
#define AB8500_INT_USB_CHG_DET_DONE 102
#define AB8500_INT_USB_CH_TH_PROT_F 104
#define AB8500_INT_USB_CH_TH_PROT_R 105
#define AB8500_INT_MAIN_CH_TH_PROT_F 106
#define AB8500_INT_MAIN_CH_TH_PROT_R 107
#define AB8500_INT_USB_CHARGER_NOT_OKF 111

#define AB8500_NR_IRQS 104
#define AB8500_NUM_IRQ_REGS 13

#define AB8500_NUM_REGULATORS 15
#define AB8500_NR_IRQS 112
#define AB8500_NUM_IRQ_REGS 14

/**
* struct ab8500 - ab8500 internal structure

@ -145,7 +150,8 @@ struct regulator_init_data;
struct ab8500_platform_data {
int irq_base;
void (*init) (struct ab8500 *);
struct regulator_init_data *regulator[AB8500_NUM_REGULATORS];
int num_regulator;
struct regulator_init_data *regulator;
};

extern int __devinit ab8500_init(struct ab8500 *ab8500);

@ -39,7 +39,7 @@ struct mfd_cell {
size_t data_size;

/*
* This resources can be specified relatievly to the parent device.
* This resources can be specified relatively to the parent device.
* For accessing device you should use resources from device
*/
int num_resources;

@ -47,6 +47,12 @@ struct mfd_cell {

/* don't check for resource conflicts */
bool ignore_resource_conflicts;

/*
* Disable runtime PM callbacks for this subdevice - see
* pm_runtime_no_callbacks().
*/
bool pm_runtime_no_callbacks;
};

extern int mfd_add_devices(struct device *parent, int id,

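The new pm_runtime_no_callbacks flag lets an MFD driver mark a subdevice whose runtime PM is handled entirely by the parent device (see pm_runtime_no_callbacks()). A sketch of a cell table using it before being handed to mfd_add_devices(); the cell name below is made up:

static struct mfd_cell example_cells[] = {
        {
                .name = "example-rtc",
                /* Parent device owns power management for this subdevice. */
                .pm_runtime_no_callbacks = true,
        },
};
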
@ -159,10 +159,12 @@ struct max8998_dev {
u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
int type;
bool wakeup;
};

int max8998_irq_init(struct max8998_dev *max8998);
void max8998_irq_exit(struct max8998_dev *max8998);
int max8998_irq_resume(struct max8998_dev *max8998);

extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count,

@ -70,24 +70,43 @@ struct max8998_regulator_data {
* @num_regulators: number of regultors used
* @irq_base: base IRQ number for max8998, required for IRQs
* @ono: power onoff IRQ number for max8998
* @buck1_max_voltage1: BUCK1 maximum alowed voltage register 1
* @buck1_max_voltage2: BUCK1 maximum alowed voltage register 2
* @buck2_max_voltage: BUCK2 maximum alowed voltage
* @buck_voltage_lock: Do NOT change the values of the following six
* registers set by buck?_voltage?. The voltage of BUCK1/2 cannot
* be other than the preset values.
* @buck1_voltage1: BUCK1 DVS mode 1 voltage register
* @buck1_voltage2: BUCK1 DVS mode 2 voltage register
* @buck1_voltage3: BUCK1 DVS mode 3 voltage register
* @buck1_voltage4: BUCK1 DVS mode 4 voltage register
* @buck2_voltage1: BUCK2 DVS mode 1 voltage register
* @buck2_voltage2: BUCK2 DVS mode 2 voltage register
* @buck1_set1: BUCK1 gpio pin 1 to set output voltage
* @buck1_set2: BUCK1 gpio pin 2 to set output voltage
* @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
* @buck2_set3: BUCK2 gpio pin to set output voltage
* @buck2_default_idx: Default for BUCK2 gpio pin.
* @wakeup: Allow to wake up from suspend
* @rtc_delay: LP3974 RTC chip bug that requires delay after a register
* write before reading it.
*/
struct max8998_platform_data {
struct max8998_regulator_data *regulators;
int num_regulators;
int irq_base;
int ono;
int buck1_max_voltage1;
int buck1_max_voltage2;
int buck2_max_voltage;
bool buck_voltage_lock;
int buck1_voltage1;
int buck1_voltage2;
int buck1_voltage3;
int buck1_voltage4;
int buck2_voltage1;
int buck2_voltage2;
int buck1_set1;
int buck1_set2;
int buck1_default_idx;
int buck2_set3;
int buck2_default_idx;
bool wakeup;
bool rtc_delay;
};

#endif /* __LINUX_MFD_MAX8998_H */

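The kernel-doc block above documents the new BUCK voltage, GPIO and wakeup fields. A sketch of board code filling in the platform data; every name and value below is illustrative, and example_regulators stands in for a board-specific array of struct max8998_regulator_data:

static struct max8998_regulator_data example_regulators[] = {
        /* board-specific { .id, .initdata } entries would go here */
};

static struct max8998_platform_data example_max8998_pdata = {
        .regulators = example_regulators,
        .num_regulators = ARRAY_SIZE(example_regulators),
        .irq_base = 0,                  /* platform-specific IRQ base */
        .wakeup = true,
        .buck_voltage_lock = true,      /* keep the preset BUCK1/2 voltages */
};
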
@ -1,4 +1,5 @@
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
* Copyright 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*

@ -122,39 +123,39 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);


#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
#define MC13783_SW_SW2A 2
#define MC13783_SW_SW2B 3
#define MC13783_SW_SW3 4
#define MC13783_SW_PLL 5
#define MC13783_REGU_VAUDIO 6
#define MC13783_REGU_VIOHI 7
#define MC13783_REGU_VIOLO 8
#define MC13783_REGU_VDIG 9
#define MC13783_REGU_VGEN 10
#define MC13783_REGU_VRFDIG 11
#define MC13783_REGU_VRFREF 12
#define MC13783_REGU_VRFCP 13
#define MC13783_REGU_VSIM 14
#define MC13783_REGU_VESIM 15
#define MC13783_REGU_VCAM 16
#define MC13783_REGU_VRFBG 17
#define MC13783_REGU_VVIB 18
#define MC13783_REGU_VRF1 19
#define MC13783_REGU_VRF2 20
#define MC13783_REGU_VMMC1 21
#define MC13783_REGU_VMMC2 22
#define MC13783_REGU_GPO1 23
#define MC13783_REGU_GPO2 24
#define MC13783_REGU_GPO3 25
#define MC13783_REGU_GPO4 26
#define MC13783_REGU_V1 27
#define MC13783_REGU_V2 28
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
#define MC13783_REGU_PWGT1SPI 31
#define MC13783_REGU_PWGT2SPI 32
#define MC13783_REG_SW1A 0
#define MC13783_REG_SW1B 1
#define MC13783_REG_SW2A 2
#define MC13783_REG_SW2B 3
#define MC13783_REG_SW3 4
#define MC13783_REG_PLL 5
#define MC13783_REG_VAUDIO 6
#define MC13783_REG_VIOHI 7
#define MC13783_REG_VIOLO 8
#define MC13783_REG_VDIG 9
#define MC13783_REG_VGEN 10
#define MC13783_REG_VRFDIG 11
#define MC13783_REG_VRFREF 12
#define MC13783_REG_VRFCP 13
#define MC13783_REG_VSIM 14
#define MC13783_REG_VESIM 15
#define MC13783_REG_VCAM 16
#define MC13783_REG_VRFBG 17
#define MC13783_REG_VVIB 18
#define MC13783_REG_VRF1 19
#define MC13783_REG_VRF2 20
#define MC13783_REG_VMMC1 21
#define MC13783_REG_VMMC2 22
#define MC13783_REG_GPO1 23
#define MC13783_REG_GPO2 24
#define MC13783_REG_GPO3 25
#define MC13783_REG_GPO4 26
#define MC13783_REG_V1 27
#define MC13783_REG_V2 28
#define MC13783_REG_V3 29
#define MC13783_REG_V4 30
#define MC13783_REG_PWGT1SPI 31
#define MC13783_REG_PWGT2SPI 32

#define MC13783_IRQ_ADCDONE MC13XXX_IRQ_ADCDONE
#define MC13783_IRQ_ADCBISDONE MC13XXX_IRQ_ADCBISDONE

39
include/linux/mfd/mc13892.h
Normal file

@ -0,0 +1,39 @@
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/

#ifndef __LINUX_MFD_MC13892_H
#define __LINUX_MFD_MC13892_H

#include <linux/mfd/mc13xxx.h>

#define MC13892_SW1 0
#define MC13892_SW2 1
#define MC13892_SW3 2
#define MC13892_SW4 3
#define MC13892_SWBST 4
#define MC13892_VIOHI 5
#define MC13892_VPLL 6
#define MC13892_VDIG 7
#define MC13892_VSD 8
#define MC13892_VUSB2 9
#define MC13892_VVIDEO 10
#define MC13892_VAUDIO 11
#define MC13892_VCAM 12
#define MC13892_VGEN1 13
#define MC13892_VGEN2 14
#define MC13892_VGEN3 15
#define MC13892_VUSB 16
#define MC13892_GPO1 17
#define MC13892_GPO2 18
#define MC13892_GPO3 19
#define MC13892_GPO4 20
#define MC13892_PWGT1SPI 21
#define MC13892_PWGT2SPI 22
#define MC13892_VCOINCELL 23

#endif

Some files were not shown because too many files have changed in this diff.