Merge commit 'v2.6.31-rc6' into core/rcu

Merge reason: the branch was on a pre-rc1 .30 base; update it to the latest release candidate.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit fa08661af8
2538 changed files with 70466 additions and 36595 deletions
include/linux/Kbuild
@@ -57,6 +57,7 @@ header-y += dlmconstants.h
header-y += dlm_device.h
header-y += dlm_netlink.h
header-y += dm-ioctl.h
header-y += dm-log-userspace.h
header-y += dn.h
header-y += dqblk_xfs.h
header-y += efs_fs_sb.h

include/linux/acpi.h
@@ -113,9 +113,6 @@ void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;

extern struct acpi_mcfg_allocation *pci_mmcfg_config;
extern int pci_mmcfg_config_num;

extern int sbf_port;
extern unsigned long acpi_realmode_flags;

@@ -293,7 +290,10 @@ void __init acpi_s4_no_nvs(void);
				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)

extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
extern void acpi_early_init(void);

#else	/* CONFIG_ACPI */
static inline void acpi_early_init(void) { }

static inline int early_acpi_boot_init(void)
{

include/linux/aio.h
@@ -121,9 +121,9 @@ struct kiocb {

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying file* to deliver event to.
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct file		*ki_eventfd;
	struct eventfd_ctx	*ki_eventfd;
};

#define is_sync_kiocb(iocb)	((iocb)->ki_key == KIOCB_SYNC_KEY)

include/linux/audit.h
@@ -599,6 +599,8 @@ extern void audit_log_untrustedstring(struct audit_buffer *ab,
extern void audit_log_d_path(struct audit_buffer *ab,
			     const char *prefix,
			     struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
			  char *key);
extern void audit_log_lost(const char *message);
extern int audit_update_lsm_rules(void);

@@ -621,6 +623,7 @@ extern int audit_enabled;
#define audit_log_n_untrustedstring(a,n,s) do { ; } while (0)
#define audit_log_untrustedstring(a,s) do { ; } while (0)
#define audit_log_d_path(b, p, d) do { ; } while (0)
#define audit_log_key(b, k) do { ; } while (0)
#define audit_enabled 0
#endif
#endif

include/linux/backing-dev.h
@@ -229,9 +229,14 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
				  (1 << BDI_async_congested));
}

void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
void set_bdi_congested(struct backing_dev_info *bdi, int rw);
long congestion_wait(int rw, long timeout);
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);


static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)

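With this change the congestion hooks take a BLK_RW_SYNC/BLK_RW_ASYNC flag instead of a read/write direction. A minimal sketch of a caller under the new convention (illustrative only; assumes a valid struct backing_dev_info *bdi in scope):

	/* Back off while async writeback is congested; recheck every 100 ms. */
	while (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
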
include/linux/bio.h
@@ -319,7 +319,6 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

@@ -328,11 +327,12 @@ struct bio_integrity_payload {

	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
	struct bio_vec		bip_vec[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

@@ -430,6 +430,9 @@ struct bio_set {
	unsigned int front_pad;

	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pool;
};

@@ -634,8 +637,9 @@ static inline struct bio *bio_list_get(struct bio_list *bl)

#define bio_integrity(bio)	(bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -645,21 +649,27 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bio_integrity_free(a)		do { } while (0)
#define bio_integrity_clone(a, b, c, d)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
	REQ_TYPE_ATA_PC,
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -301,12 +296,6 @@ struct blk_queue_tag {
#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
@@ -445,7 +434,6 @@ struct request_queue
#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -730,6 +718,7 @@ struct rq_map_data {
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
@@ -786,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global gounter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, rw);
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
@@ -924,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
@@ -998,13 +988,7 @@ static inline int sb_issue_discard(struct super_block *sb,
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, fmode_t has_write_perm);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128

include/linux/cb710.h
@@ -140,29 +140,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
#include <linux/highmem.h>
#include <linux/scatterlist.h>

/**
 * cb710_sg_miter_stop_writing - stop mapping iteration after writing
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   started using sg_miter_start().  A stopped iteration can be
 *   resumed by calling sg_miter_next() on it.  This is useful when
 *   resources (kmap) need to be released during iteration.
 *
 *   This is a convenience wrapper that will be optimized out for arches
 *   that don't need flush_kernel_dcache_page().
 *
 * Context:
 *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
{
	if (miter->page)
		flush_kernel_dcache_page(miter->page);
	sg_miter_stop(miter);
}

/*
 * 32-bit PIO mapping sg iterator
 *
@@ -171,12 +148,12 @@ static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
 * without DMA support).
 *
 * Best-case reading (transfer from device):
 *   sg_miter_start();
 *   sg_miter_start(, SG_MITER_TO_SG);
 *   cb710_sg_dwiter_write_from_io();
 *   cb710_sg_miter_stop_writing();
 *   sg_miter_stop();
 *
 * Best-case writing (transfer to device):
 *   sg_miter_start();
 *   sg_miter_start(, SG_MITER_FROM_SG);
 *   cb710_sg_dwiter_read_to_io();
 *   sg_miter_stop();
 */

include/linux/cgroup.h
@@ -179,14 +179,11 @@ struct cgroup {
	 */
	struct list_head release_list;

	/* pids_mutex protects the fields below */
	/* pids_mutex protects pids_list and cached pid arrays. */
	struct rw_semaphore pids_mutex;
	/* Array of process ids in the cgroup */
	pid_t *tasks_pids;
	/* How many files are using the current tasks_pids array */
	int pids_use_count;
	/* Length of the current tasks_pids array */
	int pids_length;

	/* Linked list of struct cgroup_pids */
	struct list_head pids_list;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
@@ -365,6 +362,23 @@ int cgroup_task_count(const struct cgroup *cgrp);
/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);

/*
 * When the subsys has to access css and may add permanent refcnt to css,
 * it should take care of racy conditions with rmdir(). Following set of
 * functions, is for stop/restart rmdir if necessary.
 * Because these will call css_get/put, "css" should be alive css.
 *
 *  cgroup_exclude_rmdir();
 *  ...do some jobs which may access arbitrary empty cgroup
 *  cgroup_release_and_wakeup_rmdir();
 *
 *  When someone removes a cgroup while cgroup_exclude_rmdir() holds it,
 *  it sleeps and cgroup_release_and_wakeup_rmdir() will wake him up.
 */

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);

/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details

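The comment above sketches the intended call sequence; a hypothetical subsystem callback following it would look like this (illustrative only, not part of the diff):

	static void example_subsys_op(struct cgroup_subsys_state *css)
	{
		cgroup_exclude_rmdir(css);		/* hold off a racing rmdir() */
		/* ... work that may access an (even empty) cgroup ... */
		cgroup_release_and_wakeup_rmdir(css);	/* wake a sleeping rmdir() */
	}
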
include/linux/clockevents.h
@@ -143,12 +143,3 @@ extern void clockevents_notify(unsigned long reason, void *arg);
#endif

#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern ktime_t clockevents_get_next_event(int cpu);
#else
static inline ktime_t clockevents_get_next_event(int cpu)
{
	return (ktime_t) { .tv64 = KTIME_MAX };
}
#endif

include/linux/clocksource.h
@@ -293,7 +293,12 @@ static inline int clocksource_enable(struct clocksource *cs)
	if (cs->enable)
		ret = cs->enable(cs);

	/* save mult_orig on enable */
	/*
	 * The frequency may have changed while the clocksource
	 * was disabled. If so the code in ->enable() must update
	 * the mult value to reflect the new frequency. Make sure
	 * mult_orig follows this change.
	 */
	cs->mult_orig = cs->mult;

	return ret;
@@ -309,6 +314,13 @@ static inline int clocksource_enable(struct clocksource *cs)
 */
static inline void clocksource_disable(struct clocksource *cs)
{
	/*
	 * Save mult_orig in mult so clocksource_enable() can
	 * restore the value regardless if ->enable() updates
	 * the value of mult or not.
	 */
	cs->mult = cs->mult_orig;

	if (cs->disable)
		cs->disable(cs);
}

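The new comment obliges drivers whose clock frequency can change across a disable/enable cycle to refresh cs->mult from their ->enable() hook; clocksource_enable() then re-saves it into mult_orig. A hypothetical driver hook (illustrative only; myclk_power_up() is made up):

	static int myclk_enable(struct clocksource *cs)
	{
		unsigned long hz = myclk_power_up();	/* hypothetical: returns the new rate */

		cs->mult = clocksource_hz2mult(hz, cs->shift);
		return 0;
	}
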
include/linux/connector.h
@@ -41,8 +41,10 @@
#define CN_IDX_BB			0x5	/* BlackBoard, from the TSP GPL sampling framework */
#define CN_DST_IDX			0x6
#define CN_DST_VAL			0x1
#define CN_IDX_DM			0x7	/* Device Mapper */
#define CN_VAL_DM_USERSPACE_LOG		0x1

#define CN_NETLINK_USERS		7
#define CN_NETLINK_USERS		8

/*
 * Maximum connector's message size.

include/linux/console_struct.h
@@ -89,7 +89,6 @@ struct vc_data {
	unsigned int	vc_need_wrap	: 1;
	unsigned int	vc_can_do_color	: 1;
	unsigned int	vc_report_mouse : 2;
	unsigned int	vc_kmalloced	: 1;
	unsigned char	vc_utf		: 1;	/* Unicode UTF-8 encoding */
	unsigned char	vc_utf_count;
	int		vc_utf_char;

include/linux/crash_dump.h
@@ -3,7 +3,6 @@

#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/proc_fs.h>

include/linux/decompress/generic.h
@@ -1,31 +1,37 @@
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H

/* Minimal chunksize to be read.
 *Bzip2 prefers at least 4096
 *Lzma prefers 0x10000 */
#define COMPR_IOBUF_SIZE	4096

typedef int (*decompress_fn) (unsigned char *inbuf, int len,
			      int(*fill)(void*, unsigned int),
			      int(*writebb)(void*, unsigned int),
			      unsigned char *output,
			      int(*flush)(void*, unsigned int),
			      unsigned char *outbuf,
			      int *posp,
			      void(*error)(char *x));

/* inbuf   - input buffer
 *len     - len of pre-read data in inbuf
 *fill    - function to fill inbuf if empty
 *writebb - function to write out outbug
 *fill    - function to fill inbuf when empty
 *flush   - function to write out outbuf
 *outbuf  - output buffer
 *posp    - if non-null, input position (number of bytes read) will be
 *	    returned here
 *
 *If len != 0, the inbuf is initialized (with as much data), and fill
 *should not be called
 *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
 *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
 *If len != 0, inbuf should contain all the necessary input data, and fill
 *should be NULL
 *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
 *the input buffer.  If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
 *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
 *bytes should be read per call.  Replace XXX with the appropriate decompressor
 *name, i.e. LZMA_IOBUF_SIZE.
 *
 *If flush = NULL, outbuf must be large enough to buffer all the expected
 *output.  If flush != NULL, the output buffer will be allocated by the
 *decompressor (outbuf = NULL), and the flush function will be called to
 *flush the output buffer at the appropriate time (decompressor and stream
 *dependent).
 */


/* Utility routine to detect the decompression method */
decompress_fn decompress_method(const unsigned char *inbuf, int len,
				const char **name);

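Under the reworked contract, a caller that already has the whole compressed image in memory passes fill == NULL, and with flush == NULL it must supply an output buffer big enough for the full result. A sketch of such an invocation (illustrative only; inbuf, len, outbuf and error_fn are assumed to exist):

	const char *name;
	decompress_fn decomp = decompress_method(inbuf, len, &name);
	int pos = 0;

	if (decomp)
		/* len != 0: all input is in inbuf, so fill is NULL; no flush either */
		decomp(inbuf, len, NULL, NULL, outbuf, &pos, error_fn);
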
include/linux/device-mapper.h
@@ -11,6 +11,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
@@ -21,6 +22,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned flush_request;
};

/*
@@ -80,6 +82,15 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
@@ -92,7 +103,8 @@ void dm_error(const char *message);
/*
 * Combine device limits.
 */
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
@@ -138,23 +150,12 @@ struct target_type {
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;

	/* For internal device-mapper use. */
	struct list_head list;
};

struct io_restrictions {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
	unsigned max_hw_sectors;
	unsigned max_sectors;
	unsigned max_segment_size;
	unsigned short logical_block_size;
	unsigned short max_hw_segments;
	unsigned short max_phys_segments;
	unsigned char no_cluster; /* inverted so that 0 is default */
};

struct dm_target {
	struct dm_table *table;
	struct target_type *type;
@@ -163,15 +164,18 @@ struct dm_target {
	sector_t begin;
	sector_t len;

	/* FIXME: turn this into a mask, and merge with io_restrictions */
	/* Always a power of 2 */
	sector_t split_io;

	/*
	 * These are automatically filled in by
	 * dm_table_get_device.
	 * A number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The request number will be placed in union map_info->flush_request.
	 * It is a responsibility of the target driver to remap these requests
	 * to the real underlying devices.
	 */
	struct io_restrictions limits;
	unsigned num_flush_requests;

	/* target specific data */
	void *private;
@@ -230,6 +234,7 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
@@ -392,4 +397,12 @@ static inline unsigned long to_bytes(sector_t n)
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */

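A target implements the new iterate_devices hook by invoking the callout once per underlying device; for a single-device target the shape is roughly as follows (illustrative sketch; struct linear_c and its fields are hypothetical):

	static int example_iterate_devices(struct dm_target *ti,
					   iterate_devices_callout_fn fn, void *data)
	{
		struct linear_c *lc = ti->private;	/* hypothetical per-target state */

		return fn(ti, lc->dev, lc->start, ti->len, data);
	}
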
include/linux/device.h
@@ -25,8 +25,6 @@
#include <asm/atomic.h>
#include <asm/device.h>

#define BUS_ID_SIZE		20

struct device;
struct device_private;
struct device_driver;

include/linux/dm-ioctl.h
@@ -123,6 +123,16 @@ struct dm_ioctl {
	__u32 target_count;	/* in/out */
	__s32 open_count;	/* out */
	__u32 flags;		/* in/out */

	/*
	 * event_nr holds either the event number (input and output) or the
	 * udev cookie value (input only).
	 * The DM_DEV_WAIT ioctl takes an event number as input.
	 * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
	 * use the field as a cookie to return in the DM_COOKIE
	 * variable with the uevents they issue.
	 * For output, the ioctls return the event number, not the cookie.
	 */
	__u32 event_nr;		/* in/out */
	__u32 padding;

@@ -256,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

#define DM_VERSION_MAJOR	4
#define DM_VERSION_MINOR	14
#define DM_VERSION_MINOR	15
#define DM_VERSION_PATCHLEVEL	0
#define DM_VERSION_EXTRA	"-ioctl (2008-04-23)"
#define DM_VERSION_EXTRA	"-ioctl (2009-04-01)"

/* Status bits */
#define DM_READONLY_FLAG	(1 << 0) /* In/Out */

include/linux/dm-log-userspace.h (new file, 386 lines)
@@ -0,0 +1,386 @@
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#ifndef __DM_LOG_USERSPACE_H__
#define __DM_LOG_USERSPACE_H__

#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */

/*
 * The device-mapper userspace log module consists of a kernel component and
 * a user-space component.  The kernel component implements the API defined
 * in dm-dirty-log.h.  Its purpose is simply to pass the parameters and
 * return values of those API functions between kernel and user-space.
 *
 * Below are defined the 'request_types' - DM_ULOG_CTR, DM_ULOG_DTR, etc.
 * These request types represent the different functions in the device-mapper
 * dirty log API.  Each of these is described in more detail below.
 *
 * The user-space program must listen for requests from the kernel (representing
 * the various API functions) and process them.
 *
 * User-space begins by setting up the communication link (error checking
 * removed for clarity):
 *	fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	addr.nl_family = AF_NETLINK;
 *	addr.nl_groups = CN_IDX_DM;
 *	addr.nl_pid = 0;
 *	r = bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	opt = addr.nl_groups;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt));
 *
 * User-space will then wait to receive requests form the kernel, which it
 * will process as described below.  The requests are received in the form,
 * ((struct dm_ulog_request) + (additional data)).  Depending on the request
 * type, there may or may not be 'additional data'.  In the descriptions below,
 * you will see 'Payload-to-userspace' and 'Payload-to-kernel'.  The
 * 'Payload-to-userspace' is what the kernel sends in 'additional data' as
 * necessary parameters to complete the request.  The 'Payload-to-kernel' is
 * the 'additional data' returned to the kernel that contains the necessary
 * results of the request.  The 'data_size' field in the dm_ulog_request
 * structure denotes the availability and amount of payload data.
 */

/*
 * DM_ULOG_CTR corresponds to (found in dm-dirty-log.h):
 * int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
 *	      unsigned argc, char **argv);
 *
 * Payload-to-userspace:
 *	A single string containing all the argv arguments separated by ' 's
 * Payload-to-kernel:
 *	None.  ('data_size' in the dm_ulog_request struct should be 0.)
 *
 * The UUID contained in the dm_ulog_request structure is the reference that
 * will be used by all request types to a specific log.  The constructor must
 * record this assotiation with instance created.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_CTR                    1

/*
 * DM_ULOG_DTR corresponds to (found in dm-dirty-log.h):
 * void (*dtr)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	A single string containing all the argv arguments separated by ' 's
 * Payload-to-kernel:
 *	None.  ('data_size' in the dm_ulog_request struct should be 0.)
 *
 * The UUID contained in the dm_ulog_request structure is all that is
 * necessary to identify the log instance being destroyed.  There is no
 * payload data.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and clearing
 * 'data_size' appropriately.
 */
#define DM_ULOG_DTR                    2

/*
 * DM_ULOG_PRESUSPEND corresponds to (found in dm-dirty-log.h):
 * int (*presuspend)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	None.
 *
 * The UUID contained in the dm_ulog_request structure is all that is
 * necessary to identify the log instance being presuspended.  There is no
 * payload data.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_PRESUSPEND             3

/*
 * DM_ULOG_POSTSUSPEND corresponds to (found in dm-dirty-log.h):
 * int (*postsuspend)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	None.
 *
 * The UUID contained in the dm_ulog_request structure is all that is
 * necessary to identify the log instance being postsuspended.  There is no
 * payload data.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_POSTSUSPEND            4

/*
 * DM_ULOG_RESUME corresponds to (found in dm-dirty-log.h):
 * int (*resume)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	None.
 *
 * The UUID contained in the dm_ulog_request structure is all that is
 * necessary to identify the log instance being resumed.  There is no
 * payload data.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_RESUME                 5

/*
 * DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h):
 * uint32_t (*get_region_size)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	uint64_t - contains the region size
 *
 * The region size is something that was determined at constructor time.
 * It is returned in the payload area and 'data_size' is set to
 * reflect this.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field appropriately.
 */
#define DM_ULOG_GET_REGION_SIZE        6

/*
 * DM_ULOG_IS_CLEAN corresponds to (found in dm-dirty-log.h):
 * int (*is_clean)(struct dm_dirty_log *log, region_t region);
 *
 * Payload-to-userspace:
 *	uint64_t - the region to get clean status on
 * Payload-to-kernel:
 *	int64_t - 1 if clean, 0 otherwise
 *
 * Payload is sizeof(uint64_t) and contains the region for which the clean
 * status is being made.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - filling the payload with 0 (not clean) or
 * 1 (clean), setting 'data_size' and 'error' appropriately.
 */
#define DM_ULOG_IS_CLEAN               7

/*
 * DM_ULOG_IN_SYNC corresponds to (found in dm-dirty-log.h):
 * int (*in_sync)(struct dm_dirty_log *log, region_t region,
 *		  int can_block);
 *
 * Payload-to-userspace:
 *	uint64_t - the region to get sync status on
 * Payload-to-kernel:
 *	int64_t - 1 if in-sync, 0 otherwise
 *
 * Exactly the same as 'is_clean' above, except this time asking "has the
 * region been recovered?" vs. "is the region not being modified?"
 */
#define DM_ULOG_IN_SYNC                8

/*
 * DM_ULOG_FLUSH corresponds to (found in dm-dirty-log.h):
 * int (*flush)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	None.
 *
 * No incoming or outgoing payload.  Simply flush log state to disk.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and clearing
 * 'data_size' appropriately.
 */
#define DM_ULOG_FLUSH                  9

/*
 * DM_ULOG_MARK_REGION corresponds to (found in dm-dirty-log.h):
 * void (*mark_region)(struct dm_dirty_log *log, region_t region);
 *
 * Payload-to-userspace:
 *	uint64_t [] - region(s) to mark
 * Payload-to-kernel:
 *	None.
 *
 * Incoming payload contains the one or more regions to mark dirty.
 * The number of regions contained in the payload can be determined from
 * 'data_size/sizeof(uint64_t)'.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and clearing
 * 'data_size' appropriately.
 */
#define DM_ULOG_MARK_REGION           10

/*
 * DM_ULOG_CLEAR_REGION corresponds to (found in dm-dirty-log.h):
 * void (*clear_region)(struct dm_dirty_log *log, region_t region);
 *
 * Payload-to-userspace:
 *	uint64_t [] - region(s) to clear
 * Payload-to-kernel:
 *	None.
 *
 * Incoming payload contains the one or more regions to mark clean.
 * The number of regions contained in the payload can be determined from
 * 'data_size/sizeof(uint64_t)'.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and clearing
 * 'data_size' appropriately.
 */
#define DM_ULOG_CLEAR_REGION          11

/*
 * DM_ULOG_GET_RESYNC_WORK corresponds to (found in dm-dirty-log.h):
 * int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	{
 *		int64_t i; -- 1 if recovery necessary, 0 otherwise
 *		uint64_t r; -- The region to recover if i=1
 *	}
 *	'data_size' should be set appropriately.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field appropriately.
 */
#define DM_ULOG_GET_RESYNC_WORK       12

/*
 * DM_ULOG_SET_REGION_SYNC corresponds to (found in dm-dirty-log.h):
 * void (*set_region_sync)(struct dm_dirty_log *log,
 *			   region_t region, int in_sync);
 *
 * Payload-to-userspace:
 *	{
 *		uint64_t - region to set sync state on
 *		int64_t - 0 if not-in-sync, 1 if in-sync
 *	}
 * Payload-to-kernel:
 *	None.
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and clearing
 * 'data_size' appropriately.
 */
#define DM_ULOG_SET_REGION_SYNC       13

/*
 * DM_ULOG_GET_SYNC_COUNT corresponds to (found in dm-dirty-log.h):
 * region_t (*get_sync_count)(struct dm_dirty_log *log);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	uint64_t - the number of in-sync regions
 *
 * No incoming payload.  Kernel-bound payload contains the number of
 * regions that are in-sync (in a size_t).
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_GET_SYNC_COUNT        14

/*
 * DM_ULOG_STATUS_INFO corresponds to (found in dm-dirty-log.h):
 * int (*status)(struct dm_dirty_log *log, STATUSTYPE_INFO,
 *		 char *result, unsigned maxlen);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	Character string containing STATUSTYPE_INFO
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_STATUS_INFO           15

/*
 * DM_ULOG_STATUS_TABLE corresponds to (found in dm-dirty-log.h):
 * int (*status)(struct dm_dirty_log *log, STATUSTYPE_TABLE,
 *		 char *result, unsigned maxlen);
 *
 * Payload-to-userspace:
 *	None.
 * Payload-to-kernel:
 *	Character string containing STATUSTYPE_TABLE
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_STATUS_TABLE          16

/*
 * DM_ULOG_IS_REMOTE_RECOVERING corresponds to (found in dm-dirty-log.h):
 * int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
 *
 * Payload-to-userspace:
 *	uint64_t - region to determine recovery status on
 * Payload-to-kernel:
 *	{
 *		int64_t is_recovering; -- 0 if no, 1 if yes
 *		uint64_t in_sync_hint; -- lowest region still needing resync
 *	}
 *
 * When the request has been processed, user-space must return the
 * dm_ulog_request to the kernel - setting the 'error' field and
 * 'data_size' appropriately.
 */
#define DM_ULOG_IS_REMOTE_RECOVERING  17

/*
 * (DM_ULOG_REQUEST_MASK & request_type) to get the request type
 *
 * Payload-to-userspace:
 *	A single string containing all the argv arguments separated by ' 's
 * Payload-to-kernel:
 *	None.  ('data_size' in the dm_ulog_request struct should be 0.)
 *
 * We are reserving 8 bits of the 32-bit 'request_type' field for the
 * various request types above.  The remaining 24-bits are currently
 * set to zero and are reserved for future use and compatibility concerns.
 *
 * User-space should always use DM_ULOG_REQUEST_TYPE to aquire the
 * request type from the 'request_type' field to maintain forward compatibility.
 */
#define DM_ULOG_REQUEST_MASK 0xFF
#define DM_ULOG_REQUEST_TYPE(request_type) \
	(DM_ULOG_REQUEST_MASK & (request_type))

struct dm_ulog_request {
	char uuid[DM_UUID_LEN];	/* Ties a request to a specific mirror log */
	char padding[7];	/* Padding because DM_UUID_LEN = 129 */

	int32_t error;		/* Used to report back processing errors */

	uint32_t seq;		/* Sequence number for request */
	uint32_t request_type;	/* DM_ULOG_* defined above */
	uint32_t data_size;	/* How much data (not including this struct) */

	char data[0];
};

#endif /* __DM_LOG_USERSPACE_H__ */

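On the user-space side, a server loop would read one request plus payload from the connector socket and dispatch on the masked type; a rough sketch (illustrative only, not part of this header):

	static void handle_request(struct dm_ulog_request *rq)
	{
		switch (DM_ULOG_REQUEST_TYPE(rq->request_type)) {
		case DM_ULOG_CTR:
			/* rq->data holds the argv string; bind rq->uuid to a new log */
			rq->data_size = 0;
			break;
		case DM_ULOG_IS_CLEAN:
			/* rq->data holds a uint64_t region; reply with an int64_t 0/1 */
			break;
		default:
			rq->error = -1;
			rq->data_size = 0;
		}
	}
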
include/linux/dmar.h
@@ -126,6 +126,8 @@ extern int free_irte(int irq);
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
extern int set_ioapic_sid(struct irte *irte, int apic);
extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
@@ -156,6 +158,15 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{
	return NULL;
}
static inline int set_ioapic_sid(struct irte *irte, int apic)
{
	return 0;
}
static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	return 0;
}

#define irq_remapped(irq)		(0)
#define enable_intr_remapping(mode)	(-1)
#define disable_intr_remapping()	(0)

include/linux/elfcore.h
@@ -122,9 +122,10 @@ static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_r

static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#ifdef ELF_CORE_COPY_TASK_REGS

#if defined (ELF_CORE_COPY_TASK_REGS)
	return ELF_CORE_COPY_TASK_REGS(t, elfregs);
#elif defined (task_pt_regs)
	elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
	return 0;
}

include/linux/eventfd.h
@@ -8,10 +8,8 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H

#ifdef CONFIG_EVENTFD

/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
#include <linux/file.h>

/*
 * CAREFUL: Check include/asm-generic/fcntl.h when defining
@@ -27,16 +25,37 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)

#ifdef CONFIG_EVENTFD

struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
int eventfd_signal(struct file *file, int n);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
int eventfd_signal(struct eventfd_ctx *ctx, int n);

#else /* CONFIG_EVENTFD */

#define eventfd_fget(fd) ERR_PTR(-ENOSYS)
static inline int eventfd_signal(struct file *file, int n)
{ return 0; }
/*
 * Ugly ugly ugly error layer to support modules that uses eventfd but
 * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
 */
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* CONFIG_EVENTFD */
static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
	return -ENOSYS;
}

static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
{

}

#endif

#endif /* _LINUX_EVENTFD_H */

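Kernel users now hold an eventfd by context rather than by struct file, which is exactly the migration the kiocb change in aio.h above makes. A minimal sketch of the new calling convention (illustrative only):

	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);	/* fd supplied by userspace */

	if (!IS_ERR(ctx)) {
		eventfd_signal(ctx, 1);	/* post one event to the counter */
		eventfd_ctx_put(ctx);	/* drop the reference when done */
	}
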
include/linux/ext3_fs.h
@@ -874,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
	int create, int extend_disksize);
	int create);

extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, int);

include/linux/ext3_fs_i.h
@@ -103,10 +103,6 @@ struct ext3_inode_info {
	 */
	struct rw_semaphore xattr_sem;
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	struct posix_acl *i_acl;
	struct posix_acl *i_default_acl;
#endif

	struct list_head i_orphan;	/* unlinked but open inodes */

include/linux/falloc.h
@@ -3,4 +3,25 @@

#define FALLOC_FL_KEEP_SIZE	0x01 /* default is extend size */

#ifdef __KERNEL__

/*
 * Space reservation ioctls and argument structure
 * are designed to be compatible with the legacy XFS ioctls.
 */
struct space_resv {
	__s16		l_type;
	__s16		l_whence;
	__s64		l_start;
	__s64		l_len;		/* len == 0 means until end of file */
	__s32		l_sysid;
	__u32		l_pid;
	__s32		l_pad[4];	/* reserved area */
};

#define FS_IOC_RESVSP		_IOW('X', 40, struct space_resv)
#define FS_IOC_RESVSP64		_IOW('X', 42, struct space_resv)

#endif /* __KERNEL__ */

#endif /* _FALLOC_H_ */

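The structure mirrors the legacy XFS reservation ioctls, so a caller fills it like an xfs_flock64; a sketch of reserving 16 MiB from the current file position (illustrative only; in-kernel these ioctls are routed through ioctl_preallocate(), declared in fs.h below):

	struct space_resv sr = {
		.l_whence = SEEK_CUR,	/* l_start is relative to the file position */
		.l_start  = 0,
		.l_len    = 16 * 1024 * 1024,
	};

	if (ioctl(fd, FS_IOC_RESVSP64, &sr) < 0)
		perror("FS_IOC_RESVSP64");
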
include/linux/fb.h
@@ -819,6 +819,7 @@ struct fb_info {
	int node;
	int flags;
	struct mutex lock;		/* Lock for open/release/ioctl funcs */
	struct mutex mm_lock;		/* Lock for fb_mmap and smem_* fields */
	struct fb_var_screeninfo var;	/* Current var */
	struct fb_fix_screeninfo fix;	/* Current fix */
	struct fb_monspecs monspecs;	/* Current Monitor specs */

include/linux/firewire.h
@@ -127,6 +127,7 @@ struct fw_card {
	struct delayed_work work;
	int bm_retries;
	int bm_generation;
	__be32 bm_transaction_data[2];

	bool broadcast_channel_allocated;
	u32 broadcast_channel;

include/linux/flex_array.h (new file, 47 lines)
@@ -0,0 +1,47 @@
#ifndef _FLEX_ARRAY_H
#define _FLEX_ARRAY_H

#include <linux/types.h>
#include <asm/page.h>

#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE

struct flex_array_part;

/*
 * This is meant to replace cases where an array-like
 * structure has gotten too big to fit into kmalloc()
 * and the developer is getting tempted to use
 * vmalloc().
 */

struct flex_array {
	union {
		struct {
			int element_size;
			int total_nr_elements;
			struct flex_array_part *parts[0];
		};
		/*
		 * This little trick makes sure that
		 * sizeof(flex_array) == PAGE_SIZE
		 */
		char padding[FLEX_ARRAY_BASE_SIZE];
	};
};

#define FLEX_ARRAY_INIT(size, total) { { {\
	.element_size = (size), \
	.total_nr_elements = (total), \
} } }

struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags);
void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, int element_nr, void *src,
		   gfp_t flags);
void *flex_array_get(struct flex_array *fa, int element_nr);

#endif /* _FLEX_ARRAY_H */

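Typical use is allocate, put, get, free; a sketch under an assumed GFP_KERNEL-safe context (illustrative only; struct my_item is hypothetical):

	struct my_item { int a, b; };			/* hypothetical element type */
	struct my_item item = { 1, 2 }, *p;
	struct flex_array *fa;

	fa = flex_array_alloc(sizeof(struct my_item), 1000, GFP_KERNEL);
	if (fa && !flex_array_put(fa, 42, &item, GFP_KERNEL)) {
		p = flex_array_get(fa, 42);	/* the stored copy lives in fa's parts */
		/* ... use p ... */
	}
	flex_array_free(fa);
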
include/linux/fs.h
@@ -710,6 +710,9 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
#define i_size_ordered_init(inode) do { } while (0)
#endif

struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))

struct inode {
	struct hlist_node	i_hash;
	struct list_head	i_list;
@@ -772,6 +775,10 @@ struct inode {
	atomic_t		i_writecount;
#ifdef CONFIG_SECURITY
	void			*i_security;
#endif
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl	*i_acl;
	struct posix_acl	*i_default_acl;
#endif
	void			*i_private; /* fs or device private pointer */
};
@@ -1906,6 +1913,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode)

extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
		       struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
			loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
			int mode);
extern struct file *filp_open(const char *, int, int);
@@ -1914,6 +1923,10 @@ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char __user *);

/* fs/ioctl.c */

extern int ioctl_preallocate(struct file *filp, void __user *argp);

/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(unsigned long);
@@ -1933,6 +1946,7 @@ extern void putname(const char *name);
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
@@ -2123,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);

extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);

extern struct inode * inode_init_always(struct super_block *, struct inode *);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
@@ -2150,6 +2164,7 @@ extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);

include/linux/fsnotify_backend.h
@@ -280,7 +280,7 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
	assert_spin_locked(&dentry->d_lock);

	parent = dentry->d_parent;
	if (fsnotify_inode_watches_children(parent->d_inode))
	if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
		dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
	else
		dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -352,7 +352,7 @@ extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
						    void *data, int data_is, const char *name,
						    u32 cookie);
						    u32 cookie, gfp_t gfp);

#else

include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};


void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
				  unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
	void			*filter;
	void			*mod;

#ifdef CONFIG_EVENT_PROFILE
	atomic_t	profile_count;
	int		(*profile_enable)(struct ftrace_event_call *);
	void		(*profile_disable)(struct ftrace_event_call *);
#endif
	atomic_t	profile_count;
	int		(*profile_enable)(struct ftrace_event_call *);
	void		(*profile_disable)(struct ftrace_event_call *);
};

#define MAX_FILTER_PRED		32

include/linux/fuse.h
@@ -25,6 +25,11 @@
 *  - add IOCTL message
 *  - add unsolicited notification support
 *  - add POLL message and NOTIFY_POLL notification
 *
 * 7.12
 *  - add umask flag to input argument of open, mknod and mkdir
 *  - add notification messages for invalidation of inodes and
 *    directory entries
 */

#ifndef _LINUX_FUSE_H
@@ -36,7 +41,7 @@
#define FUSE_KERNEL_VERSION 7

/** Minor version number of this interface */
#define FUSE_KERNEL_MINOR_VERSION 11
#define FUSE_KERNEL_MINOR_VERSION 12

/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -112,6 +117,7 @@ struct fuse_file_lock {
 * INIT request/reply flags
 *
 * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
 * FUSE_DONT_MASK: don't apply umask to file mode on create operations
 */
#define FUSE_ASYNC_READ		(1 << 0)
#define FUSE_POSIX_LOCKS	(1 << 1)
@@ -119,6 +125,7 @@ struct fuse_file_lock {
#define FUSE_ATOMIC_O_TRUNC	(1 << 3)
#define FUSE_EXPORT_SUPPORT	(1 << 4)
#define FUSE_BIG_WRITES		(1 << 5)
#define FUSE_DONT_MASK		(1 << 6)

/**
 * CUSE INIT request/reply flags
@@ -224,6 +231,8 @@ enum fuse_opcode {

enum fuse_notify_code {
	FUSE_NOTIFY_POLL = 1,
	FUSE_NOTIFY_INVAL_INODE = 2,
	FUSE_NOTIFY_INVAL_ENTRY = 3,
	FUSE_NOTIFY_CODE_MAX,
};

@@ -262,14 +271,18 @@ struct fuse_attr_out {
	struct fuse_attr attr;
};

#define FUSE_COMPAT_MKNOD_IN_SIZE 8

struct fuse_mknod_in {
	__u32	mode;
	__u32	rdev;
	__u32	umask;
	__u32	padding;
};

struct fuse_mkdir_in {
	__u32	mode;
	__u32	padding;
	__u32	umask;
};

struct fuse_rename_in {
@@ -300,8 +313,15 @@ struct fuse_setattr_in {
};

struct fuse_open_in {
	__u32	flags;
	__u32	unused;
};

struct fuse_create_in {
	__u32	flags;
	__u32	mode;
	__u32	umask;
	__u32	padding;
};

struct fuse_open_out {
@@ -508,4 +528,16 @@ struct fuse_dirent {
#define FUSE_DIRENT_SIZE(d) \
	FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)

struct fuse_notify_inval_inode_out {
	__u64	ino;
	__s64	off;
	__s64	len;
};

struct fuse_notify_inval_entry_out {
	__u64	parent;
	__u32	namelen;
	__u32	padding;
};

#endif /* _LINUX_FUSE_H */

include/linux/hardirq.h
@@ -2,7 +2,9 @@
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

include/linux/hrtimer.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
#include <linux/timer.h>


struct hrtimer_clock_base;
@@ -447,6 +448,8 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
	if (likely(!timer->start_site))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
}
@@ -456,6 +459,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
	if (likely(!timer_stats_active))
		return;
	__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}

include/linux/icmpv6.h
@@ -175,16 +175,16 @@ struct icmp6_filter {


extern void			icmpv6_send(struct sk_buff *skb,
					    int type, int code,
					    u8 type, u8 code,
					    __u32 info,
					    struct net_device *dev);

extern int			icmpv6_init(void);
extern int			icmpv6_err_convert(int type, int code,
extern int			icmpv6_err_convert(u8 type, u8 code,
						   int *err);
extern void			icmpv6_cleanup(void);
extern void			icmpv6_param_prob(struct sk_buff *skb,
						  int code, int pos);
						  u8 code, int pos);

struct flowi;
struct in6_addr;

include/linux/ide.h
@@ -1062,7 +1062,6 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
extern int ide_vlb_clk;
extern int ide_pci_clk;

unsigned int ide_rq_bytes(struct request *);
int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);

@@ -1361,7 +1360,6 @@ int ide_in_drive_list(u16 *, const struct drive_list_entry *);
#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_dma_good_drive(ide_drive_t *);
int __ide_dma_bad_drive(ide_drive_t *);
int ide_id_dma_bug(ide_drive_t *);

u8 ide_find_dma_mode(ide_drive_t *, u8);

@@ -1402,7 +1400,6 @@ void ide_dma_lost_irq(ide_drive_t *);
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);

#else
static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
@@ -1422,6 +1419,7 @@ static inline void ide_dma_unmap_sg(ide_drive_t *drive,

#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
bool ide_port_acpi(ide_hwif_t *hwif);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
@@ -1430,6 +1428,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }

@@ -78,6 +78,7 @@
#define ETH_P_PAE	0x888E		/* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE	0x88A2		/* ATA over Ethernet */
#define ETH_P_TIPC	0x88CA		/* TIPC */
#define ETH_P_1588	0x88F7		/* IEEE 1588 Timesync */
#define ETH_P_FCOE	0x8906		/* Fibre Channel over Ethernet */
#define ETH_P_FIP	0x8914		/* FCoE Initialization Protocol */
#define ETH_P_EDSA	0xDADA		/* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
@@ -24,6 +24,7 @@ extern int ima_path_check(struct path *path, int mask, int update_counts);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
extern void ima_counts_put(struct path *path, int mask);

#else
static inline int ima_bprm_check(struct linux_binprm *bprm)

@@ -60,5 +61,10 @@ static inline void ima_counts_get(struct file *file)
{
	return;
}

static inline void ima_counts_put(struct path *path, int mask)
{
	return;
}
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)

#define IN_DEV_FORWARD(in_dev)		IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev)		IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev)		IN_DEV_ANDCONF((in_dev), RP_FILTER)
#define IN_DEV_RPFILTER(in_dev)		IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev)	IN_DEV_ANDCONF((in_dev), \
						       ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev)	IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
@@ -183,5 +183,8 @@ extern struct cred init_cred;
	LIST_HEAD_INIT(cpu_timers[2]),					\
}

/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))

#endif
66	include/linux/input/matrix_keypad.h	Normal file

@@ -0,0 +1,66 @@
#ifndef _MATRIX_KEYPAD_H
#define _MATRIX_KEYPAD_H

#include <linux/types.h>
#include <linux/input.h>

#define MATRIX_MAX_ROWS		16
#define MATRIX_MAX_COLS		16

#define KEY(row, col, val)	((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
				 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
				 (val & 0xffff))

#define KEY_ROW(k)		(((k) >> 24) & 0xff)
#define KEY_COL(k)		(((k) >> 16) & 0xff)
#define KEY_VAL(k)		((k) & 0xffff)

#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

/**
 * struct matrix_keymap_data - keymap for matrix keyboards
 * @keymap: pointer to array of uint32 values encoded with KEY() macro
 *	representing keymap
 * @keymap_size: number of entries (initialized) in this keymap
 *
 * This structure is supposed to be used by platform code to supply
 * keymaps to drivers that implement matrix-like keypads/keyboards.
 */
struct matrix_keymap_data {
	const uint32_t *keymap;
	unsigned int	keymap_size;
};

/**
 * struct matrix_keypad_platform_data - platform-dependent keypad data
 * @keymap_data: pointer to &matrix_keymap_data
 * @row_gpios: pointer to array of gpio numbers representing rows
 * @col_gpios: pointer to array of gpio numbers representing columns
 * @num_row_gpios: actual number of row gpios used by device
 * @num_col_gpios: actual number of col gpios used by device
 * @col_scan_delay_us: delay, measured in microseconds, that is
 *	needed before we can scan the keypad after activating a column gpio
 * @debounce_ms: debounce interval in milliseconds
 *
 * This structure represents platform-specific data that is used by the
 * matrix_keypad driver to perform proper initialization.
 */
struct matrix_keypad_platform_data {
	const struct matrix_keymap_data *keymap_data;

	const unsigned int *row_gpios;
	const unsigned int *col_gpios;

	unsigned int	num_row_gpios;
	unsigned int	num_col_gpios;

	unsigned int	col_scan_delay_us;

	/* key debounce interval in milliseconds */
	unsigned int	debounce_ms;

	bool		active_low;
	bool		wakeup;
};

#endif /* _MATRIX_KEYPAD_H */
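The KEY() macro packs row, column, and keycode into one u32 entry. As a usage sketch (not part of this patch), board code might feed the driver a keymap and platform data like this; the GPIO numbers and key choices are hypothetical:

/* Hedged example: platform data for a 2x2 keypad (hypothetical GPIOs). */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const uint32_t demo_keymap[] = {
	KEY(0, 0, KEY_A),	/* row 0, col 0 -> 'a' */
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
	KEY(1, 1, KEY_ESC),
};

static const struct matrix_keymap_data demo_keymap_data = {
	.keymap		= demo_keymap,
	.keymap_size	= ARRAY_SIZE(demo_keymap),
};

static const unsigned int demo_row_gpios[] = { 100, 101 };	/* hypothetical */
static const unsigned int demo_col_gpios[] = { 102, 103 };	/* hypothetical */

static struct matrix_keypad_platform_data demo_keypad_pdata = {
	.keymap_data		= &demo_keymap_data,
	.row_gpios		= demo_row_gpios,
	.col_gpios		= demo_col_gpios,
	.num_row_gpios		= ARRAY_SIZE(demo_row_gpios),
	.num_col_gpios		= ARRAY_SIZE(demo_col_gpios),
	.col_scan_delay_us	= 10,
	.debounce_ms		= 20,
	.active_low		= true,
};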
@@ -14,6 +14,7 @@
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>

@@ -64,11 +65,13 @@
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

@@ -517,6 +520,31 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

/*
 * Autoprobing for irqs:
 *
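A tasklet_hrtimer arms a high-resolution timer whose user callback is deferred to tasklet (softirq) context. A minimal usage sketch, not part of the patch; the callback and the 1 ms period are illustrative:

#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer demo_thrt;

/* Runs in tasklet context after the hrtimer fires. */
static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_setup(void)
{
	tasklet_hrtimer_init(&demo_thrt, demo_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&demo_thrt, ktime_set(0, 1000000),
			      HRTIMER_MODE_REL);	/* 1 ms, relative */
}

static void demo_teardown(void)
{
	/* cancels the timer and kills the backing tasklet */
	tasklet_hrtimer_cancel(&demo_thrt);
}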
@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
	 * a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_long_inc(&ioc->refcount);
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}
@@ -303,6 +303,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern const char *print_tainted(void);
extern void add_taint(unsigned flag);
extern int test_taint(unsigned flag);
@@ -27,6 +27,7 @@ extern void kmemleak_init(void);
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp);
extern void kmemleak_free(const void *ptr);
extern void kmemleak_free_part(const void *ptr, size_t size);
extern void kmemleak_padding(const void *ptr, unsigned long offset,
			     size_t size);
extern void kmemleak_not_leak(const void *ptr);

@@ -71,6 +72,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
static inline void kmemleak_free(const void *ptr)
{
}
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
{
}
@@ -110,6 +110,7 @@ struct kvm_memory_slot {

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int level);
	union {

@@ -125,6 +126,7 @@ struct kvm_kernel_irq_routing_entry {
struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	spinlock_t mmu_lock;
	spinlock_t requests_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
53	include/linux/leds-lp3944.h	Normal file

@@ -0,0 +1,53 @@
/*
 * leds-lp3944.h - platform data structure for lp3944 led controller
 *
 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __LINUX_LEDS_LP3944_H
#define __LINUX_LEDS_LP3944_H

#include <linux/leds.h>
#include <linux/workqueue.h>

#define LP3944_LED0	0
#define LP3944_LED1	1
#define LP3944_LED2	2
#define LP3944_LED3	3
#define LP3944_LED4	4
#define LP3944_LED5	5
#define LP3944_LED6	6
#define LP3944_LED7	7
#define LP3944_LEDS_MAX	8

#define LP3944_LED_STATUS_MASK	0x03
enum lp3944_status {
	LP3944_LED_STATUS_OFF  = 0x0,
	LP3944_LED_STATUS_ON   = 0x1,
	LP3944_LED_STATUS_DIM0 = 0x2,
	LP3944_LED_STATUS_DIM1 = 0x3
};

enum lp3944_type {
	LP3944_LED_TYPE_NONE,
	LP3944_LED_TYPE_LED,
	LP3944_LED_TYPE_LED_INVERTED,
};

struct lp3944_led {
	char *name;
	enum lp3944_type type;
	enum lp3944_status status;
};

struct lp3944_platform_data {
	struct lp3944_led leds[LP3944_LEDS_MAX];
	u8 leds_size;
};

#endif /* __LINUX_LEDS_LP3944_H */
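As an illustrative sketch (not part of the patch), a board file could describe two of the eight LP3944 outputs like this; the names, types, and initial states are hypothetical:

#include <linux/leds-lp3944.h>

static struct lp3944_platform_data demo_lp3944_pdata = {
	.leds = {
		[LP3944_LED0] = {
			.name	= "demo::led0",		/* hypothetical */
			.type	= LP3944_LED_TYPE_LED,
			.status	= LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name	= "demo::led1",		/* hypothetical */
			.type	= LP3944_LED_TYPE_LED_INVERTED,
			.status	= LP3944_LED_STATUS_DIM0,
		},
	},
	.leds_size = 2,	/* number of entries actually described */
};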
@@ -45,7 +45,10 @@ struct led_classdev {
	/* Get LED brightness level */
	enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);

	/* Activate hardware accelerated blink */
	/* Activate hardware accelerated blink, delays are in
	 * milliseconds and if none is provided then a sensible default
	 * should be chosen. The call can adjust the timings if it can't
	 * match the values specified exactly. */
	int (*blink_set)(struct led_classdev *led_cdev,
			 unsigned long *delay_on,
			 unsigned long *delay_off);

@@ -141,9 +144,14 @@ struct gpio_led {
	const char *name;
	const char *default_trigger;
	unsigned	gpio;
	u8		active_low : 1;
	u8		retain_state_suspended : 1;
	unsigned	active_low : 1;
	unsigned	retain_state_suspended : 1;
	unsigned	default_state : 2;
	/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
};
#define LEDS_GPIO_DEFSTATE_OFF	0
#define LEDS_GPIO_DEFSTATE_ON	1
#define LEDS_GPIO_DEFSTATE_KEEP	2

struct gpio_led_platform_data {
	int		num_leds;
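A hedged sketch of how the new default_state field might be used from board code; the GPIO number and trigger are hypothetical, and .leds is the pointer member of gpio_led_platform_data that the hunk above truncates:

#include <linux/kernel.h>
#include <linux/leds.h>

static struct gpio_led demo_leds[] = {
	{
		.name			= "demo:green:status",	/* hypothetical */
		.default_trigger	= "heartbeat",
		.gpio			= 42,			/* hypothetical */
		.active_low		= 1,
		/* leave the LED as the bootloader set it */
		.default_state		= LEDS_GPIO_DEFSTATE_KEEP,
	},
};

static struct gpio_led_platform_data demo_led_pdata = {
	.num_leds	= ARRAY_SIZE(demo_leds),
	.leds		= demo_leds,
};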
@@ -1,5 +1,7 @@
/* Things the lguest guest needs to know. Note: like all lguest interfaces,
 * this is subject to wild and random change between versions. */
/*
 * Things the lguest guest needs to know. Note: like all lguest interfaces,
 * this is subject to wild and random change between versions.
 */
#ifndef _LINUX_LGUEST_H
#define _LINUX_LGUEST_H

@@ -11,32 +13,41 @@
#define LG_CLOCK_MIN_DELTA	100UL
#define LG_CLOCK_MAX_DELTA	ULONG_MAX

/*G:032 The second method of communicating with the Host is to via "struct
/*G:031
 * The second method of communicating with the Host is to via "struct
 * lguest_data". Once the Guest's initialization hypercall tells the Host where
 * this is, the Guest and Host both publish information in it. :*/
struct lguest_data
{
	/* 512 == enabled (same as eflags in normal hardware). The Guest
	 * changes interrupts so often that a hypercall is too slow. */
 * this is, the Guest and Host both publish information in it.
:*/
struct lguest_data {
	/*
	 * 512 == enabled (same as eflags in normal hardware). The Guest
	 * changes interrupts so often that a hypercall is too slow.
	 */
	unsigned int irq_enabled;
	/* Fine-grained interrupt disabling by the Guest */
	DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);

	/* The Host writes the virtual address of the last page fault here,
	/*
	 * The Host writes the virtual address of the last page fault here,
	 * which saves the Guest a hypercall. CR2 is the native register where
	 * this address would normally be found. */
	 * this address would normally be found.
	 */
	unsigned long cr2;

	/* Wallclock time set by the Host. */
	struct timespec time;

	/* Interrupt pending set by the Host. The Guest should do a hypercall
	 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
	/*
	 * Interrupt pending set by the Host. The Guest should do a hypercall
	 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
	 */
	int irq_pending;

	/* Async hypercall ring. Instead of directly making hypercalls, we can
	/*
	 * Async hypercall ring. Instead of directly making hypercalls, we can
	 * place them in here for processing the next time the Host wants.
	 * This batching can be quite efficient. */
	 * This batching can be quite efficient.
	 */

	/* 0xFF == done (set by Host), 0 == pending (set by Guest). */
	u8 hcall_status[LHCALL_RING_SIZE];
@@ -29,8 +29,10 @@ struct lguest_device_desc {
	__u8 type;
	/* The number of virtqueues (first in config array) */
	__u8 num_vq;
	/* The number of bytes of feature bits. Multiply by 2: one for host
	 * features and one for Guest acknowledgements. */
	/*
	 * The number of bytes of feature bits. Multiply by 2: one for host
	 * features and one for Guest acknowledgements.
	 */
	__u8 feature_len;
	/* The number of bytes of the config array after virtqueues. */
	__u8 config_len;

@@ -39,8 +41,10 @@ struct lguest_device_desc {
	__u8 config[0];
};

/*D:135 This is how we expect the device configuration field for a virtqueue
 * to be laid out in config space. */
/*D:135
 * This is how we expect the device configuration field for a virtqueue
 * to be laid out in config space.
 */
struct lguest_vqconfig {
	/* The number of entries in the virtio_ring */
	__u16 num;

@@ -61,7 +65,9 @@ enum lguest_req
	LHREQ_EVENTFD, /* + address, fd. */
};

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize for historical reasons. */
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize for historical reasons.
 */
#define LGUEST_VRING_ALIGN	4096
#endif /* _LINUX_LGUEST_LAUNCHER */
@@ -385,6 +385,7 @@ enum {
					    not multiple of 16 bytes */
	ATA_HORKAGE_FIRMWARE_WARN = (1 << 12),	/* firmware update warning */
	ATA_HORKAGE_1_5_GBPS	= (1 << 13),	/* force 1.5 Gbps */
	ATA_HORKAGE_NOSETXFER	= (1 << 14),	/* skip SETXFER, SATA only */

	/* DMA mask for user DMA control: User visible values; DO NOT
	   renumber */

@@ -588,6 +589,7 @@ struct ata_device {
#endif
	/* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
	u64			n_sectors;	/* size of device, if ATA */
	u64			n_native_sectors; /* native size, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	unsigned long		unpark_deadline;
@@ -21,6 +21,15 @@
#define __page_aligned_data	__section(.data.page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss	__section(.bss.page_aligned) __aligned(PAGE_SIZE)

/*
 * For assembly routines.
 *
 * Note when using these that you must specify the appropriate
 * alignment directives yourself
 */
#define __PAGE_ALIGNED_DATA	.section ".data.page_aligned", "aw"
#define __PAGE_ALIGNED_BSS	.section ".bss.page_aligned", "aw"

/*
 * This is used by architectures to keep arguments on the stack
 * untouched by the compiler by keeping them live until the end.
@@ -258,6 +258,16 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
#define lockdep_set_subclass(lock, sub)				\
		lockdep_init_map(&(lock)->dep_map, #lock,	\
				 (lock)->dep_map.key, sub)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.

@@ -326,6 +336,11 @@ static inline void lockdep_on(void)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
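lockdep_match_class() lets code assert that a lock still carries the class key it was set up with. A minimal sketch under that assumption, not from the patch; names are illustrative:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key demo_key;
static spinlock_t demo_lock;

static void demo_check(void)
{
	spin_lock_init(&demo_lock);
	/* give the lock an explicit class, then verify it */
	lockdep_set_class(&demo_lock, &demo_key);
	WARN_ON(!lockdep_match_class(&demo_lock, &demo_key));
}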
19	include/linux/max17040_battery.h	Normal file

@@ -0,0 +1,19 @@
/*
 * Copyright (C) 2009 Samsung Electronics
 * Minkyu Kang <mk7.kang@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __MAX17040_BATTERY_H_
#define __MAX17040_BATTERY_H_

struct max17040_platform_data {
	int (*battery_online)(void);
	int (*charger_online)(void);
	int (*charger_enable)(void);
};

#endif
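The three callbacks report battery and charger presence to the max17040 fuel-gauge driver. An illustrative board-code sketch with trivially stubbed helpers (all names hypothetical):

#include <linux/max17040_battery.h>

static int demo_battery_online(void) { return 1; }	/* hypothetical */
static int demo_charger_online(void) { return 0; }	/* hypothetical */
static int demo_charger_enable(void) { return 0; }	/* hypothetical */

static struct max17040_platform_data demo_max17040_pdata = {
	.battery_online	= demo_battery_online,
	.charger_online	= demo_charger_online,
	.charger_enable	= demo_charger_enable,
};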
@@ -826,7 +826,7 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int len, int write, int force,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
@@ -2,10 +2,9 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__

#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/seq_file.h>
#include <linux/wait.h>

struct mnt_namespace {
	atomic_t		count;

@@ -28,14 +27,6 @@ extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
		struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);

static inline void exit_mnt_ns(struct task_struct *p)
{
	struct mnt_namespace *ns = p->nsproxy->mnt_ns;
	if (ns)
		put_mnt_ns(ns);
}

static inline void get_mnt_ns(struct mnt_namespace *ns)
{
	atomic_inc(&ns->count);
@@ -251,7 +251,7 @@ struct mtd_info {

static inline struct mtd_info *dev_to_mtd(struct device *dev)
{
	return dev ? container_of(dev, struct mtd_info, dev) : NULL;
	return dev ? dev_get_drvdata(dev) : NULL;
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)

@@ -47,6 +47,8 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL	(0)


struct mtd_info;

int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 {
	__u8 state_mask, status_mask;
};

struct xt_conntrack_mtinfo2 {
	union nf_inet_addr origsrc_addr, origsrc_mask;
	union nf_inet_addr origdst_addr, origdst_mask;
	union nf_inet_addr replsrc_addr, replsrc_mask;
	union nf_inet_addr repldst_addr, repldst_mask;
	__u32 expires_min, expires_max;
	__u16 l4proto;
	__be16 origsrc_port, origdst_port;
	__be16 replsrc_port, repldst_port;
	__u16 match_flags, invert_flags;
	__u16 state_mask, status_mask;
};

#endif /*_XT_CONNTRACK_H*/
@@ -20,6 +20,8 @@
#ifndef _XT_OSF_H
#define _XT_OSF_H

#include <linux/types.h>

#define MAXGENRELEN		32

#define XT_OSF_GENRE		(1<<0)
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
extern void nfs_writedata_release(void *);

/*
 * Try to write back everything synchronously (but check the

@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata);
extern void nfs_commitdata_release(void *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)

@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
 * Allocate nfs_write_data structures
 */
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
extern void nfs_writedata_free(struct nfs_write_data *);

/*
 * linux/fs/nfs/read.c

@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
		struct list_head *, unsigned);
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
extern void nfs_readdata_release(void *data);
extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
			      struct page *);

@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
 * Allocate nfs_read_data structures
 */
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
extern void nfs_readdata_free(struct nfs_read_data *);

/*
 * linux/fs/nfs3proc.c
@@ -82,6 +82,12 @@
 * to generate slightly worse code. So use a simple one-line #define
 * for node_isset(), instead of wrapping an inline inside a macro, the
 * way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing above logical AND, OR, XOR, Remap operations the callers tend to
 * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
 * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
 * for such situations. See below and CPUMASK_ALLOC also.
 */

#include <linux/kernel.h>

@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

/*
 * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
 */

#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
#define NODEMASK_ALLOC(x, m)	struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define NODEMASK_FREE(m)	kfree(m)
#else
#define NODEMASK_ALLOC(x, m)	struct x _m, *m = &_m
#define NODEMASK_FREE(m)
#endif

/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)	NODEMASK_ALLOC(nodemask_scratch, x)
#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)


#endif /* __LINUX_NODEMASK_H */
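NODEMASK_SCRATCH() heap-allocates the two scratch masks when NODES_SHIFT is large and falls back to the stack otherwise, so callers must handle a possible allocation failure. A usage sketch, not from the patch:

#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int demo_use_scratch(void)
{
	NODEMASK_SCRATCH(nms);	/* declares struct nodemask_scratch *nms */

	if (!nms)		/* only possible in the kmalloc() variant */
		return -ENOMEM;
	/* use the scratch masks instead of large on-stack nodemask_t's */
	nodes_and(nms->mask1, node_states[N_ONLINE], node_states[N_POSSIBLE]);
	NODEMASK_SCRATCH_FREE(nms);
	return 0;
}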
@@ -18,5 +18,8 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
					 struct device_node *phy_np,
					 void (*hndlr)(struct net_device *),
					 u32 flags, phy_interface_t iface);
extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
					 void (*hndlr)(struct net_device *),
					 phy_interface_t iface);

#endif /* __LINUX_OF_MDIO_H */
@@ -1145,7 +1145,7 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
 * Again, it's a wrapper around the generic device.
 */
static inline const char *pci_name(struct pci_dev *pdev)
static inline const char *pci_name(const struct pci_dev *pdev)
{
	return dev_name(&pdev->dev);
}
@@ -229,7 +229,6 @@ struct hotplug_params {
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
				struct hotplug_params *hpp);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(struct pci_bus *pbus);
#endif
@@ -2645,6 +2645,7 @@
#define PCI_DEVICE_ID_NETMOS_9835	0x9835
#define PCI_DEVICE_ID_NETMOS_9845	0x9845
#define PCI_DEVICE_ID_NETMOS_9855	0x9855
#define PCI_DEVICE_ID_NETMOS_9901	0x9901

#define PCI_VENDOR_ID_3COM_2		0xa727
@@ -24,7 +24,8 @@

#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
	PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) per_cpu__##name

/*
 * Variant on the per-CPU variable declaration/definition theme used for
@@ -115,26 +115,44 @@ enum perf_counter_sample_format {
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_GROUP			= 1U << 4,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 9,		/* non-ABI */
	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};

/*
 * Bits that can be set in attr.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 * The format of the data returned by read() on a perf counter fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 3,		/* non-ABI */
	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
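For the non-group case documented above, a userspace reader can declare a matching struct and scale the raw count by enabled/running time, the usual correction for multiplexed counters. A hedged sketch assuming attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; error handling is minimal:

#include <stdint.h>
#include <unistd.h>

/* Mirrors the documented read() layout for !PERF_FORMAT_GROUP. */
struct demo_read_format {
	uint64_t value;
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static uint64_t demo_read_scaled(int fd)
{
	struct demo_read_format rf;

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
		return 0;
	if (rf.time_running == 0)
		return 0;
	/* estimate the full-period count: value * enabled / running */
	return rf.value * rf.time_enabled / rf.time_running;
}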
@@ -178,8 +196,11 @@ struct perf_counter_attr {
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */

				__reserved_1   : 53;
				__reserved_1   : 50;

	__u32			wakeup_events;	/* wakeup every n events */
	__u32			__reserved_2;

@@ -232,6 +253,14 @@ struct perf_counter_mmap_page {
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */
	__u64	time_enabled;		/* time counter active */
	__u64	time_running;		/* time counter on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.

@@ -253,7 +282,6 @@ struct perf_counter_mmap_page {
#define PERF_EVENT_MISC_KERNEL		(1 << 0)
#define PERF_EVENT_MISC_USER		(2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)

struct perf_event_header {
	__u32	type;

@@ -301,18 +329,18 @@ enum perf_event_type {
	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				sample_period;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_PERIOD		= 4,
	PERF_EVENT_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_EVENT_THROTTLE		= 5,

@@ -322,14 +350,22 @@ enum perf_event_type {
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_FORK			= 7,

	/*
	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
	 * will be PERF_SAMPLE_*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_EVENT_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *

@@ -337,16 +373,34 @@ enum perf_event_type {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			config;   } && PERF_SAMPLE_CONFIG
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ u64			nr;
	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 * };
	 */
	PERF_EVENT_SAMPLE		= 9,

	PERF_EVENT_MAX,			/* non-ABI */
};

enum perf_callchain_context {

@@ -387,6 +441,11 @@ struct perf_callchain_entry {
	__u64			ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32			size;
	void			*data;
};

struct task_struct;

/**

@@ -582,6 +641,7 @@ struct perf_counter_context {
	int				nr_counters;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	atomic_t			refcount;
	struct task_struct		*task;

@@ -654,10 +714,13 @@ struct perf_sample_data {
	struct pt_regs			*regs;
	u64				addr;
	u64				period;
	struct perf_raw_record		*raw;
};

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
				 struct perf_sample_data *data);
extern void perf_counter_output(struct perf_counter *counter, int nmi,
				struct perf_sample_data *data);

/*
 * Return 1 for a software counter, 0 for a hardware counter

@@ -669,7 +732,16 @@ static inline int is_software_counter(struct perf_counter *counter)
		(counter->attr.type != PERF_TYPE_HW_CACHE);
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swcounter_enabled[event]))
		__perf_swcounter_event(event, nr, nmi, regs, addr);
}

extern void __perf_counter_mmap(struct vm_area_struct *vma);
@@ -40,7 +40,10 @@ enum {
 * Security-relevant compatibility flags that must be
 * cleared upon setuid or setgid exec:
 */
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC|ADDR_NO_RANDOMIZE)
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC  | \
			    ADDR_NO_RANDOMIZE  | \
			    ADDR_COMPAT_LAYOUT | \
			    MMAP_PAGE_ZERO)

/*
 * Personality types.
@@ -83,4 +83,78 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);

#ifdef CONFIG_FS_POSIX_ACL
static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
{
	struct posix_acl **p, *acl;
	switch (type) {
	case ACL_TYPE_ACCESS:
		p = &inode->i_acl;
		break;
	case ACL_TYPE_DEFAULT:
		p = &inode->i_default_acl;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	acl = ACCESS_ONCE(*p);
	if (acl) {
		spin_lock(&inode->i_lock);
		acl = *p;
		if (acl != ACL_NOT_CACHED)
			acl = posix_acl_dup(acl);
		spin_unlock(&inode->i_lock);
	}
	return acl;
}

static inline void set_cached_acl(struct inode *inode,
				  int type,
				  struct posix_acl *acl)
{
	struct posix_acl *old = NULL;
	spin_lock(&inode->i_lock);
	switch (type) {
	case ACL_TYPE_ACCESS:
		old = inode->i_acl;
		inode->i_acl = posix_acl_dup(acl);
		break;
	case ACL_TYPE_DEFAULT:
		old = inode->i_default_acl;
		inode->i_default_acl = posix_acl_dup(acl);
		break;
	}
	spin_unlock(&inode->i_lock);
	if (old != ACL_NOT_CACHED)
		posix_acl_release(old);
}

static inline void forget_cached_acl(struct inode *inode, int type)
{
	struct posix_acl *old = NULL;
	spin_lock(&inode->i_lock);
	switch (type) {
	case ACL_TYPE_ACCESS:
		old = inode->i_acl;
		inode->i_acl = ACL_NOT_CACHED;
		break;
	case ACL_TYPE_DEFAULT:
		old = inode->i_default_acl;
		inode->i_default_acl = ACL_NOT_CACHED;
		break;
	}
	spin_unlock(&inode->i_lock);
	if (old != ACL_NOT_CACHED)
		posix_acl_release(old);
}
#endif

static inline void cache_no_acl(struct inode *inode)
{
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = NULL;
	inode->i_default_acl = NULL;
#endif
}

#endif /* __LINUX_POSIX_ACL_H */
@@ -22,6 +22,8 @@
#ifndef _PPS_H_
#define _PPS_H_

#include <linux/types.h>

#define PPS_VERSION		"5.3.6"
#define PPS_MAX_SOURCES		16	/* should be enough... */
@@ -7,7 +7,6 @@
#ifndef _LINUX_QUOTAOPS_
#define _LINUX_QUOTAOPS_

#include <linux/smp_lock.h>
#include <linux/fs.h>

static inline struct quota_info *sb_dqopt(struct super_block *sb)
@@ -56,15 +56,6 @@ int reiserfs_cache_default_acl(struct inode *dir);
extern struct xattr_handler reiserfs_posix_acl_default_handler;
extern struct xattr_handler reiserfs_posix_acl_access_handler;

static inline void reiserfs_init_acl_access(struct inode *inode)
{
	REISERFS_I(inode)->i_acl_access = NULL;
}

static inline void reiserfs_init_acl_default(struct inode *inode)
{
	REISERFS_I(inode)->i_acl_default = NULL;
}
#else

#define reiserfs_cache_default_acl(inode) 0

@@ -86,12 +77,4 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
{
	return 0;
}

static inline void reiserfs_init_acl_access(struct inode *inode)
{
}

static inline void reiserfs_init_acl_default(struct inode *inode)
{
}
#endif

@@ -54,10 +54,6 @@ struct reiserfs_inode_info {
	unsigned int i_trans_id;
	struct reiserfs_journal_list *i_jl;
	struct mutex i_mmap;
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
	struct posix_acl *i_acl_access;
	struct posix_acl *i_acl_default;
#endif
#ifdef CONFIG_REISERFS_FS_XATTR
	struct rw_semaphore i_xattr_sem;
#endif
@@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>

@@ -225,7 +224,7 @@ void rfkill_destroy(struct rfkill *rfkill);
 * should be blocked) so that drivers need not keep track of the soft
 * block state -- which they might not be able to.
 */
bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);

/**
 * rfkill_set_sw_state - Set the internal rfkill software block state
|
|||
*/
|
||||
|
||||
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
|
||||
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
|
||||
#define SG_MITER_FROM_SG (1 << 2) /* nop */
|
||||
|
||||
struct sg_mapping_iter {
|
||||
/* the following three fields can be accessed directly */
|
||||
|
|
|
@@ -209,7 +209,7 @@ extern unsigned long long time_sync_thresh;
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)
				 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)

@@ -349,8 +349,20 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536
/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to the image of a program
 * when generating a coredump, we need some margin. The number of extra sections
 * is 1-3 now and depends on arch. We use "5" as safe margin, here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

@@ -486,6 +498,15 @@ struct task_cputime {
	.sum_exec_runtime = 0,					\
}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.

@@ -1659,6 +1680,7 @@ extern cputime_t task_gtime(struct task_struct *p);
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -19,10 +19,6 @@ struct shmem_inode_info {
	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* first blocks */
	struct list_head	swaplist;	/* chain of maybes on swap */
	struct inode		vfs_inode;
#ifdef CONFIG_TMPFS_POSIX_ACL
	struct posix_acl	*i_acl;
	struct posix_acl	*i_default_acl;
#endif
};

struct shmem_sb_info {

@@ -45,7 +41,6 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
#ifdef CONFIG_TMPFS_POSIX_ACL
int shmem_permission(struct inode *, int);
int shmem_acl_init(struct inode *, struct inode *);
void shmem_acl_destroy_inode(struct inode *);

extern struct xattr_handler shmem_xattr_acl_access_handler;
extern struct xattr_handler shmem_xattr_acl_default_handler;

@@ -57,9 +52,6 @@ static inline int shmem_acl_init(struct inode *inode, struct inode *dir)
{
	return 0;
}
static inline void shmem_acl_destroy_inode(struct inode *inode)
{
}
#endif  /* CONFIG_TMPFS_POSIX_ACL */

#endif
@@ -1342,12 +1342,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(NET_IP_ALIGN);
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */

@@ -233,6 +234,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
@@ -80,6 +80,8 @@ struct spi_device {
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
#define	SPI_LOOP	0x20			/* loopback mode */
#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define	SPI_READY	0x80			/* slave pulls low to pause */
	u8			bits_per_word;
	int			irq;
	void			*controller_state;

@@ -248,6 +250,10 @@ struct spi_master {
	/* spi_device.mode flags understood by this controller driver */
	u16			mode_bits;

	/* other constraints relevant to this driver */
	u16			flags;
#define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */

	/* Setup mode and clock, etc (spi driver may call many times).
	 *
	 * IMPORTANT: this may be called when transfers to another
@@ -40,6 +40,8 @@
#define SPI_LSB_FIRST		0x08
#define SPI_3WIRE		0x10
#define SPI_LOOP		0x20
#define SPI_NO_CS		0x40
#define SPI_READY		0x80

/*---------------------------------------------------------------------------*/
@@ -132,6 +132,11 @@ do {								\
#endif /*__raw_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
@@ -12,7 +12,6 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
#include <linux/scatterlist.h>
#include <linux/smp_lock.h>

/*
 * Buffer adjustment
@@ -321,6 +321,8 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
				siginfo_t __user *uinfo,
				const struct timespec __user *uts,
				size_t sigsetsize);
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
		siginfo_t __user *uinfo);
asmlinkage long sys_kill(int pid, int sig);
asmlinkage long sys_tgkill(int tgid, int pid, int sig);
asmlinkage long sys_tkill(int pid, int sig);
@@ -14,6 +14,8 @@
#ifndef _LINUX_SYSRQ_H
#define _LINUX_SYSRQ_H

#include <linux/errno.h>

struct pt_regs;
struct tty_struct;
@@ -190,6 +190,8 @@ extern unsigned long get_next_timer_interrupt(unsigned long now);
 */
#ifdef CONFIG_TIMER_STATS

extern int timer_stats_active;

#define TIMER_STATS_FLAG_DEFERRABLE	0x1

extern void init_timer_stats(void);

@@ -203,6 +205,8 @@ extern void __timer_stats_timer_set_start_info(struct timer_list *timer,

static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
	if (likely(!timer_stats_active))
		return;
	__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
@@ -394,6 +394,7 @@ extern void __do_SAK(struct tty_struct *tty);
extern void disassociate_ctty(int priv);
extern void no_tty(void);
extern void tty_flip_buffer_push(struct tty_struct *tty);
extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_struct *tty);
extern void tty_buffer_flush(struct tty_struct *tty);
extern void tty_buffer_init(struct tty_struct *tty);
@@ -144,7 +144,7 @@ struct tty_ldisc_ops {

struct tty_ldisc {
	struct tty_ldisc_ops *ops;
	int refcount;
	atomic_t users;
};

#define TTY_LDISC_MAGIC	0x5403
@@ -19,15 +19,6 @@ struct iovec
	__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};

#ifdef __KERNEL__

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

#endif

/*
 *	UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
 */

@@ -35,6 +26,13 @@ struct kvec {
#define UIO_FASTIOV	8
#define UIO_MAXIOV	1024

#ifdef __KERNEL__

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

/*
 * Total number of bytes covered by an iovec.
 *

@@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
}

unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
#endif

#endif
@@ -888,8 +888,6 @@ struct usb_driver {
 * struct usb_device_driver - identifies USB device driver to usbcore
 * @name: The driver name should be unique among USB drivers,
 *	and should normally be the same as the module name.
 * @nodename: Callback to provide a naming hint for a possible
 *	device node to create.
 * @probe: Called to see if the driver is willing to manage a particular
 *	device.  If it is, probe returns zero and uses dev_set_drvdata()
 *	to associate driver-specific data with the device.  If unwilling

@@ -924,6 +922,8 @@ extern struct bus_type usb_bus_type;
/**
 * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
 * @name: the usb class device name for this driver.  Will show up in sysfs.
 * @nodename: Callback to provide a naming hint for a possible
 *	device node to create.
 * @fops: pointer to the struct file_operations of this driver.
 * @minor_base: the start of the minor range for this driver.
 *

@@ -1046,6 +1046,8 @@ typedef void (*usb_complete_t)(struct urb *);
 *	the device driver is saying that it provided this DMA address,
 *	which the host controller driver should use in preference to the
 *	transfer_buffer.
 * @sg: scatter gather buffer list
 * @num_sgs: number of entries in the sg list
 * @transfer_buffer_length: How big is transfer_buffer.  The transfer may
 *	be broken up into chunks according to the current maximum packet
 *	size for the endpoint, which is a function of the configuration
@ -1,177 +0,0 @@
|
|||
/*
|
||||
* Intel Langwell USB OTG transceiver driver
|
||||
* Copyright (C) 2008, Intel Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __LANGWELL_OTG_H__
|
||||
#define __LANGWELL_OTG_H__
|
||||
|
||||
/* notify transceiver driver about OTG events */
|
||||
extern void langwell_update_transceiver(void);
|
||||
/* HCD register bus driver */
|
||||
extern int langwell_register_host(struct pci_driver *host_driver);
|
||||
/* HCD unregister bus driver */
|
||||
extern void langwell_unregister_host(struct pci_driver *host_driver);
|
||||
/* DCD register bus driver */
|
||||
extern int langwell_register_peripheral(struct pci_driver *client_driver);
|
||||
/* DCD unregister bus driver */
|
||||
extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
|
||||
/* No silent failure, output warning message */
|
||||
extern void langwell_otg_nsf_msg(unsigned long message);
|
||||
|
||||
#define CI_USBCMD 0x30
|
||||
# define USBCMD_RST BIT(1)
|
||||
# define USBCMD_RS BIT(0)
|
||||
#define CI_USBSTS 0x34
|
||||
# define USBSTS_SLI BIT(8)
|
||||
# define USBSTS_URI BIT(6)
|
||||
# define USBSTS_PCI BIT(2)
|
||||
#define CI_PORTSC1 0x74
|
||||
# define PORTSC_PP BIT(12)
|
||||
# define PORTSC_LS (BIT(11) | BIT(10))
|
||||
# define PORTSC_SUSP BIT(7)
|
||||
# define PORTSC_CCS BIT(0)
|
||||
#define CI_HOSTPC1 0xb4
|
||||
# define HOSTPC1_PHCD BIT(22)
|
||||
#define CI_OTGSC 0xf4
|
||||
# define OTGSC_DPIE BIT(30)
|
||||
# define OTGSC_1MSE BIT(29)
|
||||
# define OTGSC_BSEIE BIT(28)
|
||||
# define OTGSC_BSVIE BIT(27)
|
||||
# define OTGSC_ASVIE BIT(26)
|
||||
# define OTGSC_AVVIE BIT(25)
|
||||
# define OTGSC_IDIE BIT(24)
|
||||
# define OTGSC_DPIS BIT(22)
|
||||
# define OTGSC_1MSS BIT(21)
|
||||
# define OTGSC_BSEIS BIT(20)
|
||||
# define OTGSC_BSVIS BIT(19)
|
||||
# define OTGSC_ASVIS BIT(18)
|
||||
# define OTGSC_AVVIS BIT(17)
|
||||
# define OTGSC_IDIS BIT(16)
|
||||
# define OTGSC_DPS BIT(14)
|
||||
# define OTGSC_1MST BIT(13)
|
||||
# define OTGSC_BSE BIT(12)
|
||||
# define OTGSC_BSV BIT(11)
|
||||
# define OTGSC_ASV BIT(10)
|
||||
# define OTGSC_AVV BIT(9)
|
||||
# define OTGSC_ID BIT(8)
|
||||
# define OTGSC_HABA BIT(7)
|
||||
# define OTGSC_HADP BIT(6)
|
||||
# define OTGSC_IDPU BIT(5)
|
||||
# define OTGSC_DP BIT(4)
|
||||
# define OTGSC_OT BIT(3)
|
||||
# define OTGSC_HAAR BIT(2)
|
||||
# define OTGSC_VC BIT(1)
|
||||
# define OTGSC_VD BIT(0)
|
||||
# define OTGSC_INTEN_MASK (0x7f << 24)
|
||||
# define OTGSC_INTSTS_MASK (0x7f << 16)
|
||||
#define CI_USBMODE 0xf8
|
||||
# define USBMODE_CM (BIT(1) | BIT(0))
|
||||
# define USBMODE_IDLE 0
|
||||
# define USBMODE_DEVICE 0x2
|
||||
# define USBMODE_HOST 0x3
|
||||
|
||||
#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
|
||||
|
||||
struct otg_hsm {
|
||||
/* Input */
|
||||
int a_bus_resume;
|
||||
int a_bus_suspend;
|
||||
int a_conn;
|
||||
int a_sess_vld;
|
||||
int a_srp_det;
|
||||
int a_vbus_vld;
|
||||
int b_bus_resume;
|
||||
int b_bus_suspend;
|
||||
int b_conn;
|
||||
int b_se0_srp;
|
||||
int b_sess_end;
|
||||
int b_sess_vld;
|
||||
int id;
|
||||
|
||||
/* Internal variables */
|
||||
int a_set_b_hnp_en;
|
||||
int b_srp_done;
|
||||
int b_hnp_enable;
|
||||
|
||||
/* Timeout indicator for timers */
|
||||
int a_wait_vrise_tmout;
|
||||
int a_wait_bcon_tmout;
|
||||
int a_aidl_bdis_tmout;
|
||||
int b_ase0_brst_tmout;
|
||||
int b_bus_suspend_tmout;
|
||||
int b_srp_res_tmout;
|
||||
|
||||
/* Informative variables */
|
||||
int a_bus_drop;
|
||||
int a_bus_req;
|
||||
int a_clr_err;
|
||||
int a_suspend_req;
|
||||
int b_bus_req;
|
||||
|
||||
/* Output */
|
||||
int drv_vbus;
|
||||
int loc_conn;
|
||||
int loc_sof;
|
||||
|
||||
/* Others */
|
||||
int b_bus_suspend_vld;
|
||||
};
|
||||
|
||||
#define TA_WAIT_VRISE 100
|
||||
#define TA_WAIT_BCON 30000
|
||||
#define TA_AIDL_BDIS 15000
|
||||
#define TB_ASE0_BRST 5000
|
||||
#define TB_SE0_SRP 2
|
||||
#define TB_SRP_RES 100
|
||||
#define TB_BUS_SUSPEND 500
|
||||
|
||||
struct langwell_otg_timer {
|
||||
unsigned long expires; /* Number of count increase to timeout */
|
||||
unsigned long count; /* Tick counter */
|
||||
void (*function)(unsigned long); /* Timeout function */
|
||||
unsigned long data; /* Data passed to function */
|
||||
struct list_head list;
|
||||
};

struct langwell_otg {
	struct otg_transceiver otg;
	struct otg_hsm hsm;
	void __iomem *regs;
	unsigned region;
	struct pci_driver *host_ops;
	struct pci_driver *client_ops;
	struct pci_dev *pdev;
	struct work_struct work;
	struct workqueue_struct *qwork;
	spinlock_t lock;
	spinlock_t wq_lock;
};

static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
{
	return container_of(otg, struct langwell_otg, otg);
}
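otg_to_langwell() is the usual container_of() downcast: any callback handed the embedded otg_transceiver can recover the surrounding driver state. A sketch of the pattern in a transceiver op (set_power is a real otg_transceiver hook, but this body is purely illustrative):

#include <linux/device.h>

static int demo_set_power(struct otg_transceiver *otg, unsigned mA)
{
	struct langwell_otg *lnw = otg_to_langwell(otg);

	/* lnw now gives access to regs, hsm state, pdev, etc. */
	otg_dbg("%s: %u mA requested\n", dev_name(&lnw->pdev->dev), mA);
	return 0;
}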

#ifdef DEBUG
#define otg_dbg(fmt, args...) \
	printk(KERN_DEBUG fmt , ## args)
#else
#define otg_dbg(fmt, args...) \
	do { } while (0)
#endif /* DEBUG */
#endif /* __LANGWELL_OTG_H__ */

@ -317,7 +317,8 @@ extern int usb_serial_generic_register(int debug);
extern void usb_serial_generic_deregister(void);
extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
		gfp_t mem_flags);
extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
		struct usb_serial_port *port,
		unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);

@ -42,7 +42,6 @@ struct usbnet {
	/* protocol/interface state */
	struct net_device *net;
	struct net_device_stats stats;
	int msg_enable;
	unsigned long data [5];
	u32 xid;

@ -318,6 +318,8 @@ struct v4l2_pix_format {
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */

/*
 * 10bit raw bayer, expanded to 16 bits
 * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...

@ -336,6 +338,7 @@ struct v4l2_pix_format {
/* Vendor-specific formats */
#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */

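The v4l2_fourcc() macro packs four ASCII characters into a little-endian 32-bit code, which is why 'B','A','8','1' reads back as 0x31384142. A standalone illustration of the packing (demo_fourcc re-implements the macro so the example compiles outside the kernel):

#include <stdint.h>
#include <stdio.h>

/* Same packing as v4l2_fourcc(): the first character is the low byte. */
#define demo_fourcc(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
	uint32_t sbggr8 = demo_fourcc('B', 'A', '8', '1'); /* V4L2_PIX_FMT_SBGGR8 */

	printf("SBGGR8 = 0x%08x\n", sbggr8); /* prints 0x31384142 */
	return 0;
}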
@ -20,8 +20,7 @@

#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */

struct virtio_blk_config
{
struct virtio_blk_config {
	/* The capacity (in 512-byte sectors). */
	__u64 capacity;
	/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */

@ -50,8 +49,7 @@ struct virtio_blk_config
#define VIRTIO_BLK_T_BARRIER 0x80000000

/* This is the first element of the read scatter-gather list. */
struct virtio_blk_outhdr
{
struct virtio_blk_outhdr {
	/* VIRTIO_BLK_T* */
	__u32 type;
	/* io priority. */

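Every virtio-blk request begins with this out-header, followed by the data buffers and a trailing status byte. A sketch of filling one for a read (stand-in types keep the fragment self-contained; VIRTIO_BLK_T_IN, the read request type, is defined earlier in the real header):

#include <string.h>

/* Stand-in mirroring struct virtio_blk_outhdr's layout. */
struct demo_blk_outhdr {
	unsigned int type;		/* VIRTIO_BLK_T* request type */
	unsigned int ioprio;		/* io priority */
	unsigned long long sector;	/* always in 512-byte units */
};

static void demo_fill_read(struct demo_blk_outhdr *hdr, unsigned long long sector)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->type = 0;		/* VIRTIO_BLK_T_IN: device writes data to us */
	hdr->sector = sector;
}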
@ -79,8 +79,7 @@
 * the dev->feature bits if it wants.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops
{
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,

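The get()/set() ops are the transport-agnostic window into device config space; callers pass a byte offset into the structure the device exposes. A sketch of reading the 6-byte mac field of struct virtio_net_config through them (the wrapper is ours; in-tree code goes through helpers of the same shape):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void demo_read_mac(struct virtio_device *vdev, u8 mac[6])
{
	/* mac[] sits at offset 0 of struct virtio_net_config. */
	vdev->config->get(vdev, 0, mac, 6);
}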
@ -27,11 +27,11 @@
#define VIRTIO_NET_F_CTRL_VQ	17	/* Control channel available */
#define VIRTIO_NET_F_CTRL_RX	18	/* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN	19	/* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 20	/* Extra RX mode control support */

#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */

struct virtio_net_config
{
struct virtio_net_config {
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
	__u8 mac[6];
	/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */

@ -40,8 +40,7 @@ struct virtio_net_config

/* This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr
{
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	// Use csum_start, csum_offset
	__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	// Not a GSO frame

@ -81,14 +80,19 @@ typedef __u8 virtio_net_ctrl_ack;
#define VIRTIO_NET_ERR 1

/*
 * Control the RX mode, i.e. promiscuous and allmulti. PROMISC and
 * ALLMULTI commands require an "out" sg entry containing a 1 byte
 * state value, zero = disable, non-zero = enable. These commands
 * are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Control the RX mode, i.e. promiscuous, allmulti, etc.
 * All commands require an "out" sg entry containing a 1 byte
 * state value, zero = disable, non-zero = enable. Commands
 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
 */
#define VIRTIO_NET_CTRL_RX		0
#define VIRTIO_NET_CTRL_RX_PROMISC	0
#define VIRTIO_NET_CTRL_RX_ALLMULTI	1
#define VIRTIO_NET_CTRL_RX_ALLUNI	2
#define VIRTIO_NET_CTRL_RX_NOMULTI	3
#define VIRTIO_NET_CTRL_RX_NOUNI	4
#define VIRTIO_NET_CTRL_RX_NOBCAST	5

/*
 * Control the MAC filter table.

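Toggling promiscuous mode, for instance, means queuing class VIRTIO_NET_CTRL_RX, command VIRTIO_NET_CTRL_RX_PROMISC, plus the one-byte state value on the control virtqueue. A guest-side sketch, assuming a send helper shaped like the virtio_net driver's virtnet_send_command() (struct virtnet_info and the helper's exact signature are assumptions here):

#include <linux/scatterlist.h>

static void demo_set_promisc(struct virtnet_info *vi, bool on)
{
	u8 promisc = on ? 1 : 0;	/* non-zero = enable, per the comment above */
	struct scatterlist sg;

	sg_init_one(&sg, &promisc, sizeof(promisc));
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, &sg))
		pr_warn("could not %s promiscuous mode\n",
			on ? "enable" : "disable");
}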
@ -30,8 +30,7 @@
#define VIRTIO_RING_F_INDIRECT_DESC 28

/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc
{
struct vring_desc {
	/* Address (guest-physical). */
	__u64 addr;
	/* Length. */

@ -42,24 +41,21 @@ struct vring_desc
	__u16 next;
};

struct vring_avail
{
struct vring_avail {
	__u16 flags;
	__u16 idx;
	__u16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem
{
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__u32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__u32 len;
};

struct vring_used
{
struct vring_used {
	__u16 flags;
	__u16 idx;
	struct vring_used_elem ring[];

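These three structures live back to back in guest memory: the descriptor table, then the available ring, then, after alignment, the used ring. A standalone sketch of the total-size arithmetic, mirroring what the kernel's vring_size() helper computes (simplified; no indirect or event-index extensions):

#include <stddef.h>

static size_t demo_vring_bytes(unsigned int num, unsigned long align)
{
	size_t desc  = num * 16;	/* struct vring_desc entries */
	size_t avail = 2 * (2 + num);	/* flags, idx, __u16 ring[num] */
	size_t used  = 2 * 2 + num * 8;	/* flags, idx, vring_used_elem[num] */
	size_t front = desc + avail;

	front = (front + align - 1) & ~(align - 1);	/* used ring is aligned */
	return front + used;
}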
@ -77,7 +77,14 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void init_waitqueue_head(wait_queue_head_t *q);
extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \

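The point of replacing the extern with this macro is the static lock_class_key: every expansion site gets its own key, so lockdep can classify waitqueue locks per initialization site instead of lumping them all together. The call pattern for users is unchanged; a minimal usage sketch:

#include <linux/wait.h>

static wait_queue_head_t demo_wq;
static int demo_ready;

static void demo_init(void)
{
	/* Expands to __init_waitqueue_head(&demo_wq, &__key), with a
	 * static lock_class_key unique to this call site. */
	init_waitqueue_head(&demo_wq);
}

static int demo_wait(void)
{
	/* Sleeps until demo_ready becomes non-zero and demo_wq is woken. */
	return wait_event_interruptible(demo_wq, demo_ready);
}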