Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
        drivers/scsi/fcoe/fcoe.c

commit bb803cfbec
1787 changed files with 38448 additions and 34549 deletions
@@ -138,6 +138,7 @@ header-y += qnxtypes.h
header-y += radeonfb.h
header-y += raw.h
header-y += resource.h
header-y += romfs_fs.h
header-y += rose.h
header-y += serial_reg.h
header-y += smbno.h

@@ -314,7 +315,6 @@ unifdef-y += irqnr.h
unifdef-y += reboot.h
unifdef-y += reiserfs_fs.h
unifdef-y += reiserfs_xattr.h
unifdef-y += romfs_fs.h
unifdef-y += route.h
unifdef-y += rtc.h
unifdef-y += rtnetlink.h
@@ -111,6 +111,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;

extern struct acpi_mcfg_allocation *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
@@ -730,6 +730,34 @@ static inline int ata_id_has_unload(const u16 *id)
        return 0;
}

static inline int ata_id_form_factor(const u16 *id)
{
        u16 val = id[168];

        if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
                return 0;

        val &= 0xf;

        if (val > 5)
                return 0;

        return val;
}

static inline int ata_id_rotation_rate(const u16 *id)
{
        u16 val = id[217];

        if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
                return 0;

        if (val > 1 && val < 0x401)
                return 0;

        return val;
}

static inline int ata_id_has_trim(const u16 *id)
{
        if (ata_id_major_version(id) >= 7 &&
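The helpers above decode IDENTIFY DEVICE words 168 (form factor) and 217 (nominal rotation rate). A minimal sketch of how a caller might consume them follows; the reporting function and messages are illustrative, not part of this commit, and "id" is assumed to be a valid 256-word identify buffer.

/* Sketch only: report media information derived from identify data. */
static void example_report_media_info(const u16 *id)
{
        /* A rotation rate of 1 means non-rotating (solid state) media. */
        if (ata_id_rotation_rate(id) == 1)
                pr_info("device reports solid state media\n");

        /* Non-zero codes map to 5.25"/3.5"/2.5"/1.8"/sub-1.8" form factors. */
        if (ata_id_form_factor(id))
                pr_info("form factor code %d\n", ata_id_form_factor(id));
}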
@@ -82,7 +82,19 @@ struct linux_binfmt {
        int hasvdso;
};

extern int register_binfmt(struct linux_binfmt *);
extern int __register_binfmt(struct linux_binfmt *fmt, int insert);

/* Registration of default binfmt handlers */
static inline int register_binfmt(struct linux_binfmt *fmt)
{
        return __register_binfmt(fmt, 0);
}
/* Same as above, but adds a new binfmt at the top of the list */
static inline int insert_binfmt(struct linux_binfmt *fmt)
{
        return __register_binfmt(fmt, 1);
}

extern void unregister_binfmt(struct linux_binfmt *);

extern int prepare_binprm(struct linux_binprm *);
@ -132,6 +132,7 @@ struct bio {
|
|||
* top 4 bits of bio flags indicate the pool this bio came from
|
||||
*/
|
||||
#define BIO_POOL_BITS (4)
|
||||
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
|
||||
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
|
||||
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
|
||||
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
|
||||
|
|
@ -504,6 +505,115 @@ static inline int bio_has_data(struct bio *bio)
|
|||
return bio && bio->bi_io_vec != NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* BIO list managment for use by remapping drivers (e.g. DM or MD).
|
||||
*
|
||||
* A bio_list anchors a singly-linked list of bios chained through the bi_next
|
||||
* member of the bio. The bio_list also caches the last list member to allow
|
||||
* fast access to the tail.
|
||||
*/
|
||||
struct bio_list {
|
||||
struct bio *head;
|
||||
struct bio *tail;
|
||||
};
|
||||
|
||||
static inline int bio_list_empty(const struct bio_list *bl)
|
||||
{
|
||||
return bl->head == NULL;
|
||||
}
|
||||
|
||||
static inline void bio_list_init(struct bio_list *bl)
|
||||
{
|
||||
bl->head = bl->tail = NULL;
|
||||
}
|
||||
|
||||
#define bio_list_for_each(bio, bl) \
|
||||
for (bio = (bl)->head; bio; bio = bio->bi_next)
|
||||
|
||||
static inline unsigned bio_list_size(const struct bio_list *bl)
|
||||
{
|
||||
unsigned sz = 0;
|
||||
struct bio *bio;
|
||||
|
||||
bio_list_for_each(bio, bl)
|
||||
sz++;
|
||||
|
||||
return sz;
|
||||
}
|
||||
|
||||
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
|
||||
{
|
||||
bio->bi_next = NULL;
|
||||
|
||||
if (bl->tail)
|
||||
bl->tail->bi_next = bio;
|
||||
else
|
||||
bl->head = bio;
|
||||
|
||||
bl->tail = bio;
|
||||
}
|
||||
|
||||
static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
|
||||
{
|
||||
bio->bi_next = bl->head;
|
||||
|
||||
bl->head = bio;
|
||||
|
||||
if (!bl->tail)
|
||||
bl->tail = bio;
|
||||
}
|
||||
|
||||
static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
|
||||
{
|
||||
if (!bl2->head)
|
||||
return;
|
||||
|
||||
if (bl->tail)
|
||||
bl->tail->bi_next = bl2->head;
|
||||
else
|
||||
bl->head = bl2->head;
|
||||
|
||||
bl->tail = bl2->tail;
|
||||
}
|
||||
|
||||
static inline void bio_list_merge_head(struct bio_list *bl,
|
||||
struct bio_list *bl2)
|
||||
{
|
||||
if (!bl2->head)
|
||||
return;
|
||||
|
||||
if (bl->head)
|
||||
bl2->tail->bi_next = bl->head;
|
||||
else
|
||||
bl->tail = bl2->tail;
|
||||
|
||||
bl->head = bl2->head;
|
||||
}
|
||||
|
||||
static inline struct bio *bio_list_pop(struct bio_list *bl)
|
||||
{
|
||||
struct bio *bio = bl->head;
|
||||
|
||||
if (bio) {
|
||||
bl->head = bl->head->bi_next;
|
||||
if (!bl->head)
|
||||
bl->tail = NULL;
|
||||
|
||||
bio->bi_next = NULL;
|
||||
}
|
||||
|
||||
return bio;
|
||||
}
|
||||
|
||||
static inline struct bio *bio_list_get(struct bio_list *bl)
|
||||
{
|
||||
struct bio *bio = bl->head;
|
||||
|
||||
bl->head = bl->tail = NULL;
|
||||
|
||||
return bio;
|
||||
}
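Taken together, the bio_list helpers above give remapping drivers a tiny singly-linked FIFO. A minimal usage sketch is shown below; the deferred list, the lack of locking, and the resubmission via generic_make_request() are illustrative assumptions, not something this commit prescribes.

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_list deferred_bios;

static void example_setup(void)
{
        bio_list_init(&deferred_bios);          /* empty head and tail */
}

static void example_defer(struct bio *bio)
{
        bio_list_add(&deferred_bios, bio);      /* append at the tail */
}

static void example_drain(void)
{
        struct bio *bio;

        /* Pop in FIFO order and hand each bio back to the block layer. */
        while ((bio = bio_list_pop(&deferred_bios)))
                generic_make_request(bio);
}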
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
|
||||
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
|
||||
|
|
|
|||
|
|
@@ -112,6 +112,25 @@ static inline unsigned fls_long(unsigned long l)
        return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}

#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
@ -118,6 +118,7 @@ enum rq_flag_bits {
|
|||
__REQ_COPY_USER, /* contains copies of user pages */
|
||||
__REQ_INTEGRITY, /* integrity metadata has been remapped */
|
||||
__REQ_NOIDLE, /* Don't anticipate more IO after this one */
|
||||
__REQ_IO_STAT, /* account I/O stat */
|
||||
__REQ_NR_BITS, /* stops here */
|
||||
};
|
||||
|
||||
|
|
@ -145,6 +146,7 @@ enum rq_flag_bits {
|
|||
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
|
||||
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
|
||||
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
|
||||
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
|
||||
|
||||
#define BLK_MAX_CDB 16
|
||||
|
||||
|
|
@ -598,6 +600,8 @@ enum {
|
|||
blk_failfast_transport(rq) || \
|
||||
blk_failfast_driver(rq))
|
||||
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
|
||||
#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
|
||||
#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET)
|
||||
|
||||
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
|
||||
|
||||
|
|
|
|||
|
|
@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long,
|
|||
unsigned long b_state);
|
||||
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
|
||||
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
|
||||
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
|
||||
|
||||
/* Things to do with buffers at mapping->private_list */
|
||||
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
|
||||
|
|
@ -197,6 +198,8 @@ extern int buffer_heads_over_limit;
|
|||
void block_invalidatepage(struct page *page, unsigned long offset);
|
||||
int block_write_full_page(struct page *page, get_block_t *get_block,
|
||||
struct writeback_control *wbc);
|
||||
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
|
||||
struct writeback_control *wbc, bh_end_io_t *handler);
|
||||
int block_read_full_page(struct page*, get_block_t*);
|
||||
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
|
||||
unsigned long from);
|
||||
|
|
|
|||
|
|
@@ -143,7 +143,9 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
 * 400-499: Perfect
 *      The ideal clocksource. A must-use where
 *      available.
 * @read: returns a cycle value
 * @read: returns a cycle value, passes clocksource as argument
 * @enable: optional function to enable the clocksource
 * @disable: optional function to disable the clocksource
 * @mask: bitmask for two's complement
 *      subtraction of non 64 bit counters
 * @mult: cycle to nanosecond multiplier (adjusted by NTP)

@@ -162,7 +164,9 @@ struct clocksource {
        char *name;
        struct list_head list;
        int rating;
        cycle_t (*read)(void);
        cycle_t (*read)(struct clocksource *cs);
        int (*enable)(struct clocksource *cs);
        void (*disable)(struct clocksource *cs);
        cycle_t mask;
        u32 mult;
        u32 mult_orig;

@@ -271,7 +275,34 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 */
static inline cycle_t clocksource_read(struct clocksource *cs)
{
        return cs->read();
        return cs->read(cs);
}

/**
 * clocksource_enable: - enable clocksource
 * @cs: pointer to clocksource
 *
 * Enables the specified clocksource. The clocksource callback
 * function should start up the hardware and setup mult and field
 * members of struct clocksource to reflect hardware capabilities.
 */
static inline int clocksource_enable(struct clocksource *cs)
{
        return cs->enable ? cs->enable(cs) : 0;
}

/**
 * clocksource_disable: - disable clocksource
 * @cs: pointer to clocksource
 *
 * Disables the specified clocksource. The clocksource callback
 * function should power down the now unused hardware block to
 * save power.
 */
static inline void clocksource_disable(struct clocksource *cs)
{
        if (cs->disable)
                cs->disable(cs);
}

/**
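Because the read callback now takes the clocksource itself, drivers define their counter readers with the new signature. A sketch under the changed API is shown below; the device name, MMIO base, and register offset are invented for illustration.

#include <linux/clocksource.h>
#include <linux/io.h>

/* Hypothetical MMIO base of a free-running 32-bit counter. */
static void __iomem *example_timer_base;

static cycle_t example_timer_read(struct clocksource *cs)
{
        /* 0x10 is an assumed counter register offset, not a real device. */
        return (cycle_t)readl(example_timer_base + 0x10);
}

static struct clocksource example_clocksource = {
        .name   = "example-timer",
        .rating = 200,
        .read   = example_timer_read,   /* new signature: takes struct clocksource * */
        .mask   = CLOCKSOURCE_MASK(32),
};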
@ -3,6 +3,7 @@
|
|||
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
struct task_struct;
|
||||
|
||||
|
|
|
|||
|
|
@ -384,13 +384,8 @@ struct device {
|
|||
struct device_driver *driver; /* which driver has allocated this
|
||||
device */
|
||||
void *driver_data; /* data private to the driver */
|
||||
|
||||
void *platform_data; /* We will remove platform_data
|
||||
field if all platform devices
|
||||
pass its platform specific data
|
||||
from platform_device->platform_data,
|
||||
other kind of devices should not
|
||||
use platform_data. */
|
||||
void *platform_data; /* Platform specific data, device
|
||||
core doesn't touch it */
|
||||
struct dev_pm_info power;
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
|
|
@ -551,6 +546,7 @@ extern int (*platform_notify_remove)(struct device *dev);
|
|||
extern struct device *get_device(struct device *dev);
|
||||
extern void put_device(struct device *dev);
|
||||
|
||||
extern void wait_for_device_probe(void);
|
||||
|
||||
/* drivers/base/power/shutdown.c */
|
||||
extern void device_shutdown(void);
|
||||
|
|
|
|||
|
|
@ -78,12 +78,18 @@ enum dma_transaction_type {
|
|||
* dependency chains
|
||||
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
|
||||
* @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
|
||||
* @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
|
||||
* (if not set, do the source dma-unmapping as page)
|
||||
* @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
|
||||
* (if not set, do the destination dma-unmapping as page)
|
||||
*/
|
||||
enum dma_ctrl_flags {
|
||||
DMA_PREP_INTERRUPT = (1 << 0),
|
||||
DMA_CTRL_ACK = (1 << 1),
|
||||
DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
|
||||
DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
|
||||
DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
|
||||
DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ extern int ddebug_remove_module(char *mod_name);
|
|||
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
|
||||
if (__dynamic_dbg_enabled(descriptor)) \
|
||||
dev_printk(KERN_DEBUG, dev, \
|
||||
KBUILD_MODNAME ": " pr_fmt(fmt),\
|
||||
KBUILD_MODNAME ": " fmt, \
|
||||
##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
|
|
|
|||
|
|
@ -173,8 +173,12 @@ struct fb_fix_screeninfo {
|
|||
/* Interpretation of offset for color fields: All offsets are from the right,
|
||||
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
|
||||
* can use the offset as right argument to <<). A pixel afterwards is a bit
|
||||
* stream and is written to video memory as that unmodified. This implies
|
||||
* big-endian byte order if bits_per_pixel is greater than 8.
|
||||
* stream and is written to video memory as that unmodified.
|
||||
*
|
||||
* For pseudocolor: offset and length should be the same for all color
|
||||
* components. Offset specifies the position of the least significant bit
|
||||
* of the pallette index in a pixel value. Length indicates the number
|
||||
* of available palette entries (i.e. # of entries = 1 << length).
|
||||
*/
|
||||
struct fb_bitfield {
|
||||
__u32 offset; /* beginning of bitfield */
|
||||
|
|
|
|||
|
|
@ -11,6 +11,8 @@
|
|||
#ifndef _LINUX_FIEMAP_H
|
||||
#define _LINUX_FIEMAP_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct fiemap_extent {
|
||||
__u64 fe_logical; /* logical offset in bytes for the start of
|
||||
* the extent from the beginning of the file */
|
||||
|
|
|
|||
|
|
@ -87,6 +87,60 @@ struct inodes_stat_t {
|
|||
*/
|
||||
#define FMODE_NOCMTIME ((__force fmode_t)2048)
|
||||
|
||||
/*
|
||||
* The below are the various read and write types that we support. Some of
|
||||
* them include behavioral modifiers that send information down to the
|
||||
* block layer and IO scheduler. Terminology:
|
||||
*
|
||||
* The block layer uses device plugging to defer IO a little bit, in
|
||||
* the hope that we will see more IO very shortly. This increases
|
||||
* coalescing of adjacent IO and thus reduces the number of IOs we
|
||||
* have to send to the device. It also allows for better queuing,
|
||||
* if the IO isn't mergeable. If the caller is going to be waiting
|
||||
* for the IO, then he must ensure that the device is unplugged so
|
||||
* that the IO is dispatched to the driver.
|
||||
*
|
||||
* All IO is handled async in Linux. This is fine for background
|
||||
* writes, but for reads or writes that someone waits for completion
|
||||
* on, we want to notify the block layer and IO scheduler so that they
|
||||
* know about it. That allows them to make better scheduling
|
||||
* decisions. So when the below references 'sync' and 'async', it
|
||||
* is referencing this priority hint.
|
||||
*
|
||||
* With that in mind, the available types are:
|
||||
*
|
||||
* READ A normal read operation. Device will be plugged.
|
||||
* READ_SYNC A synchronous read. Device is not plugged, caller can
|
||||
* immediately wait on this read without caring about
|
||||
* unplugging.
|
||||
* READA Used for read-ahead operations. Lower priority, and the
|
||||
* block layer could (in theory) choose to ignore this
|
||||
* request if it runs into resource problems.
|
||||
* WRITE A normal async write. Device will be plugged.
|
||||
* SWRITE Like WRITE, but a special case for ll_rw_block() that
|
||||
* tells it to lock the buffer first. Normally a buffer
|
||||
* must be locked before doing IO.
|
||||
* WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
|
||||
* the hint that someone will be waiting on this IO
|
||||
* shortly. The device must still be unplugged explicitly,
|
||||
* WRITE_SYNC_PLUG does not do this as we could be
|
||||
* submitting more writes before we actually wait on any
|
||||
* of them.
|
||||
* WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
|
||||
* immediately after submission. The write equivalent
|
||||
* of READ_SYNC.
|
||||
* WRITE_ODIRECT Special case write for O_DIRECT only.
|
||||
* SWRITE_SYNC
|
||||
* SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
|
||||
* See SWRITE.
|
||||
* WRITE_BARRIER Like WRITE, but tells the block layer that all
|
||||
* previously submitted writes must be safely on storage
|
||||
* before this one is started. Also guarantees that when
|
||||
* this write is complete, it itself is also safely on
|
||||
* storage. Prevents reordering of writes on both sides
|
||||
* of this IO.
|
||||
*
|
||||
*/
|
||||
#define RW_MASK 1
|
||||
#define RWA_MASK 2
|
||||
#define READ 0
|
||||
|
|
@ -102,6 +156,11 @@ struct inodes_stat_t {
|
|||
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
|
||||
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
|
||||
#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
|
||||
|
||||
/*
|
||||
* These aren't really reads or writes, they pass down information about
|
||||
* parts of device that are now unused by the file system.
|
||||
*/
|
||||
#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
|
||||
#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
|
||||
|
||||
|
|
@ -738,9 +797,6 @@ enum inode_i_mutex_lock_class
|
|||
I_MUTEX_QUOTA
|
||||
};
|
||||
|
||||
extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
|
||||
extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
|
||||
|
||||
/*
|
||||
* NOTE: in a 32bit arch with a preemptable kernel and
|
||||
* an UP compile the i_size_read/write must be atomic
|
||||
|
|
@ -1719,6 +1775,7 @@ void kill_block_super(struct super_block *sb);
|
|||
void kill_anon_super(struct super_block *sb);
|
||||
void kill_litter_super(struct super_block *sb);
|
||||
void deactivate_super(struct super_block *sb);
|
||||
void deactivate_locked_super(struct super_block *sb);
|
||||
int set_anon_super(struct super_block *s, void *data);
|
||||
struct super_block *sget(struct file_system_type *type,
|
||||
int (*test)(struct super_block *,void *),
|
||||
|
|
@ -2061,7 +2118,7 @@ extern struct file *create_write_pipe(int flags);
|
|||
extern void free_write_pipe(struct file *);
|
||||
|
||||
extern struct file *do_filp_open(int dfd, const char *pathname,
|
||||
int open_flag, int mode);
|
||||
int open_flag, int mode, int acc_mode);
|
||||
extern int may_open(struct path *, int, int);
|
||||
|
||||
extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
|
||||
|
|
@ -2150,8 +2207,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *,
|
|||
struct pipe_inode_info *, size_t, unsigned int);
|
||||
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
|
||||
struct file *, loff_t *, size_t, unsigned int);
|
||||
extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
|
||||
struct file *, loff_t *, size_t, unsigned int);
|
||||
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
|
||||
struct file *out, loff_t *, size_t len, unsigned int flags);
|
||||
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
|
||||
|
|
@ -2245,9 +2300,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *);
|
|||
|
||||
extern int vfs_stat(char __user *, struct kstat *);
|
||||
extern int vfs_lstat(char __user *, struct kstat *);
|
||||
extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
|
||||
extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
|
||||
extern int vfs_fstat(unsigned int, struct kstat *);
|
||||
extern int vfs_fstatat(int , char __user *, struct kstat *, int);
|
||||
|
||||
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
|
@ -2314,6 +2368,7 @@ extern void file_update_time(struct file *file);
|
|||
|
||||
extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
|
||||
extern void save_mount_options(struct super_block *sb, char *options);
|
||||
extern void replace_mount_options(struct super_block *sb, char *options);
|
||||
|
||||
static inline ino_t parent_ino(struct dentry *dentry)
|
||||
{
|
||||
|
|
@ -2395,7 +2450,7 @@ struct ctl_table;
|
|||
int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos);
|
||||
|
||||
int get_filesystem_list(char * buf);
|
||||
int __init get_filesystem_list(char *buf);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_FS_H */
|
||||
|
|
|
|||
|
|
@ -43,10 +43,6 @@
|
|||
*
|
||||
*/
|
||||
|
||||
/* Flags related to I2C device features */
|
||||
#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
|
||||
#define FSL_I2C_DEV_CLOCK_5200 0x00000002
|
||||
|
||||
enum fsl_usb2_operating_modes {
|
||||
FSL_USB2_MPH_HOST,
|
||||
FSL_USB2_DR_HOST,
|
||||
|
|
@ -83,6 +79,10 @@ struct fsl_spi_platform_data {
|
|||
u16 max_chipselect;
|
||||
void (*cs_control)(struct spi_device *spi, bool on);
|
||||
u32 sysclk;
|
||||
|
||||
/* Legacy hooks, used by mpc52xx_psc_spi driver. */
|
||||
void (*activate_cs)(u8 cs, u8 polarity);
|
||||
void (*deactivate_cs)(u8 cs, u8 polarity);
|
||||
};
|
||||
|
||||
struct mpc8xx_pcmcia_ops {
|
||||
|
|
|
|||
|
|
@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part)
|
|||
#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
|
||||
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
|
||||
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
|
||||
#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
|
||||
|
||||
struct disk_part_iter {
|
||||
struct gendisk *disk;
|
||||
|
|
|
|||
|
|
@ -2,6 +2,8 @@
|
|||
#define _LINUX_INIT_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/section-names.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
/* These macros are used to mark some functions or
|
||||
* initialized data (doesn't apply to uninitialized data)
|
||||
|
|
@ -60,14 +62,6 @@
|
|||
#define __refdata __section(.ref.data)
|
||||
#define __refconst __section(.ref.rodata)
|
||||
|
||||
/* backward compatibility note
|
||||
* A few places hardcode the old section names:
|
||||
* .text.init.refok
|
||||
* .data.init.refok
|
||||
* .exit.text.refok
|
||||
* They should be converted to use the defines from this file
|
||||
*/
|
||||
|
||||
/* compatibility defines */
|
||||
#define __init_refok __ref
|
||||
#define __initdata_refok __refdata
|
||||
|
|
@ -107,7 +101,7 @@
|
|||
#define __memexitconst __section(.memexit.rodata)
|
||||
|
||||
/* For assembly routines */
|
||||
#define __HEAD .section ".head.text","ax"
|
||||
#define __HEAD .section __stringify(HEAD_TEXT_SECTION),"ax"
|
||||
#define __INIT .section ".init.text","ax"
|
||||
#define __FINIT .previous
|
||||
|
||||
|
|
@ -247,6 +241,7 @@ struct obs_kernel_param {
|
|||
|
||||
/* Relies on boot_command_line being set */
|
||||
void __init parse_early_param(void);
|
||||
void __init parse_early_options(char *cmdline);
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -15,19 +15,6 @@
|
|||
extern struct files_struct init_files;
|
||||
extern struct fs_struct init_fs;
|
||||
|
||||
#define INIT_KIOCTX(name, which_mm) \
|
||||
{ \
|
||||
.users = ATOMIC_INIT(1), \
|
||||
.dead = 0, \
|
||||
.mm = &which_mm, \
|
||||
.user_id = 0, \
|
||||
.next = NULL, \
|
||||
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
|
||||
.ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
|
||||
.reqs_active = 0U, \
|
||||
.max_reqs = ~0U, \
|
||||
}
|
||||
|
||||
#define INIT_MM(name) \
|
||||
{ \
|
||||
.mm_rb = RB_ROOT, \
|
||||
|
|
|
|||
|
|
@ -106,6 +106,7 @@ struct input_absinfo {
|
|||
|
||||
#define SYN_REPORT 0
|
||||
#define SYN_CONFIG 1
|
||||
#define SYN_MT_REPORT 2
|
||||
|
||||
/*
|
||||
* Keys and buttons
|
||||
|
|
@ -445,6 +446,7 @@ struct input_absinfo {
|
|||
#define BTN_STYLUS2 0x14c
|
||||
#define BTN_TOOL_DOUBLETAP 0x14d
|
||||
#define BTN_TOOL_TRIPLETAP 0x14e
|
||||
#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */
|
||||
|
||||
#define BTN_WHEEL 0x150
|
||||
#define BTN_GEAR_DOWN 0x150
|
||||
|
|
@ -644,6 +646,17 @@ struct input_absinfo {
|
|||
#define ABS_TOOL_WIDTH 0x1c
|
||||
#define ABS_VOLUME 0x20
|
||||
#define ABS_MISC 0x28
|
||||
|
||||
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
|
||||
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
|
||||
#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */
|
||||
#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */
|
||||
#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */
|
||||
#define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */
|
||||
#define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */
|
||||
#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */
|
||||
#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
|
||||
|
||||
#define ABS_MAX 0x3f
|
||||
#define ABS_CNT (ABS_MAX+1)
|
||||
|
||||
|
|
@ -742,6 +755,12 @@ struct input_absinfo {
|
|||
#define BUS_GSC 0x1A
|
||||
#define BUS_ATARI 0x1B
|
||||
|
||||
/*
|
||||
* MT_TOOL types
|
||||
*/
|
||||
#define MT_TOOL_FINGER 0
|
||||
#define MT_TOOL_PEN 1
|
||||
|
||||
/*
|
||||
* Values describing the status of a force-feedback effect
|
||||
*/
|
||||
|
|
@ -1311,6 +1330,11 @@ static inline void input_sync(struct input_dev *dev)
|
|||
input_event(dev, EV_SYN, SYN_REPORT, 0);
|
||||
}
|
||||
|
||||
static inline void input_mt_sync(struct input_dev *dev)
|
||||
{
|
||||
input_event(dev, EV_SYN, SYN_MT_REPORT, 0);
|
||||
}
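The ABS_MT_* axes and input_mt_sync() added here are consumed by multi-touch drivers roughly as sketched below; the coordinate values and the two-contact frame are placeholders, and device setup (input_set_abs_params() for each axis) is assumed to have happened elsewhere.

#include <linux/input.h>

/* Sketch: report one anonymous contact per SYN_MT_REPORT packet. */
static void example_report_contact(struct input_dev *dev, int x, int y, int major)
{
        input_report_abs(dev, ABS_MT_POSITION_X, x);
        input_report_abs(dev, ABS_MT_POSITION_Y, y);
        input_report_abs(dev, ABS_MT_TOUCH_MAJOR, major);
        input_mt_sync(dev);             /* ends this contact (EV_SYN/SYN_MT_REPORT) */
}

static void example_report_frame(struct input_dev *dev)
{
        example_report_contact(dev, 100, 200, 10);      /* made-up coordinates */
        example_report_contact(dev, 300, 400, 12);
        input_sync(dev);                /* ends the whole frame (EV_SYN/SYN_REPORT) */
}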
|
||||
|
||||
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
|
||||
|
||||
static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat)
|
||||
|
|
|
|||
|
|
@ -198,6 +198,8 @@ struct kernel_ipmi_msg {
|
|||
response. When you send a
|
||||
response message, this will
|
||||
be returned. */
|
||||
#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */
|
||||
|
||||
/* Note that async events and received commands do not have a completion
|
||||
code as the first byte of the incoming data, unlike a response. */
|
||||
|
||||
|
|
|
|||
|
|
@ -58,6 +58,12 @@
|
|||
#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
|
||||
#define IPMI_GET_CHANNEL_INFO_CMD 0x42
|
||||
|
||||
/* Bit for BMC global enables. */
|
||||
#define IPMI_BMC_RCV_MSG_INTR 0x01
|
||||
#define IPMI_BMC_EVT_MSG_INTR 0x02
|
||||
#define IPMI_BMC_EVT_MSG_BUFF 0x04
|
||||
#define IPMI_BMC_SYS_LOG 0x08
|
||||
|
||||
#define IPMI_NETFN_STORAGE_REQUEST 0x0a
|
||||
#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
|
||||
#define IPMI_ADD_SEL_ENTRY_CMD 0x44
|
||||
|
|
@ -109,5 +115,7 @@
|
|||
#define IPMI_CHANNEL_MEDIUM_USB1 10
|
||||
#define IPMI_CHANNEL_MEDIUM_USB2 11
|
||||
#define IPMI_CHANNEL_MEDIUM_SYSINTF 12
|
||||
#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60
|
||||
#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f
|
||||
|
||||
#endif /* __LINUX_IPMI_MSGDEFS_H */
|
||||
|
|
|
|||
|
|
@ -978,7 +978,8 @@ extern void journal_destroy_revoke(journal_t *);
|
|||
extern int journal_revoke (handle_t *,
|
||||
unsigned long, struct buffer_head *);
|
||||
extern int journal_cancel_revoke(handle_t *, struct journal_head *);
|
||||
extern void journal_write_revoke_records(journal_t *, transaction_t *);
|
||||
extern void journal_write_revoke_records(journal_t *,
|
||||
transaction_t *, int);
|
||||
|
||||
/* Recovery revoke support */
|
||||
extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
|
||||
|
|
|
|||
|
|
@ -1193,7 +1193,8 @@ extern int jbd2_journal_init_revoke_caches(void);
|
|||
extern void jbd2_journal_destroy_revoke(journal_t *);
|
||||
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
|
||||
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
|
||||
extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
|
||||
extern void jbd2_journal_write_revoke_records(journal_t *,
|
||||
transaction_t *, int);
|
||||
|
||||
/* Recovery revoke support */
|
||||
extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
|
||||
|
|
|
|||
|
|
@@ -377,6 +377,15 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
#define pr_cont(fmt, ...) \
        printk(KERN_CONT fmt, ##__VA_ARGS__)

/* pr_devel() should produce zero code unless DEBUG is defined */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
        printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
        ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif

/* If you are writing a driver, please use dev_dbg instead */
#if defined(DEBUG)
#define pr_debug(fmt, ...) \
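pr_devel(), added above, compiles away entirely unless DEBUG is defined. A typical, purely illustrative use together with a pr_fmt prefix would be:

/* Sketch: define pr_fmt before any includes so pr_devel/pr_debug pick it up. */
#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/kernel.h>

static void example_probe_step(int channel)
{
        /* Emitted only in builds where DEBUG is defined; otherwise no code. */
        pr_devel("probing channel %d\n", channel);
}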
@ -409,6 +409,8 @@ struct kvm_trace_rec {
|
|||
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
|
||||
#define KVM_CAP_DEVICE_DEASSIGNMENT 27
|
||||
#endif
|
||||
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
|
||||
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
|
||||
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
|
||||
|
|
|
|||
|
|
@ -209,6 +209,7 @@ enum {
|
|||
|
||||
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
|
||||
|
||||
|
||||
/* struct ata_port pflags */
|
||||
ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
|
||||
ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
|
||||
|
|
@ -225,6 +226,9 @@ enum {
|
|||
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
|
||||
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
|
||||
|
||||
ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
|
||||
ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
|
||||
|
||||
/* struct ata_queued_cmd flags */
|
||||
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
|
||||
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
|
||||
|
|
@ -689,7 +693,10 @@ struct ata_port {
|
|||
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
|
||||
struct ata_port_operations *ops;
|
||||
spinlock_t *lock;
|
||||
/* Flags owned by the EH context. Only EH should touch these once the
|
||||
port is active */
|
||||
unsigned long flags; /* ATA_FLAG_xxx */
|
||||
/* Flags that change dynamically, protected by ap->lock */
|
||||
unsigned int pflags; /* ATA_PFLAG_xxx */
|
||||
unsigned int print_id; /* user visible unique port ID */
|
||||
unsigned int port_no; /* 0 based port no. inside the host */
|
||||
|
|
@ -1595,6 +1602,7 @@ extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
|
|||
extern void ata_sff_error_handler(struct ata_port *ap);
|
||||
extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
|
||||
extern int ata_sff_port_start(struct ata_port *ap);
|
||||
extern int ata_sff_port_start32(struct ata_port *ap);
|
||||
extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
|
||||
extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
|
||||
unsigned long xfer_mask);
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ extern void mem_cgroup_move_lists(struct page *page,
|
|||
enum lru_list from, enum lru_list to);
|
||||
extern void mem_cgroup_uncharge_page(struct page *page);
|
||||
extern void mem_cgroup_uncharge_cache_page(struct page *page);
|
||||
extern int mem_cgroup_shrink_usage(struct page *page,
|
||||
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
|
||||
struct mm_struct *mm, gfp_t gfp_mask);
|
||||
|
||||
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
|
||||
|
|
@ -75,7 +75,7 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
|
|||
{
|
||||
struct mem_cgroup *mem;
|
||||
rcu_read_lock();
|
||||
mem = mem_cgroup_from_task((mm)->owner);
|
||||
mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
|
||||
rcu_read_unlock();
|
||||
return cgroup == mem;
|
||||
}
|
||||
|
|
@ -155,7 +155,7 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
|
|||
{
|
||||
}
|
||||
|
||||
static inline int mem_cgroup_shrink_usage(struct page *page,
|
||||
static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
|
||||
struct mm_struct *mm, gfp_t gfp_mask)
|
||||
{
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -12,21 +12,18 @@
|
|||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/mm.h>
|
||||
#include <linux/percpu_counter.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
|
||||
extern int sysctl_overcommit_memory;
|
||||
extern int sysctl_overcommit_ratio;
|
||||
extern atomic_long_t vm_committed_space;
|
||||
extern struct percpu_counter vm_committed_as;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
extern void vm_acct_memory(long pages);
|
||||
#else
|
||||
static inline void vm_acct_memory(long pages)
|
||||
{
|
||||
atomic_long_add(pages, &vm_committed_space);
|
||||
percpu_counter_add(&vm_committed_as, pages);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void vm_unacct_memory(long pages)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -69,7 +69,6 @@ extern int path_lookup(const char *, unsigned, struct nameidata *);
|
|||
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
|
||||
const char *, unsigned int, struct nameidata *);
|
||||
|
||||
extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
|
||||
extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
|
||||
int (*open)(struct inode *, struct file *));
|
||||
extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
|
||||
|
|
|
|||
|
|
@ -58,6 +58,25 @@ static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* nls_nullsize - return length of null character for codepage
|
||||
* @codepage - codepage for which to return length of NULL terminator
|
||||
*
|
||||
* Since we can't guarantee that the null terminator will be a particular
|
||||
* length, we have to check against the codepage. If there's a problem
|
||||
* determining it, assume a single-byte NULL terminator.
|
||||
*/
|
||||
static inline int
|
||||
nls_nullsize(const struct nls_table *codepage)
|
||||
{
|
||||
int charlen;
|
||||
char tmp[NLS_MAX_CHARSET_SIZE];
|
||||
|
||||
charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE);
|
||||
|
||||
return charlen > 0 ? charlen : 1;
|
||||
}
|
||||
|
||||
#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name))
|
||||
|
||||
#endif /* _LINUX_NLS_H */
|
||||
|
|
|
|||
|
|
@ -51,6 +51,16 @@ extern int of_register_driver(struct of_platform_driver *drv,
|
|||
struct bus_type *bus);
|
||||
extern void of_unregister_driver(struct of_platform_driver *drv);
|
||||
|
||||
/* Platform drivers register/unregister */
|
||||
static inline int of_register_platform_driver(struct of_platform_driver *drv)
|
||||
{
|
||||
return of_register_driver(drv, &of_platform_bus_type);
|
||||
}
|
||||
static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
|
||||
{
|
||||
of_unregister_driver(drv);
|
||||
}
|
||||
|
||||
#include <asm/of_platform.h>
|
||||
|
||||
extern struct of_device *of_find_device_by_node(struct device_node *np);
|
||||
|
|
|
|||
|
|
@ -2516,6 +2516,8 @@
|
|||
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
|
||||
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
|
||||
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
|
||||
#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
|
||||
#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
|
||||
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
|
||||
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
|
||||
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
|
||||
|
|
|
|||
|
|
@ -376,6 +376,7 @@
|
|||
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
|
||||
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
|
||||
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
|
||||
#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
|
||||
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
|
||||
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
|
||||
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
|
||||
|
|
|
|||
include/linux/percpu-defs.h (new file, 84 lines)
@@ -0,0 +1,84 @@
|
|||
#ifndef _LINUX_PERCPU_DEFS_H
|
||||
#define _LINUX_PERCPU_DEFS_H
|
||||
|
||||
/*
|
||||
* Determine the real variable name from the name visible in the
|
||||
* kernel sources.
|
||||
*/
|
||||
#define per_cpu_var(var) per_cpu__##var
|
||||
|
||||
/*
|
||||
* Base implementations of per-CPU variable declarations and definitions, where
|
||||
* the section in which the variable is to be placed is provided by the
|
||||
* 'section' argument. This may be used to affect the parameters governing the
|
||||
* variable's storage.
|
||||
*
|
||||
* NOTE! The sections for the DECLARE and for the DEFINE must match, lest
|
||||
* linkage errors occur due the compiler generating the wrong code to access
|
||||
* that section.
|
||||
*/
|
||||
#define DECLARE_PER_CPU_SECTION(type, name, section) \
|
||||
extern \
|
||||
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
|
||||
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
|
||||
|
||||
#define DEFINE_PER_CPU_SECTION(type, name, section) \
|
||||
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
|
||||
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
|
||||
|
||||
/*
|
||||
* Variant on the per-CPU variable declaration/definition theme used for
|
||||
* ordinary per-CPU variables.
|
||||
*/
|
||||
#define DECLARE_PER_CPU(type, name) \
|
||||
DECLARE_PER_CPU_SECTION(type, name, "")
|
||||
|
||||
#define DEFINE_PER_CPU(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, "")
|
||||
|
||||
/*
|
||||
* Declaration/definition used for per-CPU variables that must come first in
|
||||
* the set of variables.
|
||||
*/
|
||||
#define DECLARE_PER_CPU_FIRST(type, name) \
|
||||
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
|
||||
|
||||
#define DEFINE_PER_CPU_FIRST(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
|
||||
|
||||
/*
|
||||
* Declaration/definition used for per-CPU variables that must be cacheline
|
||||
* aligned under SMP conditions so that, whilst a particular instance of the
|
||||
* data corresponds to a particular CPU, inefficiencies due to direct access by
|
||||
* other CPUs are reduced by preventing the data from unnecessarily spanning
|
||||
* cachelines.
|
||||
*
|
||||
* An example of this would be statistical data, where each CPU's set of data
|
||||
* is updated by that CPU alone, but the data from across all CPUs is collated
|
||||
* by a CPU processing a read from a proc file.
|
||||
*/
|
||||
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \
|
||||
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
|
||||
____cacheline_aligned_in_smp
|
||||
|
||||
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
|
||||
____cacheline_aligned_in_smp
|
||||
|
||||
/*
|
||||
* Declaration/definition used for per-CPU variables that must be page aligned.
|
||||
*/
|
||||
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
|
||||
DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")
|
||||
|
||||
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
|
||||
|
||||
/*
|
||||
* Intermodule exports for per-CPU variables.
|
||||
*/
|
||||
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
|
||||
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
|
||||
|
||||
|
||||
#endif /* _LINUX_PERCPU_DEFS_H */
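A minimal illustration of the DEFINE_PER_CPU machinery this new header carries is sketched below; the counter and its use are invented for the example and rely on the existing get_cpu_var()/put_cpu_var() accessors from linux/percpu.h.

#include <linux/percpu.h>

/* One counter instance per CPU, placed in the per-CPU data section. */
DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_hit(void)
{
        /* get_cpu_var() disables preemption while the local copy is touched. */
        get_cpu_var(example_hits)++;
        put_cpu_var(example_hits);
}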
|
||||
|
|
@ -9,50 +9,6 @@
|
|||
|
||||
#include <asm/percpu.h>
|
||||
|
||||
#ifndef PER_CPU_BASE_SECTION
|
||||
#ifdef CONFIG_SMP
|
||||
#define PER_CPU_BASE_SECTION ".data.percpu"
|
||||
#else
|
||||
#define PER_CPU_BASE_SECTION ".data"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#ifdef MODULE
|
||||
#define PER_CPU_SHARED_ALIGNED_SECTION ""
|
||||
#else
|
||||
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
|
||||
#endif
|
||||
#define PER_CPU_FIRST_SECTION ".first"
|
||||
|
||||
#else
|
||||
|
||||
#define PER_CPU_SHARED_ALIGNED_SECTION ""
|
||||
#define PER_CPU_FIRST_SECTION ""
|
||||
|
||||
#endif
|
||||
|
||||
#define DEFINE_PER_CPU_SECTION(type, name, section) \
|
||||
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
|
||||
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
|
||||
|
||||
#define DEFINE_PER_CPU(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, "")
|
||||
|
||||
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
|
||||
____cacheline_aligned_in_smp
|
||||
|
||||
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
|
||||
|
||||
#define DEFINE_PER_CPU_FIRST(type, name) \
|
||||
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
|
||||
|
||||
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
|
||||
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
|
||||
|
||||
/* enough to cover all DEFINE_PER_CPUs in modules */
|
||||
#ifdef CONFIG_MODULES
|
||||
#define PERCPU_MODULE_RESERVE (8 << 10)
|
||||
|
|
|
|||
|
|
@ -134,6 +134,11 @@ struct pipe_buf_operations {
|
|||
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
|
||||
#define PIPE_SIZE PAGE_SIZE
|
||||
|
||||
/* Pipe lock and unlock operations */
|
||||
void pipe_lock(struct pipe_inode_info *);
|
||||
void pipe_unlock(struct pipe_inode_info *);
|
||||
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
|
||||
|
||||
/* Drop the inode semaphore and wait for a pipe event, atomically */
|
||||
void pipe_wait(struct pipe_inode_info *pipe);
|
||||
|
||||
|
|
|
|||
|
|
@ -113,6 +113,7 @@ struct pkt_ctrl_command {
|
|||
#include <linux/cdrom.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/mempool.h>
|
||||
|
||||
/* default bio write queue congestion marks */
|
||||
#define PKT_WRITE_CONGESTION_ON 10000
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ struct platform_device {
|
|||
struct device dev;
|
||||
u32 num_resources;
|
||||
struct resource * resource;
|
||||
void *platform_data;
|
||||
|
||||
struct platform_device_id *id_entry;
|
||||
};
|
||||
|
|
@ -77,4 +76,46 @@ extern int platform_driver_probe(struct platform_driver *driver,
|
|||
#define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
|
||||
#define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data))
|
||||
|
||||
/* early platform driver interface */
|
||||
struct early_platform_driver {
|
||||
const char *class_str;
|
||||
struct platform_driver *pdrv;
|
||||
struct list_head list;
|
||||
int requested_id;
|
||||
};
|
||||
|
||||
#define EARLY_PLATFORM_ID_UNSET -2
|
||||
#define EARLY_PLATFORM_ID_ERROR -3
|
||||
|
||||
extern int early_platform_driver_register(struct early_platform_driver *epdrv,
|
||||
char *buf);
|
||||
extern void early_platform_add_devices(struct platform_device **devs, int num);
|
||||
|
||||
static inline int is_early_platform_device(struct platform_device *pdev)
|
||||
{
|
||||
return !pdev->dev.driver;
|
||||
}
|
||||
|
||||
extern void early_platform_driver_register_all(char *class_str);
|
||||
extern int early_platform_driver_probe(char *class_str,
|
||||
int nr_probe, int user_only);
|
||||
extern void early_platform_cleanup(void);
|
||||
|
||||
|
||||
#ifndef MODULE
|
||||
#define early_platform_init(class_string, platform_driver) \
|
||||
static __initdata struct early_platform_driver early_driver = { \
|
||||
.class_str = class_string, \
|
||||
.pdrv = platform_driver, \
|
||||
.requested_id = EARLY_PLATFORM_ID_UNSET, \
|
||||
}; \
|
||||
static int __init early_platform_driver_setup_func(char *buf) \
|
||||
{ \
|
||||
return early_platform_driver_register(&early_driver, buf); \
|
||||
} \
|
||||
early_param(class_string, early_platform_driver_setup_func)
|
||||
#else /* MODULE */
|
||||
#define early_platform_init(class_string, platform_driver)
|
||||
#endif /* MODULE */
|
||||
|
||||
#endif /* _PLATFORM_DEVICE_H_ */
|
||||
|
|
|
|||
|
|
@ -161,9 +161,8 @@ struct rcu_data {
|
|||
unsigned long offline_fqs; /* Kicked due to being offline. */
|
||||
unsigned long resched_ipi; /* Sent a resched IPI. */
|
||||
|
||||
/* 5) state to allow this CPU to force_quiescent_state on others */
|
||||
/* 5) For future __rcu_pending statistics. */
|
||||
long n_rcu_pending; /* rcu_pending() calls since boot. */
|
||||
long n_rcu_pending_force_qs; /* when to force quiescent states. */
|
||||
|
||||
int cpu;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ enum regulator_status {
|
|||
* @set_current_limit: Configure a limit for a current-limited regulator.
|
||||
* @get_current_limit: Get the configured limit for a current-limited regulator.
|
||||
*
|
||||
* @set_mode: Set the configured operating mode for the regulator.
|
||||
* @get_mode: Get the configured operating mode for the regulator.
|
||||
* @get_status: Return actual (not as-configured) status of regulator, as a
|
||||
* REGULATOR_STATUS value (or negative errno)
|
||||
|
|
|
|||
|
|
@ -193,7 +193,7 @@ struct reiserfs_journal {
|
|||
atomic_t j_wcount; /* count of writers for current commit */
|
||||
unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */
|
||||
unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */
|
||||
unsigned long j_last_flush_trans_id; /* last fully flushed journal timestamp */
|
||||
unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */
|
||||
struct buffer_head *j_header_bh;
|
||||
|
||||
time_t j_trans_start_time; /* time this transaction started */
|
||||
|
|
@ -402,7 +402,7 @@ struct reiserfs_sb_info {
|
|||
int reserved_blocks; /* amount of blocks reserved for further allocations */
|
||||
spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
|
||||
struct dentry *priv_root; /* root of /.reiserfs_priv */
|
||||
struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */
|
||||
struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */
|
||||
int j_errno;
|
||||
#ifdef CONFIG_QUOTA
|
||||
char *s_qf_names[MAXQUOTAS];
|
||||
|
|
@ -488,7 +488,6 @@ enum reiserfs_mount_options {
|
|||
#define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
|
||||
#define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
|
||||
#define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
|
||||
#define reiserfs_xattrs(s) ((s)->s_xattr != NULL)
|
||||
#define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
|
||||
#define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
|
||||
#define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ struct nameidata;
|
|||
int reiserfs_xattr_register_handlers(void) __init;
|
||||
void reiserfs_xattr_unregister_handlers(void);
|
||||
int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
|
||||
int reiserfs_lookup_privroot(struct super_block *sb);
|
||||
int reiserfs_delete_xattrs(struct inode *inode);
|
||||
int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
|
||||
|
||||
|
|
@ -97,7 +98,7 @@ static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode)
|
|||
|
||||
if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) {
|
||||
nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
|
||||
if (REISERFS_SB(inode->i_sb)->xattr_root == NULL)
|
||||
if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode)
|
||||
nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -53,9 +53,4 @@ struct romfs_inode {
|
|||
#define ROMFH_PAD (ROMFH_SIZE-1)
|
||||
#define ROMFH_MASK (~ROMFH_PAD)
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* Not much now */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@ extern long prctl_set_seccomp(unsigned long);
|
|||
|
||||
#else /* CONFIG_SECCOMP */
|
||||
|
||||
#include <linux/errno.h>
|
||||
|
||||
typedef struct { } seccomp_t;
|
||||
|
||||
#define secure_computing(x) do { } while (0)
|
||||
|
|
|
|||
include/linux/section-names.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#ifndef __LINUX_SECTION_NAMES_H
#define __LINUX_SECTION_NAMES_H

#define HEAD_TEXT_SECTION .head.text

#endif /* !__LINUX_SECTION_NAMES_H */
include/linux/sht15.h (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * sht15.h - support for the SHT15 Temperature and Humidity Sensor
 *
 * Copyright (c) 2009 Jonathan Cameron
 *
 * Copyright (c) 2007 Wouter Horre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * struct sht15_platform_data - sht15 connectivity info
 * @gpio_data: no. of gpio to which bidirectional data line is connected
 * @gpio_sck: no. of gpio to which the data clock is connected.
 * @supply_mv: supply voltage in mv. Overridden by regulator if available.
 **/
struct sht15_platform_data {
        int gpio_data;
        int gpio_sck;
        int supply_mv;
};
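Board code would normally hand this platform data to the sensor driver through a platform device. The sketch below is only indicative: the GPIO numbers, supply voltage, and the "sht15" device name are assumptions for illustration and must match the actual board and driver.

#include <linux/platform_device.h>
#include <linux/sht15.h>

static struct sht15_platform_data example_sht15_pdata = {
        .gpio_data = 100,       /* assumed GPIO numbers for this board sketch */
        .gpio_sck  = 101,
        .supply_mv = 3300,
};

static struct platform_device example_sht15_device = {
        .name = "sht15",        /* assumed to match the hwmon driver's name */
        .id   = -1,
        .dev  = {
                .platform_data = &example_sht15_pdata,
        },
};

/* Registered from board init: platform_device_register(&example_sht15_device); */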
@ -67,7 +67,7 @@ static inline void slow_work_init(struct slow_work *work,
|
|||
}
|
||||
|
||||
/**
|
||||
* slow_work_init - Initialise a very slow work item
|
||||
* vslow_work_init - Initialise a very slow work item
|
||||
* @work: The work item to initialise
|
||||
* @ops: The operations to use to handle the slow work item
|
||||
*
|
||||
|
|
|
|||
|
|
@ -51,5 +51,6 @@ struct ads7846_platform_data {
|
|||
void **filter_data);
|
||||
int (*filter) (void *filter_data, int data_idx, int *val);
|
||||
void (*filter_cleanup)(void *filter_data);
|
||||
void (*wait_for_sync)(void);
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -245,7 +245,12 @@ struct spi_master {
|
|||
*/
|
||||
u16 dma_alignment;
|
||||
|
||||
/* setup mode and clock, etc (spi driver may call many times) */
|
||||
/* Setup mode and clock, etc (spi driver may call many times).
|
||||
*
|
||||
* IMPORTANT: this may be called when transfers to another
|
||||
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
|
||||
* which could break those transfers.
|
||||
*/
|
||||
int (*setup)(struct spi_device *spi);
|
||||
|
||||
/* bidirectional bulk transfers
|
||||
|
|
|
|||
|
|
@ -36,6 +36,8 @@ struct splice_desc {
|
|||
void *data; /* cookie */
|
||||
} u;
|
||||
loff_t pos; /* file position */
|
||||
size_t num_spliced; /* number of bytes already spliced */
|
||||
bool need_wakeup; /* need to wake up writer */
|
||||
};
|
||||
|
||||
struct partial_page {
|
||||
|
|
@ -66,6 +68,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
|
|||
splice_actor *);
|
||||
extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
|
||||
struct splice_desc *, splice_actor *);
|
||||
extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *,
|
||||
splice_actor *);
|
||||
extern int splice_from_pipe_next(struct pipe_inode_info *,
|
||||
struct splice_desc *);
|
||||
extern void splice_from_pipe_begin(struct splice_desc *);
|
||||
extern void splice_from_pipe_end(struct pipe_inode_info *,
|
||||
struct splice_desc *);
|
||||
extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *,
|
||||
struct splice_desc *);
|
||||
|
||||
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
|
||||
struct splice_pipe_desc *);
|
||||
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
|
||||
|
|
|
|||
|
|
@ -261,6 +261,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
|
|||
#define XPRT_BINDING (5)
|
||||
#define XPRT_CLOSING (6)
|
||||
#define XPRT_CONNECTION_ABORT (7)
|
||||
#define XPRT_CONNECTION_CLOSE (8)
|
||||
|
||||
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -58,10 +58,17 @@ typedef int __bitwise suspend_state_t;
|
|||
* by @begin().
|
||||
* @prepare() is called right after devices have been suspended (ie. the
|
||||
* appropriate .suspend() method has been executed for each device) and
|
||||
* before the nonboot CPUs are disabled (it is executed with IRQs enabled).
|
||||
* This callback is optional. It returns 0 on success or a negative
|
||||
* error code otherwise, in which case the system cannot enter the desired
|
||||
* sleep state (@enter() and @finish() will not be called in that case).
|
||||
* before device drivers' late suspend callbacks are executed. It returns
|
||||
* 0 on success or a negative error code otherwise, in which case the
|
||||
* system cannot enter the desired sleep state (@prepare_late(), @enter(),
|
||||
* @wake(), and @finish() will not be called in that case).
|
||||
*
|
||||
* @prepare_late: Finish preparing the platform for entering the system sleep
|
||||
* state indicated by @begin().
|
||||
* @prepare_late is called before disabling nonboot CPUs and after
|
||||
* device drivers' late suspend callbacks have been executed. It returns
|
||||
* 0 on success or a negative error code otherwise, in which case the
|
||||
* system cannot enter the desired sleep state (@enter() and @wake()).
|
||||
*
|
||||
* @enter: Enter the system sleep state indicated by @begin() or represented by
|
||||
* the argument if @begin() is not implemented.
|
||||
|
|
@ -69,19 +76,26 @@ typedef int __bitwise suspend_state_t;
|
|||
* error code otherwise, in which case the system cannot enter the desired
|
||||
* sleep state.
|
||||
*
|
||||
* @finish: Called when the system has just left a sleep state, right after
|
||||
* the nonboot CPUs have been enabled and before devices are resumed (it is
|
||||
* executed with IRQs enabled).
|
||||
* @wake: Called when the system has just left a sleep state, right after
|
||||
* the nonboot CPUs have been enabled and before device drivers' early
|
||||
* resume callbacks are executed.
|
||||
* This callback is optional, but should be implemented by the platforms
|
||||
* that implement @prepare_late(). If implemented, it is always called
|
||||
* after @enter(), even if @enter() fails.
|
||||
*
|
||||
* @finish: Finish wake-up of the platform.
|
||||
* @finish is called right prior to calling device drivers' regular suspend
|
||||
* callbacks.
|
||||
* This callback is optional, but should be implemented by the platforms
|
||||
* that implement @prepare(). If implemented, it is always called after
|
||||
* @enter() (even if @enter() fails).
|
||||
* @enter() and @wake(), if implemented, even if any of them fails.
|
||||
*
|
||||
* @end: Called by the PM core right after resuming devices, to indicate to
|
||||
* the platform that the system has returned to the working state or
|
||||
* the transition to the sleep state has been aborted.
|
||||
* This callback is optional, but should be implemented by the platforms
|
||||
* that implement @begin(), but platforms implementing @begin() should
|
||||
* also provide a @end() which cleans up transitions aborted before
|
||||
* that implement @begin(). Accordingly, platforms implementing @begin()
|
||||
* should also provide a @end() which cleans up transitions aborted before
|
||||
* @enter().
|
||||
*
|
||||
* @recover: Recover the platform from a suspend failure.
|
||||
|
|
@ -93,7 +107,9 @@ struct platform_suspend_ops {
|
|||
int (*valid)(suspend_state_t state);
|
||||
int (*begin)(suspend_state_t state);
|
||||
int (*prepare)(void);
|
||||
int (*prepare_late)(void);
|
||||
int (*enter)(suspend_state_t state);
|
||||
void (*wake)(void);
|
||||
void (*finish)(void);
|
||||
void (*end)(void);
|
||||
void (*recover)(void);
|
||||
|
|
|
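With @prepare_late and @wake added, a platform's suspend_ops table gains two optional hooks around nonboot-CPU handling. The skeleton below is illustrative only (the callbacks do nothing and the names are invented); it is not taken from this commit.

#include <linux/suspend.h>

static int example_prepare_late(void)
{
        /* Last-minute platform setup after late device suspend; 0 on success. */
        return 0;
}

static int example_enter(suspend_state_t state)
{
        /* Platform-specific low-power entry would go here. */
        return 0;
}

static void example_wake(void)
{
        /* Undo example_prepare_late(); runs before early device resume. */
}

static struct platform_suspend_ops example_suspend_ops = {
        .valid          = suspend_valid_only_mem,
        .prepare_late   = example_prepare_late,
        .enter          = example_enter,
        .wake           = example_wake,
};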
|||
|
|
@ -148,7 +148,7 @@ struct old_linux_dirent;
|
|||
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
|
||||
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
|
||||
#else
|
||||
#ifdef CONFIG_ALPHA
|
||||
#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS)
|
||||
#define SYSCALL_ALIAS(alias, name) \
|
||||
asm ( #alias " = " #name "\n\t.globl " #alias)
|
||||
#else
|
||||
|
|
@ -433,6 +433,7 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
|
|||
asmlinkage long sys_fcntl64(unsigned int fd,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
#endif
|
||||
asmlinkage long sys_pipe2(int __user *fildes, int flags);
|
||||
asmlinkage long sys_dup(unsigned int fildes);
|
||||
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
|
||||
asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
|
||||
|
|
|
|||
|
|
@ -1387,6 +1387,7 @@ extern int usb_string(struct usb_device *dev, int index,
|
|||
extern int usb_clear_halt(struct usb_device *dev, int pipe);
|
||||
extern int usb_reset_configuration(struct usb_device *dev);
|
||||
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
|
||||
extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
|
||||
|
||||
/* this request isn't really synchronous, but it belongs with the others */
|
||||
extern int usb_driver_set_configuration(struct usb_device *udev, int config);
|
||||
|
|
@ -1491,14 +1492,6 @@ void usb_sg_wait(struct usb_sg_request *io);
|
|||
#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
|
||||
#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
|
||||
|
||||
/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
|
||||
#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
|
||||
#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
|
||||
#define usb_settoggle(dev, ep, out, bit) \
|
||||
((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
|
||||
((bit) << (ep)))
|
||||
|
||||
|
||||
static inline unsigned int __create_pipe(struct usb_device *dev,
|
||||
unsigned int endpoint)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -7,6 +7,9 @@
|
|||
* key configuration differences between boards.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_USB_MUSB_H
|
||||
#define __LINUX_USB_MUSB_H
|
||||
|
||||
/* The USB role is defined by the connector used on the board, so long as
|
||||
* standards are being followed. (Developer boards sometimes won't.)
|
||||
*/
|
||||
|
|
@ -101,3 +104,5 @@ extern int __init tusb6010_setup_interface(
|
|||
extern int tusb6010_platform_retime(unsigned is_refclk);
|
||||
|
||||
#endif /* OMAP2 */
|
||||
|
||||
#endif /* __LINUX_USB_MUSB_H */
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@
|
|||
/**
|
||||
* usb_serial_port: structure for the specific ports of a device.
|
||||
* @serial: pointer back to the struct usb_serial owner of this port.
|
||||
* @tty: pointer to the corresponding tty for this port.
|
||||
* @port: pointer to the corresponding tty_port for this port.
|
||||
* @lock: spinlock to grab when updating portions of this structure.
|
||||
* @mutex: mutex used to synchronize serial_open() and serial_close()
|
||||
* access for this port.
|
||||
|
|
@ -44,19 +44,22 @@
|
|||
* @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
|
||||
* for this port.
|
||||
* @bulk_in_buffer: pointer to the bulk in buffer for this port.
|
||||
* @bulk_in_size: the size of the bulk_in_buffer, in bytes.
|
||||
* @read_urb: pointer to the bulk in struct urb for this port.
|
||||
* @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
|
||||
* port.
|
||||
* @bulk_out_buffer: pointer to the bulk out buffer for this port.
|
||||
* @bulk_out_size: the size of the bulk_out_buffer, in bytes.
|
||||
* @write_urb: pointer to the bulk out struct urb for this port.
|
||||
* @write_urb_busy: port`s writing status
|
||||
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
|
||||
* port.
|
||||
* @write_wait: a wait_queue_head_t used by the port.
|
||||
* @work: work queue entry for the line discipline waking up.
|
||||
* @open_count: number of times this port has been opened.
|
||||
* @throttled: nonzero if the read urb is inactive to throttle the device
|
||||
* @throttle_req: nonzero if the tty wants to throttle us
|
||||
* @console: attached usb serial console
|
||||
* @dev: pointer to the serial device
|
||||
*
|
||||
* This structure is used by the usb-serial core and drivers for the specific
|
||||
* ports of a device.
|
||||
|
|
|
|||
|
|
@@ -168,8 +168,6 @@ void writeback_set_ratelimit(void);
/* pdflush.c */
extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
                                  read-only. */
extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */
extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */


#endif /* WRITEBACK_H */