Merge branch 'merge-fixes' into devel

Russell King, 2008-04-19 17:17:34 +01:00 (committed by Russell King)
commit cf816ecb53
2366 changed files with 141852 additions and 166028 deletions

View file

@ -86,6 +86,7 @@ header-y += if_plip.h
header-y += if_ppp.h
header-y += if_slip.h
header-y += if_strip.h
header-y += if_tun.h
header-y += if_tunnel.h
header-y += in6.h
header-y += in_route.h
@ -229,9 +230,7 @@ unifdef-y += if_link.h
unifdef-y += if_pppol2tp.h
unifdef-y += if_pppox.h
unifdef-y += if_tr.h
unifdef-y += if_tun.h
unifdef-y += if_vlan.h
unifdef-y += if_wanpipe.h
unifdef-y += igmp.h
unifdef-y += inet_diag.h
unifdef-y += in.h
@ -261,6 +260,7 @@ unifdef-y += mempolicy.h
unifdef-y += mii.h
unifdef-y += mman.h
unifdef-y += mroute.h
unifdef-y += mroute6.h
unifdef-y += msdos_fs.h
unifdef-y += msg.h
unifdef-y += nbd.h
@ -289,6 +289,7 @@ unifdef-y += parport.h
unifdef-y += patchkey.h
unifdef-y += pci.h
unifdef-y += personality.h
unifdef-y += pim.h
unifdef-y += pktcdvd.h
unifdef-y += pmu.h
unifdef-y += poll.h

View file

@ -283,8 +283,8 @@ struct arcnet_local {
int next_buf, first_free_buf;
/* network "reconfiguration" handling */
time_t first_recon, /* time of "first" RECON message to count */
last_recon; /* time of most recent RECON */
unsigned long first_recon; /* time of "first" RECON message to count */
unsigned long last_recon; /* time of most recent RECON */
int num_recons; /* number of RECONs between first and last. */
bool network_down; /* do we think the network is down? */

View file

@ -85,8 +85,6 @@ static inline struct atalk_sock *at_sk(struct sock *sk)
return (struct atalk_sock *)sk;
}
#include <asm/byteorder.h>
struct ddpehdr {
__be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */
__be16 deh_sum;

View file

@ -37,7 +37,7 @@ attribute_container_set_no_classdevs(struct attribute_container *atc)
}
int attribute_container_register(struct attribute_container *cont);
int attribute_container_unregister(struct attribute_container *cont);
int __must_check attribute_container_unregister(struct attribute_container *cont);
void attribute_container_create_device(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,

View file

@ -353,6 +353,33 @@ struct netlink_skb_parms;
struct linux_binprm;
struct mq_attr;
struct mqstat;
struct audit_watch;
struct audit_tree;
struct audit_krule {
int vers_ops;
u32 flags;
u32 listnr;
u32 action;
u32 mask[AUDIT_BITMASK_SIZE];
u32 buflen; /* for data alloc on list rules */
u32 field_count;
char *filterkey; /* ties events to rules */
struct audit_field *fields;
struct audit_field *arch_f; /* quick access to arch field */
struct audit_field *inode_f; /* quick access to an inode field */
struct audit_watch *watch; /* associated watch */
struct audit_tree *tree; /* associated watched tree */
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
};
struct audit_field {
u32 type;
u32 val;
u32 op;
char *lsm_str;
void *lsm_rule;
};
#define AUDITSC_INVALID 0
#define AUDITSC_SUCCESS 1
@ -536,6 +563,8 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const char *prefix,
struct path *path);
extern void audit_log_lost(const char *message);
extern int audit_update_lsm_rules(void);
/* Private API (for audit.c only) */
extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
extern int audit_filter_type(int type);

View file

@ -216,6 +216,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);

View file

@ -95,12 +95,17 @@ enum dma_transaction_type {
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
/**
* enum dma_prep_flags - DMA flags to augment operation preparation
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - the descriptor cannot be reused until the client
* acknowledges receipt, i.e. has had a chance to establish any
* dependency chains
*/
enum dma_prep_flags {
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
};
/**
@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* ---dma generic offload fields---
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @ack: the descriptor can not be reused until the client acknowledges
* receipt, i.e. has had a chance to establish any dependency chains
* @flags: flags to augment operation preparation, control completion, and
* communicate status
* @phys: physical address of the descriptor
* @tx_list: driver common field for operations that require multiple
* descriptors
@ -221,23 +226,20 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
* ---async_tx api specific fields---
* @depend_list: at completion this list of transactions are submitted
* @depend_node: allow this transaction to be executed after another
* transaction has completed, possibly on another channel
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
* @lock: protect the dependency list
* @lock: protect the parent and next pointers
*/
struct dma_async_tx_descriptor {
dma_cookie_t cookie;
int ack;
enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
dma_addr_t phys;
struct list_head tx_list;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct list_head depend_list;
struct list_head depend_node;
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
};
@ -261,7 +263,6 @@ struct dma_async_tx_descriptor {
* @device_prep_dma_zero_sum: prepares a zero_sum operation
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_dependency_added: async_tx notifies the channel about new deps
* @device_issue_pending: push pending transactions to hardware
*/
struct dma_device {
@ -294,9 +295,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan);
struct dma_chan *chan, unsigned long flags);
void (*device_dependency_added)(struct dma_chan *chan);
enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last,
dma_cookie_t *used);
@ -321,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
tx->ack = 1;
tx->flags |= DMA_CTRL_ACK;
}
static inline int
async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
return tx->flags & DMA_CTRL_ACK;
}
#define first_dma_cap(mask) __first_dma_cap(&(mask))
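To illustrate the flags-based acknowledgement model introduced above (a sketch, not part of this commit): a dmaengine client now marks a descriptor with async_tx_ack() instead of writing tx->ack directly, and the driver can later poll async_tx_test_ack() before recycling the descriptor. The channel, DMA addresses and error handling below are placeholders.

#include <linux/dmaengine.h>

/* Illustrative only: prepare a memcpy, ack the descriptor so the driver may
 * reuse it once it completes, and submit it.  This replaces the old
 * "tx->ack = 1" idiom with the DMA_CTRL_ACK flag shown above.
 */
static dma_cookie_t demo_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	async_tx_ack(tx);		/* tx->flags |= DMA_CTRL_ACK */
	return tx->tx_submit(tx);
}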

View file

@ -37,21 +37,6 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */
struct sock_filter __user *filter;
};
#ifdef __KERNEL__
struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
struct rcu_head rcu;
struct sock_filter insns[0];
};
static inline unsigned int sk_filter_len(struct sk_filter *fp)
{
return fp->len*sizeof(struct sock_filter) + sizeof(*fp);
}
#endif
/*
* Instruction classes
*/
@ -136,15 +121,31 @@ static inline unsigned int sk_filter_len(struct sk_filter *fp)
#define SKF_AD_PROTOCOL 0
#define SKF_AD_PKTTYPE 4
#define SKF_AD_IFINDEX 8
#define SKF_AD_MAX 12
#define SKF_AD_NLATTR 12
#define SKF_AD_MAX 16
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
#ifdef __KERNEL__
struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
struct rcu_head rcu;
struct sock_filter insns[0];
};
static inline unsigned int sk_filter_len(const struct sk_filter *fp)
{
return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}
struct sk_buff;
struct sock;
extern unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
extern unsigned int sk_run_filter(struct sk_buff *skb,
struct sock_filter *filter, int flen);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);

View file

@ -50,7 +50,7 @@ struct gianfar_platform_data {
u32 device_flags;
/* board specific information */
u32 board_flags;
u32 bus_id;
char bus_id[MII_BUS_ID_SIZE];
u32 phy_id;
u8 mac_addr[6];
phy_interface_t interface;

View file

@ -422,9 +422,11 @@ struct hd_geometry {
#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */
#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
#define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */
#ifndef __KERNEL__
#define HDIO_SCAN_HWIF 0x0328 /* register and (re)scan interface */
#define HDIO_SET_NICE 0x0329 /* set nice flags */
#define HDIO_UNREGISTER_HWIF 0x032a /* unregister interface */
#endif
#define HDIO_SET_NICE 0x0329 /* set nice flags */
#define HDIO_SET_WCACHE 0x032b /* change write cache enable-disable */
#define HDIO_SET_ACOUSTIC 0x032c /* change acoustic behavior */
#define HDIO_SET_BUSSTATE 0x032d /* set the bus state of the hwif */

View file

@ -173,7 +173,6 @@ struct hrtimer_clock_base {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
* @lock_key: the lock_class_key for use with lockdep
* @clock_base: array of clock bases for this cpu
* @curr_timer: the timer which is executing a callback right now
* @expires_next: absolute time of the next event which was scheduled
@ -189,7 +188,6 @@ struct hrtimer_clock_base {
*/
struct hrtimer_cpu_base {
spinlock_t lock;
struct lock_class_key lock_key;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
struct list_head cb_pending;
#ifdef CONFIG_HIGH_RES_TIMERS

View file

@ -176,12 +176,21 @@ extern void icmpv6_send(struct sk_buff *skb,
__u32 info,
struct net_device *dev);
extern int icmpv6_init(struct net_proto_family *ops);
extern int icmpv6_init(void);
extern int icmpv6_err_convert(int type, int code,
int *err);
extern void icmpv6_cleanup(void);
extern void icmpv6_param_prob(struct sk_buff *skb,
int code, int pos);
struct flowi;
struct in6_addr;
extern void icmpv6_flow_init(struct sock *sk,
struct flowi *fl,
u8 type,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
#endif
#endif

View file

@ -82,24 +82,10 @@ typedef unsigned char byte; /* used everywhere */
#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
#define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET])
#define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET])
#define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET])
#define IDE_SECTOR_REG (HWIF(drive)->io_ports[IDE_SECTOR_OFFSET])
#define IDE_LCYL_REG (HWIF(drive)->io_ports[IDE_LCYL_OFFSET])
#define IDE_HCYL_REG (HWIF(drive)->io_ports[IDE_HCYL_OFFSET])
#define IDE_SELECT_REG (HWIF(drive)->io_ports[IDE_SELECT_OFFSET])
#define IDE_STATUS_REG (HWIF(drive)->io_ports[IDE_STATUS_OFFSET])
#define IDE_CONTROL_REG (HWIF(drive)->io_ports[IDE_CONTROL_OFFSET])
#define IDE_IRQ_REG (HWIF(drive)->io_ports[IDE_IRQ_OFFSET])
#define IDE_FEATURE_REG IDE_ERROR_REG
#define IDE_COMMAND_REG IDE_STATUS_REG
#define IDE_ALTSTATUS_REG IDE_CONTROL_REG
#define IDE_IREASON_REG IDE_NSECTOR_REG
#define IDE_BCOUNTL_REG IDE_LCYL_REG
#define IDE_BCOUNTH_REG IDE_HCYL_REG
#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET
#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET
#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET
#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET
#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
@ -169,7 +155,7 @@ enum { ide_unknown, ide_generic, ide_pci,
ide_rz1000, ide_trm290,
ide_cmd646, ide_cy82c693, ide_4drives,
ide_pmac, ide_etrax100, ide_acorn,
ide_au1xxx, ide_palm3710, ide_forced
ide_au1xxx, ide_palm3710
};
typedef u8 hwif_chipset_t;
@ -186,14 +172,9 @@ typedef struct hw_regs_s {
} hw_regs_t;
struct hwif_s * ide_find_port(unsigned long);
struct hwif_s *ide_deprecated_find_port(unsigned long);
void ide_init_port_data(struct hwif_s *, unsigned int);
void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
struct ide_drive_s;
int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *),
struct hwif_s **);
static inline void ide_std_init_ports(hw_regs_t *hw,
unsigned long io_addr,
unsigned long ctl_addr)
@ -213,45 +194,6 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
#endif
/* needed on alpha, x86/x86_64, ia64, mips, ppc32 and sh */
#ifndef IDE_ARCH_OBSOLETE_DEFAULTS
# define ide_default_io_base(index) (0)
# define ide_default_irq(base) (0)
# define ide_init_default_irq(base) (0)
#endif
#ifdef CONFIG_IDE_ARCH_OBSOLETE_INIT
static inline void ide_init_hwif_ports(hw_regs_t *hw,
unsigned long io_addr,
unsigned long ctl_addr,
int *irq)
{
if (!ctl_addr)
ide_std_init_ports(hw, io_addr, ide_default_io_ctl(io_addr));
else
ide_std_init_ports(hw, io_addr, ctl_addr);
if (irq)
*irq = 0;
hw->io_ports[IDE_IRQ_OFFSET] = 0;
#ifdef CONFIG_PPC32
if (ppc_ide_md.ide_init_hwif)
ppc_ide_md.ide_init_hwif(hw, io_addr, ctl_addr, irq);
#endif
}
#else
static inline void ide_init_hwif_ports(hw_regs_t *hw,
unsigned long io_addr,
unsigned long ctl_addr,
int *irq)
{
if (io_addr || ctl_addr)
printk(KERN_WARNING "%s: must not be called\n", __FUNCTION__);
}
#endif /* CONFIG_IDE_ARCH_OBSOLETE_INIT */
/* Currently only m68k, apus and m8xx need it */
#ifndef IDE_ARCH_ACK_INTR
# define ide_ack_intr(hwif) (1)
@ -406,7 +348,7 @@ typedef struct ide_drive_s {
u8 wcache; /* status of write cache */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
u8 ctl; /* "normal" value for IDE_CONTROL_REG */
u8 ctl; /* "normal" value for Control register */
u8 ready_stat; /* min status value for drive ready */
u8 mult_count; /* current multiple sector setting */
u8 mult_req; /* requested multiple sector setting */
@ -507,8 +449,6 @@ typedef struct hwif_s {
void (*maskproc)(ide_drive_t *, int);
/* check host's drive quirk list */
void (*quirkproc)(ide_drive_t *);
/* driver soft-power interface */
int (*busproc)(ide_drive_t *, int);
#endif
u8 (*mdma_filter)(ide_drive_t *);
u8 (*udma_filter)(ide_drive_t *);
@ -578,7 +518,6 @@ typedef struct hwif_s {
unsigned noprobe : 1; /* don't probe for this interface */
unsigned present : 1; /* this interface exists */
unsigned hold : 1; /* this interface is always present */
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned reset : 1; /* reset after probe */
@ -586,7 +525,9 @@ typedef struct hwif_s {
unsigned mmio : 1; /* host uses MMIO */
unsigned straight8 : 1; /* Alan's straight 8 check */
struct device gendev;
struct device gendev;
struct device *portdev;
struct completion gendev_rel_comp; /* To deal with device release() */
void *hwif_data; /* extra hwif data */
@ -647,6 +588,68 @@ int set_io_32bit(ide_drive_t *, int);
int set_pio_mode(ide_drive_t *, int);
int set_using_dma(ide_drive_t *, int);
/* ATAPI packet command flags */
enum {
/* set when an error is considered normal - no retry (ide-tape) */
PC_FLAG_ABORT = (1 << 0),
PC_FLAG_SUPPRESS_ERROR = (1 << 1),
PC_FLAG_WAIT_FOR_DSC = (1 << 2),
PC_FLAG_DMA_OK = (1 << 3),
PC_FLAG_DMA_RECOMMENDED = (1 << 4),
PC_FLAG_DMA_IN_PROGRESS = (1 << 5),
PC_FLAG_DMA_ERROR = (1 << 6),
PC_FLAG_WRITING = (1 << 7),
/* command timed out */
PC_FLAG_TIMEDOUT = (1 << 8),
};
struct ide_atapi_pc {
/* actual packet bytes */
u8 c[12];
/* incremented on each retry */
int retries;
int error;
/* bytes to transfer */
int req_xfer;
/* bytes actually transferred */
int xferred;
/* data buffer */
u8 *buf;
/* current buffer position */
u8 *cur_pos;
int buf_size;
/* missing/available data on the current buffer */
int b_count;
/* the corresponding request */
struct request *rq;
unsigned long flags;
/*
* those are more or less driver-specific and some of them are subject
* to change/removal later.
*/
u8 pc_buf[256];
void (*idefloppy_callback) (ide_drive_t *);
ide_startstop_t (*idetape_callback) (ide_drive_t *);
/* idetape only */
struct idetape_bh *bh;
char *b_data;
/* idescsi only for now */
struct scatterlist *sg;
unsigned int sg_cnt;
struct scsi_cmnd *scsi_cmd;
void (*done) (struct scsi_cmnd *);
unsigned long timeout;
};
#ifdef CONFIG_IDE_PROC_FS
/*
* configurable drive settings
@ -691,6 +694,7 @@ void proc_ide_create(void);
void proc_ide_destroy(void);
void ide_proc_register_port(ide_hwif_t *);
void ide_proc_port_register_devices(ide_hwif_t *);
void ide_proc_unregister_device(ide_drive_t *);
void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, ide_driver_t *);
void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *);
@ -724,6 +728,7 @@ static inline void proc_ide_create(void) { ; }
static inline void proc_ide_destroy(void) { ; }
static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
@ -990,7 +995,6 @@ extern void do_ide_request(struct request_queue *);
void ide_init_disk(struct gendisk *, ide_drive_t *);
#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
extern int ide_scan_direction;
extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
#else
@ -1195,7 +1199,7 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
void ide_remove_port_from_hwgroup(ide_hwif_t *);
extern int ide_hwif_request_regions(ide_hwif_t *hwif);
extern void ide_hwif_release_regions(ide_hwif_t* hwif);
void ide_unregister(unsigned int, int, int);
void ide_unregister(unsigned int);
void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);
@ -1204,6 +1208,8 @@ void ide_undecoded_slave(ide_drive_t *);
int ide_device_add_all(u8 *idx, const struct ide_port_info *);
int ide_device_add(u8 idx[4], const struct ide_port_info *);
void ide_port_unregister_devices(ide_hwif_t *);
void ide_port_scan(ide_hwif_t *);
static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
{
@ -1279,6 +1285,7 @@ extern struct mutex ide_cfg_mtx;
#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;
/* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */
#define ide_id_has_flush_cache(id) ((id)->cfs_enable_2 & 0x3000)
@ -1307,7 +1314,10 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
static inline void ide_set_irq(ide_drive_t *drive, int on)
{
drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG);
ide_hwif_t *hwif = drive->hwif;
hwif->OUTB(drive->ctl | (on ? 0 : 2),
hwif->io_ports[IDE_CONTROL_OFFSET]);
}
static inline u8 ide_read_status(ide_drive_t *drive)
@ -1331,4 +1341,26 @@ static inline u8 ide_read_error(ide_drive_t *drive)
return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
}
/*
* Too bad. The drive wants to send us data which we are not ready to accept.
* Just throw it away.
*/
static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
{
ide_hwif_t *hwif = drive->hwif;
/* FIXME: use ->atapi_input_bytes */
while (bcount--)
(void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]);
}
static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
{
ide_hwif_t *hwif = drive->hwif;
/* FIXME: use ->atapi_output_bytes */
while (bcount--)
hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]);
}
#endif /* _IDE_H */

View file

@ -97,6 +97,7 @@
#define IEEE80211_MAX_FRAME_LEN 2352
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
struct ieee80211_hdr {
__le16 frame_control;
@ -109,6 +110,16 @@ struct ieee80211_hdr {
} __attribute__ ((packed));
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
u8 seqnum[3];
u8 eaddr1[6];
u8 eaddr2[6];
u8 eaddr3[6];
} __attribute__ ((packed));
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@ -206,6 +217,23 @@ struct ieee80211_mgmt {
__le16 params;
__le16 reason_code;
} __attribute__((packed)) delba;
struct{
u8 action_code;
/* capab_info for open and confirm,
* reason for close
*/
__le16 aux;
/* Followed in plink_confirm by status
* code, AID and supported rates,
* and directly by supported rates in
* plink_open and plink_close
*/
u8 variable[0];
} __attribute__((packed)) plink_action;
struct{
u8 action_code;
u8 variable[0];
} __attribute__((packed)) mesh_action;
} u;
} __attribute__ ((packed)) action;
} u;
@ -437,6 +465,13 @@ enum ieee80211_eid {
WLAN_EID_TS_DELAY = 43,
WLAN_EID_TCLAS_PROCESSING = 44,
WLAN_EID_QOS_CAPA = 46,
/* 802.11s */
WLAN_EID_MESH_CONFIG = 36, /* Pending IEEE 802.11 ANA approval */
WLAN_EID_MESH_ID = 37, /* Pending IEEE 802.11 ANA approval */
WLAN_EID_PEER_LINK = 40, /* Pending IEEE 802.11 ANA approval */
WLAN_EID_PREQ = 53, /* Pending IEEE 802.11 ANA approval */
WLAN_EID_PREP = 54, /* Pending IEEE 802.11 ANA approval */
WLAN_EID_PERR = 55, /* Pending IEEE 802.11 ANA approval */
/* 802.11h */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,

View file

@ -156,6 +156,12 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
{
return (struct arphdr *)skb_network_header(skb);
}
static inline int arp_hdr_len(struct net_device *dev)
{
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
#endif
#endif /* _LINUX_IF_ARP_H */
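As a quick sanity check of the new arp_hdr_len() helper: for an Ethernet device (dev->addr_len == 6) it evaluates to sizeof(struct arphdr) + (6 + 4) * 2 = 8 + 20 = 28 bytes, the familiar size of an IPv4 ARP packet.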

View file

@ -18,47 +18,8 @@
#ifndef __IF_TUN_H
#define __IF_TUN_H
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
#include <linux/types.h>
#ifdef __KERNEL__
#ifdef TUN_DEBUG
#define DBG if(tun->debug)printk
#define DBG1 if(debug==2)printk
#else
#define DBG( a... )
#define DBG1( a... )
#endif
struct tun_struct {
struct list_head list;
unsigned long flags;
int attached;
uid_t owner;
gid_t group;
wait_queue_head_t read_wait;
struct sk_buff_head readq;
struct net_device *dev;
struct fasync_struct *fasync;
unsigned long if_flags;
u8 dev_addr[ETH_ALEN];
u32 chr_filter[2];
u32 net_filter[2];
#ifdef TUN_DEBUG
int debug;
#endif
};
#endif /* __KERNEL__ */
/* Read queue size */
#define TUN_READQ_SIZE 500

View file

@ -7,6 +7,10 @@
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)
#define SIOCDELTUNNEL (SIOCDEVPRIVATE + 2)
#define SIOCCHGTUNNEL (SIOCDEVPRIVATE + 3)
#define SIOCGETPRL (SIOCDEVPRIVATE + 4)
#define SIOCADDPRL (SIOCDEVPRIVATE + 5)
#define SIOCDELPRL (SIOCDEVPRIVATE + 6)
#define SIOCCHGPRL (SIOCDEVPRIVATE + 7)
#define GRE_CSUM __constant_htons(0x8000)
#define GRE_ROUTING __constant_htons(0x4000)
@ -17,9 +21,6 @@
#define GRE_FLAGS __constant_htons(0x00F8)
#define GRE_VERSION __constant_htons(0x0007)
/* i_flags values for SIT mode */
#define SIT_ISATAP 0x0001
struct ip_tunnel_parm
{
char name[IFNAMSIZ];
@ -31,4 +32,19 @@ struct ip_tunnel_parm
struct iphdr iph;
};
/* SIT-mode i_flags */
#define SIT_ISATAP 0x0001
struct ip_tunnel_prl {
__be32 addr;
__u16 flags;
__u16 __reserved;
__u32 datalen;
__u32 __reserved2;
void __user *data;
};
/* PRL flags */
#define PRL_DEFAULT 0x0001
#endif /* _IF_TUNNEL_H_ */
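For orientation, a hedged sketch (not part of this commit) of how the new SIOCADDPRL/SIOCDELPRL/SIOCGETPRL ioctls are expected to be used: they are issued against a SIT/ISATAP tunnel device with ifr_data pointing at a struct ip_tunnel_prl. The socket, device name, router address and include order below are assumptions.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

/* Illustrative sketch: add a default potential-router-list entry to an
 * ISATAP tunnel.  'fd' is any AF_INET socket, 'tunnel' the sit device name.
 */
static int demo_add_prl_entry(int fd, const char *tunnel, struct in_addr router)
{
	struct ip_tunnel_prl prl;
	struct ifreq ifr;

	memset(&prl, 0, sizeof(prl));
	prl.addr  = router.s_addr;	/* IPv4 address of the ISATAP router */
	prl.flags = PRL_DEFAULT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, tunnel, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&prl;

	return ioctl(fd, SIOCADDPRL, &ifr);
}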

View file

@ -81,7 +81,9 @@ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
struct net_device *real_dev; /* The ethernet(like) device
* the vlan is attached to.
*/
unsigned int nr_vlans;
struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
@ -93,7 +95,7 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
static inline void vlan_group_set_device(struct vlan_group *vg,

View file

@ -1,124 +0,0 @@
/*****************************************************************************
* if_wanpipe.h Header file for the Sangoma AF_WANPIPE Socket
*
* Author: Nenad Corbic
*
* Copyright: (c) 2000 Sangoma Technologies Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
*
* Jan 28, 2000 Nenad Corbic Initial Version
*
*****************************************************************************/
#ifndef __LINUX_IF_WAN_PACKET_H
#define __LINUX_IF_WAN_PACKET_H
struct wan_sockaddr_ll
{
unsigned short sll_family;
unsigned short sll_protocol;
int sll_ifindex;
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
unsigned char sll_addr[8];
unsigned char sll_device[14];
unsigned char sll_card[14];
};
typedef struct
{
unsigned char free;
unsigned char state_sk;
int rcvbuf;
int sndbuf;
int rmem;
int wmem;
int sk_count;
unsigned char bound;
char name[14];
unsigned char d_state;
unsigned char svc;
unsigned short lcn;
unsigned char mbox;
unsigned char cmd_busy;
unsigned char command;
unsigned poll;
unsigned poll_cnt;
int rblock;
} wan_debug_hdr_t;
#define MAX_NUM_DEBUG 10
#define X25_PROT 0x16
#define PVC_PROT 0x17
typedef struct
{
wan_debug_hdr_t debug[MAX_NUM_DEBUG];
}wan_debug_t;
#define SIOC_WANPIPE_GET_CALL_DATA (SIOCPROTOPRIVATE + 0)
#define SIOC_WANPIPE_SET_CALL_DATA (SIOCPROTOPRIVATE + 1)
#define SIOC_WANPIPE_ACCEPT_CALL (SIOCPROTOPRIVATE + 2)
#define SIOC_WANPIPE_CLEAR_CALL (SIOCPROTOPRIVATE + 3)
#define SIOC_WANPIPE_RESET_CALL (SIOCPROTOPRIVATE + 4)
#define SIOC_WANPIPE_DEBUG (SIOCPROTOPRIVATE + 5)
#define SIOC_WANPIPE_SET_NONBLOCK (SIOCPROTOPRIVATE + 6)
#define SIOC_WANPIPE_CHECK_TX (SIOCPROTOPRIVATE + 7)
#define SIOC_WANPIPE_SOCK_STATE (SIOCPROTOPRIVATE + 8)
/* Packet types */
#define WAN_PACKET_HOST 0 /* To us */
#define WAN_PACKET_BROADCAST 1 /* To all */
#define WAN_PACKET_MULTICAST 2 /* To group */
#define WAN_PACKET_OTHERHOST 3 /* To someone else */
#define WAN_PACKET_OUTGOING 4 /* Outgoing of any type */
/* These ones are invisible by user level */
#define WAN_PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
#define WAN_PACKET_FASTROUTE 6 /* Fastrouted frame */
/* X25 specific */
#define WAN_PACKET_DATA 7
#define WAN_PACKET_CMD 8
#define WAN_PACKET_ASYNC 9
#define WAN_PACKET_ERR 10
/* Packet socket options */
#define WAN_PACKET_ADD_MEMBERSHIP 1
#define WAN_PACKET_DROP_MEMBERSHIP 2
#define WAN_PACKET_MR_MULTICAST 0
#define WAN_PACKET_MR_PROMISC 1
#define WAN_PACKET_MR_ALLMULTI 2
#ifdef __KERNEL__
/* Private wanpipe socket structures. */
struct wanpipe_opt
{
void *mbox; /* Mail box */
void *card; /* Card bouded to */
struct net_device *dev; /* Bounded device */
unsigned short lcn; /* Binded LCN */
unsigned char svc; /* 0=pvc, 1=svc */
unsigned char timer; /* flag for delayed transmit*/
struct timer_list tx_timer;
unsigned poll_cnt;
unsigned char force; /* Used to force sock release */
atomic_t packet_sent;
unsigned short num;
};
#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->sk_protinfo)
#endif
#endif

View file

@ -80,27 +80,6 @@ struct igmpv3_query {
__be32 srcs[0];
};
#ifdef __KERNEL__
#include <linux/skbuff.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
{
return (struct igmphdr *)skb_transport_header(skb);
}
static inline struct igmpv3_report *
igmpv3_report_hdr(const struct sk_buff *skb)
{
return (struct igmpv3_report *)skb_transport_header(skb);
}
static inline struct igmpv3_query *
igmpv3_query_hdr(const struct sk_buff *skb)
{
return (struct igmpv3_query *)skb_transport_header(skb);
}
#endif
#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
#define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */
#define IGMP_DVMRP 0x13 /* DVMRP routing */
@ -151,6 +130,23 @@ static inline struct igmpv3_query *
#include <linux/timer.h>
#include <linux/in.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
{
return (struct igmphdr *)skb_transport_header(skb);
}
static inline struct igmpv3_report *
igmpv3_report_hdr(const struct sk_buff *skb)
{
return (struct igmpv3_report *)skb_transport_header(skb);
}
static inline struct igmpv3_query *
igmpv3_query_hdr(const struct sk_buff *skb)
{
return (struct igmpv3_query *)skb_transport_header(skb);
}
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;

View file

@ -48,6 +48,14 @@ extern const struct in6_addr in6addr_any;
#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
extern const struct in6_addr in6addr_loopback;
#define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
#ifdef __KERNEL__
extern const struct in6_addr in6addr_linklocal_allnodes;
#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \
{ { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
extern const struct in6_addr in6addr_linklocal_allrouters;
#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \
{ { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
#endif
struct sockaddr_in6 {
unsigned short int sin6_family; /* AF_INET6 */
@ -249,4 +257,30 @@ struct in6_flowlabel_req
* IP6T_SO_GET_REVISION_TARGET 69
*/
/* RFC5014: Source address selection */
#define IPV6_ADDR_PREFERENCES 72
#define IPV6_PREFER_SRC_TMP 0x0001
#define IPV6_PREFER_SRC_PUBLIC 0x0002
#define IPV6_PREFER_SRC_PUBTMP_DEFAULT 0x0100
#define IPV6_PREFER_SRC_COA 0x0004
#define IPV6_PREFER_SRC_HOME 0x0400
#define IPV6_PREFER_SRC_CGA 0x0008
#define IPV6_PREFER_SRC_NONCGA 0x0800
/*
* Multicast Routing:
* see include/linux/mroute6.h.
*
* MRT6_INIT 200
* MRT6_DONE 201
* MRT6_ADD_MIF 202
* MRT6_DEL_MIF 203
* MRT6_ADD_MFC 204
* MRT6_DEL_MFC 205
* MRT6_VERSION 206
* MRT6_ASSERT 207
* MRT6_PIM 208
* (reserved) 209
*/
#endif
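A short usage sketch for the new RFC 5014 knob above (illustrative, not from this commit): a socket asks the stack to prefer, say, public over temporary source addresses via setsockopt() at the IPPROTO_IPV6 level. The IPV6_ADDR_PREFERENCES and IPV6_PREFER_SRC_PUBLIC values are the ones defined in the hunk above; libc headers of this era may not yet export them.

#include <netinet/in.h>
#include <sys/socket.h>

/* Illustrative sketch: prefer public (non-temporary) IPv6 source addresses
 * for this socket, using the IPV6_ADDR_PREFERENCES option defined above.
 */
static int demo_prefer_public_source(int fd)
{
	int prefs = IPV6_PREFER_SRC_PUBLIC;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_ADDR_PREFERENCES,
			  &prefs, sizeof(prefs));
}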

View file

@ -70,13 +70,13 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), NET_IPV4_CONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) && \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) || \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_MAXCONF(in_dev, attr) \
(max(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr), \
(max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
@ -129,7 +129,7 @@ extern int unregister_inetaddr_notifier(struct notifier_block *nb);
extern struct net_device *ip_dev_find(struct net *net, __be32 addr);
extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
extern int devinet_ioctl(unsigned int cmd, void __user *);
extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
extern void devinet_init(void);
extern struct in_device *inetdev_by_index(struct net *, int);
extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);

View file

@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
return -EINVAL;
}
static inline int irq_can_set_affinity(unsigned int irq)
{
return 0;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.

View file

@ -91,8 +91,10 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* if ref count is zero, don't allow sharing (ioc is going away, it's
* a race).
*/
if (ioc && atomic_inc_not_zero(&ioc->refcount))
if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
atomic_inc(&ioc->nr_tasks);
return ioc;
}
return NULL;
}

View file

@ -53,7 +53,7 @@ struct ipv6_opt_hdr {
/*
* TLV encoded option data follows.
*/
};
} __attribute__ ((packed)); /* required for some archs */
#define ipv6_destopt_hdr ipv6_opt_hdr
#define ipv6_hopopt_hdr ipv6_opt_hdr
@ -159,6 +159,9 @@ struct ipv6_devconf {
__s32 accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
__s32 optimistic_dad;
#endif
#ifdef CONFIG_IPV6_MROUTE
__s32 mc_forwarding;
#endif
void *sysctl;
};
@ -190,6 +193,7 @@ enum {
DEVCONF_PROXY_NDP,
DEVCONF_OPTIMISTIC_DAD,
DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_MC_FORWARDING,
DEVCONF_MAX
};
@ -230,6 +234,7 @@ struct inet6_skb_parm {
#endif
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
@ -274,8 +279,29 @@ struct ipv6_pinfo {
__be32 flow_label;
__u32 frag_size;
__s16 hop_limit;
__s16 mcast_hops;
/*
* Packed in 16bits.
* Omit one shift by putting the signed field at MSB.
*/
#if defined(__BIG_ENDIAN_BITFIELD)
__s16 hop_limit:9;
__u16 __unused_1:7;
#else
__u16 __unused_1:7;
__s16 hop_limit:9;
#endif
#if defined(__BIG_ENDIAN_BITFIELD)
/* Packed in 16bits. */
__s16 mcast_hops:9;
__u16 __unused_2:6,
mc_loop:1;
#else
__u16 mc_loop:1,
__unused_2:6;
__s16 mcast_hops:9;
#endif
int mcast_oif;
/* pktoption flags */
@ -298,11 +324,14 @@ struct ipv6_pinfo {
} rxopt;
/* sockopt flags */
__u8 mc_loop:1,
recverr:1,
__u8 recverr:1,
sndflow:1,
pmtudisc:2,
ipv6only:1;
ipv6only:1,
srcprefs:3; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
__u8 tclass;
__u32 dst_cookie;
@ -315,9 +344,8 @@ struct ipv6_pinfo {
struct sk_buff *pktoptions;
struct {
struct ipv6_txoptions *opt;
struct rt6_info *rt;
int hop_limit;
int tclass;
u8 hop_limit;
u8 tclass;
} cork;
};
@ -458,7 +486,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
(((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \
((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
@ -466,7 +494,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET6_TW_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif) \
(((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \
(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \
(*((__portpair *)&(inet_twsk(__sk)->tw_dport)) == (__ports)) && \
((__sk)->sk_family == PF_INET6) && \
(ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))) && \

View file

@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
#endif /* CONFIG_GENERIC_PENDING_IRQ */
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
#else /* CONFIG_SMP */
#define move_native_irq(x)
#define move_masked_irq(x)
static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
return -EINVAL;
}
static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQBALANCE

View file

@ -16,14 +16,8 @@
#include <linux/ioctl.h>
#ifdef CONFIG_COBALT_MICRO_SERVER
/* Save memory */
#define ISDN_MAX_DRIVERS 2
#define ISDN_MAX_CHANNELS 8
#else
#define ISDN_MAX_DRIVERS 32
#define ISDN_MAX_CHANNELS 64
#endif
/* New ioctl-codes */
#define IIOCNETAIF _IO('I',1)

include/linux/kgdb.h (new file, 281 lines)
View file

@ -0,0 +1,281 @@
/*
* This provides the callbacks and functions that KGDB needs to share between
* the core, I/O and arch-specific portions.
*
* Author: Amit Kale <amitkale@linsyssoft.com> and
* Tom Rini <trini@kernel.crashing.org>
*
* 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc.
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _KGDB_H_
#define _KGDB_H_
#include <linux/serial_8250.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/kgdb.h>
struct pt_regs;
/**
* kgdb_skipexception - (optional) exit kgdb_handle_exception early
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
* This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
/**
* kgdb_post_primary_code - (optional) Save error vector/code numbers.
* @regs: Original pt_regs.
* @e_vector: Original error vector.
* @err_code: Original error code.
*
* This is usually needed on architectures which support SMP and
* KGDB. This function is called after all the secondary cpus have
* been put into a known spin state and the primary CPU has control over
* KGDB.
*/
extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
int err_code);
/**
* kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
* @regs: Current &struct pt_regs.
*
* This function will be called if the particular architecture must
* disable hardware debugging while it is processing gdb packets or
* handling an exception.
*/
extern void kgdb_disable_hw_debug(struct pt_regs *regs);
struct tasklet_struct;
struct task_struct;
struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
* This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
*/
void kgdb_breakpoint(void);
extern int kgdb_connected;
extern atomic_t kgdb_setting_breakpoint;
extern atomic_t kgdb_cpu_doing_single_step;
extern struct task_struct *kgdb_usethread;
extern struct task_struct *kgdb_contthread;
enum kgdb_bptype {
BP_BREAKPOINT = 0,
BP_HARDWARE_BREAKPOINT,
BP_WRITE_WATCHPOINT,
BP_READ_WATCHPOINT,
BP_ACCESS_WATCHPOINT
};
enum kgdb_bpstate {
BP_UNDEFINED = 0,
BP_REMOVED,
BP_SET,
BP_ACTIVE
};
struct kgdb_bkpt {
unsigned long bpt_addr;
unsigned char saved_instr[BREAK_INSTR_SIZE];
enum kgdb_bptype type;
enum kgdb_bpstate state;
};
#ifndef KGDB_MAX_BREAKPOINTS
# define KGDB_MAX_BREAKPOINTS 1000
#endif
#define KGDB_HW_BREAKPOINT 1
/*
* Functions each KGDB-supporting architecture must provide:
*/
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
/**
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
extern void kgdb_arch_exit(void);
/**
* pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
* @regs: The &struct pt_regs of the current process.
*
* Convert the pt_regs in @regs into the format for registers that
* GDB expects, stored in @gdb_regs.
*/
extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs);
/**
* sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
* @p: The &struct task_struct of the desired process.
*
* Convert the register values of the sleeping process in @p to
* the format that GDB expects.
* This function is called when kgdb does not have access to the
* &struct pt_regs and therefore it should fill the gdb registers
* @gdb_regs with what has been saved in &struct thread_struct
* thread field during switch_to.
*/
extern void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p);
/**
* gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs.
* @gdb_regs: A pointer to hold the registers we've received from GDB.
* @regs: A pointer to a &struct pt_regs to hold these values in.
*
* Convert the GDB regs in @gdb_regs into the pt_regs, and store them
* in @regs.
*/
extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs);
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*/
extern int
kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer,
char *remcom_out_buffer,
struct pt_regs *regs);
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them be in a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
* this case, we have to make sure that interrupts are enabled before
* calling smp_call_function(). The argument to this function is
* the flags that will be used when restoring the interrupts. There is
* a local_irq_save() call before kgdb_roundup_cpus().
*
* On non-SMP systems, this is not called.
*/
extern void kgdb_roundup_cpus(unsigned long flags);
/* Optional functions. */
extern int kgdb_validate_break_address(unsigned long addr);
extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
/**
* struct kgdb_arch - Describe architecture specific values.
* @gdb_bpt_instr: The instruction to trigger a breakpoint.
* @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT.
* @set_breakpoint: Allow an architecture to specify how to set a software
* breakpoint.
* @remove_breakpoint: Allow an architecture to specify how to remove a
* software breakpoint.
* @set_hw_breakpoint: Allow an architecture to specify how to set a hardware
* breakpoint.
* @remove_hw_breakpoint: Allow an architecture to specify how to remove a
* hardware breakpoint.
* @remove_all_hw_break: Allow an architecture to specify how to remove all
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
unsigned long flags;
int (*set_breakpoint)(unsigned long, char *);
int (*remove_breakpoint)(unsigned long, char *);
int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
};
/**
* struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
* @name: Name of the I/O driver.
* @read_char: Pointer to a function that will return one char.
* @write_char: Pointer to a function that will write one char.
* @flush: Pointer to a function that will flush any pending writes.
* @init: Pointer to a function that will initialize the device.
* @pre_exception: Pointer to a function that will do any prep work for
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
*/
struct kgdb_io {
const char *name;
int (*read_char) (void);
void (*write_char) (u8);
void (*flush) (void);
int (*init) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
};
extern struct kgdb_arch arch_kgdb_ops;
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern int kgdb_hex2long(char **ptr, long *long_val);
extern int kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
struct pt_regs *regs);
extern int kgdb_nmicallback(int cpu, void *regs);
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#endif /* _KGDB_H_ */
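To make the kgdb_io contract above concrete, here is a hedged sketch of a minimal polled I/O backend registering itself with the core. my_poll_get_char() and my_poll_put_char() stand in for board-specific console helpers and are not part of this header.

#include <linux/init.h>
#include <linux/kgdb.h>

/* Hypothetical board helpers, assumed to exist elsewhere. */
extern int my_poll_get_char(void);
extern void my_poll_put_char(u8 c);

static int demo_kgdb_read_char(void)
{
	return my_poll_get_char();	/* poll until a byte arrives */
}

static void demo_kgdb_write_char(u8 c)
{
	my_poll_put_char(c);
}

static struct kgdb_io demo_kgdb_io_ops = {
	.name		= "demo_kgdbio",
	.read_char	= demo_kgdb_read_char,
	.write_char	= demo_kgdb_write_char,
};

static int __init demo_kgdb_io_init(void)
{
	/* Hand the debugger transport over to this driver. */
	return kgdb_register_io_module(&demo_kgdb_io_ops);
}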

View file

@ -122,6 +122,8 @@ enum {
ATAPI_MAX_DRAIN = 16 << 10,
ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
ATA_SHT_EMULATED = 1,
ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
@ -163,9 +165,6 @@ enum {
ATA_DEV_NONE = 9, /* no device */
/* struct ata_link flags */
ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */
ATA_LFLAG_SKIP_D2H_BSY = (1 << 1), /* can't wait for the first D2H
* Register FIS clearing BSY */
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */
@ -225,6 +224,7 @@ enum {
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
@ -249,6 +249,25 @@ enum {
*/
ATA_TMOUT_FF_WAIT = 4 * HZ / 5,
/* Spec mandates to wait for ">= 2ms" before checking status
* after reset. We wait 150ms, because that was the magic
* delay used for ATAPI devices in Hale Landis's ATADRVR, for
* the period of time between when the ATA command register is
* written, and then status is checked. Because waiting for
* "a while" before checking status is fine, post SRST, we
* perform this magic delay here as well.
*
* Old drivers/ide uses the 2mS rule and then waits for ready.
*/
ATA_WAIT_AFTER_RESET_MSECS = 150,
/* If PMP is supported, we have to do follow-up SRST. As some
* PMPs don't send D2H Reg FIS after hardreset, LLDs are
* advised to wait only for the following duration before
* doing SRST.
*/
ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ,
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
@ -292,17 +311,16 @@ enum {
/* reset / recovery action types */
ATA_EH_REVALIDATE = (1 << 0),
ATA_EH_SOFTRESET = (1 << 1),
ATA_EH_HARDRESET = (1 << 2),
ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */
ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_LPM = (1 << 4), /* link power management action */
ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
/* ata_eh_info->flags */
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
@ -313,7 +331,6 @@ enum {
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
@ -352,6 +369,22 @@ enum {
ATAPI_READ_CD = 2, /* READ CD [MSF] */
ATAPI_PASS_THRU = 3, /* SAT pass-thru */
ATAPI_MISC = 4, /* the rest */
/* Timing constants */
ATA_TIMING_SETUP = (1 << 0),
ATA_TIMING_ACT8B = (1 << 1),
ATA_TIMING_REC8B = (1 << 2),
ATA_TIMING_CYC8B = (1 << 3),
ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
ATA_TIMING_CYC8B,
ATA_TIMING_ACTIVE = (1 << 4),
ATA_TIMING_RECOVER = (1 << 5),
ATA_TIMING_CYCLE = (1 << 6),
ATA_TIMING_UDMA = (1 << 7),
ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};
enum ata_xfer_mask {
@ -412,6 +445,7 @@ enum link_pm {
};
extern struct class_device_attribute class_device_attr_link_power_management_policy;
#ifdef CONFIG_ATA_SFF
struct ata_ioports {
void __iomem *cmd_addr;
void __iomem *data_addr;
@ -429,6 +463,7 @@ struct ata_ioports {
void __iomem *bmdma_addr;
void __iomem *scr_addr;
};
#endif /* CONFIG_ATA_SFF */
struct ata_host {
spinlock_t lock;
@ -436,7 +471,7 @@ struct ata_host {
void __iomem * const *iomap;
unsigned int n_ports;
void *private_data;
const struct ata_port_operations *ops;
struct ata_port_operations *ops;
unsigned long flags;
#ifdef CONFIG_ATA_ACPI
acpi_handle acpi_handle;
@ -605,7 +640,7 @@ struct ata_link {
struct ata_port {
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
const struct ata_port_operations *ops;
struct ata_port_operations *ops;
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int pflags; /* ATA_PFLAG_xxx */
@ -615,7 +650,9 @@ struct ata_port {
struct ata_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
#ifdef CONFIG_ATA_SFF
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
#endif /* CONFIG_ATA_SFF */
u8 ctl; /* cache of ATA control register */
u8 last_ctl; /* Cache last written value */
@ -667,81 +704,108 @@ struct ata_port {
u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};
/* The following initializer overrides a method to NULL whether or not one of
* its parents has the method defined. This is equivalent to
* ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant
* expression and thus can't be used as an initializer.
*/
#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
struct ata_port_operations {
void (*dev_config) (struct ata_device *);
void (*set_piomode) (struct ata_port *, struct ata_device *);
void (*set_dmamode) (struct ata_port *, struct ata_device *);
unsigned long (*mode_filter) (struct ata_device *, unsigned long);
void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
u8 (*check_status)(struct ata_port *ap);
u8 (*check_altstatus)(struct ata_port *ap);
void (*dev_select)(struct ata_port *ap, unsigned int device);
void (*phy_reset) (struct ata_port *ap); /* obsolete */
int (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev);
int (*cable_detect) (struct ata_port *ap);
int (*check_atapi_dma) (struct ata_queued_cmd *qc);
void (*bmdma_setup) (struct ata_queued_cmd *qc);
void (*bmdma_start) (struct ata_queued_cmd *qc);
unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf,
unsigned int buflen, int rw);
int (*qc_defer) (struct ata_queued_cmd *qc);
void (*qc_prep) (struct ata_queued_cmd *qc);
unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
/* port multiplier */
void (*pmp_attach) (struct ata_port *ap);
void (*pmp_detach) (struct ata_port *ap);
/* Error handlers. ->error_handler overrides ->eng_timeout and
* indicates that new-style EH is in place.
/*
* Command execution
*/
void (*eng_timeout) (struct ata_port *ap); /* obsolete */
int (*qc_defer)(struct ata_queued_cmd *qc);
int (*check_atapi_dma)(struct ata_queued_cmd *qc);
void (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
void (*freeze) (struct ata_port *ap);
void (*thaw) (struct ata_port *ap);
void (*error_handler) (struct ata_port *ap);
void (*post_internal_cmd) (struct ata_queued_cmd *qc);
/*
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
irq_handler_t irq_handler;
void (*irq_clear) (struct ata_port *);
u8 (*irq_on) (struct ata_port *);
void (*dev_config)(struct ata_device *dev);
int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val);
int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val);
void (*freeze)(struct ata_port *ap);
void (*thaw)(struct ata_port *ap);
ata_prereset_fn_t prereset;
ata_reset_fn_t softreset;
ata_reset_fn_t hardreset;
ata_postreset_fn_t postreset;
ata_prereset_fn_t pmp_prereset;
ata_reset_fn_t pmp_softreset;
ata_reset_fn_t pmp_hardreset;
ata_postreset_fn_t pmp_postreset;
void (*error_handler)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
int (*port_resume) (struct ata_port *ap);
int (*enable_pm) (struct ata_port *ap, enum link_pm policy);
void (*disable_pm) (struct ata_port *ap);
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
/*
* Optional features
*/
int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val);
int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val);
void (*pmp_attach)(struct ata_port *ap);
void (*pmp_detach)(struct ata_port *ap);
int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
void (*disable_pm)(struct ata_port *ap);
void (*host_stop) (struct ata_host *host);
/*
* Start, stop, suspend and resume
*/
int (*port_suspend)(struct ata_port *ap, pm_message_t mesg);
int (*port_resume)(struct ata_port *ap);
int (*port_start)(struct ata_port *ap);
void (*port_stop)(struct ata_port *ap);
void (*host_stop)(struct ata_host *host);
void (*bmdma_stop) (struct ata_queued_cmd *qc);
u8 (*bmdma_status) (struct ata_port *ap);
#ifdef CONFIG_ATA_SFF
/*
* SFF / taskfile oriented ops
*/
void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
u8 (*sff_check_status)(struct ata_port *ap);
u8 (*sff_check_altstatus)(struct ata_port *ap);
void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
void (*sff_exec_command)(struct ata_port *ap,
const struct ata_taskfile *tf);
unsigned int (*sff_data_xfer)(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
u8 (*sff_irq_on)(struct ata_port *);
void (*sff_irq_clear)(struct ata_port *);
void (*bmdma_setup)(struct ata_queued_cmd *qc);
void (*bmdma_start)(struct ata_queued_cmd *qc);
void (*bmdma_stop)(struct ata_queued_cmd *qc);
u8 (*bmdma_status)(struct ata_port *ap);
#endif /* CONFIG_ATA_SFF */
/*
* Obsolete
*/
void (*phy_reset)(struct ata_port *ap);
void (*eng_timeout)(struct ata_port *ap);
/*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
const struct ata_port_operations *inherits;
};
struct ata_port_info {
struct scsi_host_template *sht;
unsigned long flags;
unsigned long link_flags;
unsigned long pio_mask;
unsigned long mwdma_mask;
unsigned long udma_mask;
const struct ata_port_operations *port_ops;
irq_handler_t irq_handler;
struct ata_port_operations *port_ops;
void *private_data;
};
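To show what the reworked ata_port_operations buys a low-level driver, here is a sketch under the assumption of an SFF base ops table such as ata_sff_port_ops (declared outside this hunk): a driver lists only the methods it overrides, chains the rest through ->inherits, and can explicitly mask an inherited method with ATA_OP_NULL. demo_set_piomode is a hypothetical callback.

#include <linux/libata.h>

/* Hypothetical PIO-mode hook; body assumed to live elsewhere in the driver. */
static void demo_set_piomode(struct ata_port *ap, struct ata_device *adev);

/* Illustrative LLD ops table using the new inheritance mechanism. */
static struct ata_port_operations demo_port_ops = {
	.inherits	= &ata_sff_port_ops,	/* assumed SFF base ops */
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= demo_set_piomode,
	.bmdma_setup	= ATA_OP_NULL,		/* never run the inherited BMDMA setup */
};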
@ -759,11 +823,14 @@ struct ata_timing {
#define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin)
/*
* Core layer - drivers/ata/libata-core.c
*/
extern const unsigned long sata_deb_timing_normal[];
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];
extern const struct ata_port_operations ata_dummy_port_ops;
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
static inline const unsigned long *
@@ -782,22 +849,21 @@ static inline int ata_port_is_dummy(struct ata_port *ap)
extern void sata_print_link_status(struct ata_link *link);
extern void ata_port_probe(struct ata_port *);
extern void ata_bus_reset(struct ata_port *ap);
extern int sata_set_spd(struct ata_link *link);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int sata_link_debounce(struct ata_link *link,
const unsigned long *params, unsigned long deadline);
extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
unsigned long deadline);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_std_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
const unsigned long *timing, unsigned long deadline);
const unsigned long *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
@@ -810,7 +876,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *,
unsigned long, const struct ata_port_operations *);
unsigned long, struct ata_port_operations *);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
@@ -823,7 +889,6 @@ extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -835,21 +900,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
#endif
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat, unsigned long timeout);
extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline);
extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
unsigned long interval_msec,
unsigned long timeout_msec);
extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
u8 *r_err);
/*
* Default driver ops implementations
*/
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern int atapi_cmd_type(u8 opcode);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -864,23 +917,9 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start(struct ata_port *ap);
extern int ata_sff_port_start(struct ata_port *ap);
extern irqreturn_t ata_interrupt(int irq, void *dev_instance);
extern unsigned int ata_data_xfer(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_data_xfer_noirq(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -889,24 +928,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
extern void ata_bmdma_start(struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset,
ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
void (*finish_qc)(struct ata_queued_cmd *));
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
@@ -918,7 +941,6 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern u8 ata_irq_on(struct ata_port *ap);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -926,10 +948,7 @@ extern int ata_cable_sata(struct ata_port *ap);
extern int ata_cable_ignore(struct ata_port *ap);
extern int ata_cable_unknown(struct ata_port *ap);
/*
* Timing helpers
*/
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
extern int ata_timing_compute(struct ata_device *, unsigned short,
@@ -939,24 +958,31 @@ extern void ata_timing_merge(const struct ata_timing *,
unsigned int);
extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
enum {
ATA_TIMING_SETUP = (1 << 0),
ATA_TIMING_ACT8B = (1 << 1),
ATA_TIMING_REC8B = (1 << 2),
ATA_TIMING_CYC8B = (1 << 3),
ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
ATA_TIMING_CYC8B,
ATA_TIMING_ACTIVE = (1 << 4),
ATA_TIMING_RECOVER = (1 << 5),
ATA_TIMING_CYCLE = (1 << 6),
ATA_TIMING_UDMA = (1 << 7),
ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
/* PCI */
#ifdef CONFIG_PCI
struct pci_dev;
struct pci_bits {
unsigned int reg; /* PCI config register to read */
unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
unsigned long mask;
unsigned long val;
};
/* libata-acpi.c */
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
/*
* ACPI - drivers/ata/libata-acpi.c
*/
#ifdef CONFIG_ATA_ACPI
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
{
@@ -1000,56 +1026,8 @@ static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
}
#endif
#ifdef CONFIG_PCI
struct pci_dev;
extern int ata_pci_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
struct pci_bits {
unsigned int reg; /* PCI config register to read */
unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
unsigned long mask;
unsigned long val;
};
extern int ata_pci_init_sff_host(struct ata_host *host);
extern int ata_pci_init_bmdma(struct ata_host *host);
extern int ata_pci_prepare_sff_host(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
struct ata_host **r_host);
extern int ata_pci_activate_sff_host(struct ata_host *host,
irq_handler_t irq_handler,
struct scsi_host_template *sht);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(struct ata_device *dev,
unsigned long xfer_mask);
#endif /* CONFIG_PCI */
/*
* PMP
*/
extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline);
extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class);
extern void sata_pmp_do_eh(struct ata_port *ap,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset);
/*
* EH
* EH - drivers/ata/libata-eh.c
*/
extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_link_abort(struct ata_link *link);
@@ -1066,6 +1044,92 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset);
extern void ata_std_error_handler(struct ata_port *ap);
/*
* Base operations to inherit from and initializers for sht
*
* Operations
*
* base : Common to all libata drivers.
* sata : SATA controllers w/ native interface.
* pmp : SATA controllers w/ PMP support.
* sff : SFF ATA controllers w/o BMDMA support.
* bmdma : SFF ATA controllers w/ BMDMA support.
*
* sht initializers
*
* BASE : Common to all libata drivers. The user must set
* sg_tablesize and dma_boundary.
* PIO : SFF ATA controllers w/ only PIO support.
* BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and
* dma_boundary are set to BMDMA limits.
* NCQ : SATA controllers supporting NCQ. The user must set
* sg_tablesize, dma_boundary and can_queue.
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
#define ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
.queuecommand = ata_scsi_queuecmd, \
.can_queue = ATA_DEF_QUEUE, \
.this_id = ATA_SHT_THIS_ID, \
.cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
.use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param
#define ATA_NCQ_SHT(drv_name) \
ATA_BASE_SHT(drv_name), \
.change_queue_depth = ata_scsi_change_queue_depth
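/*
 * Illustrative sketch only (not part of this header): a hypothetical LLD
 * named "foo" combining an sht initializer with an ops table that chains to
 * a library parent through ->inherits. The field values are examples; the
 * ops table carries only the driver's deltas, everything else is filled in
 * from the parent when the host is registered.
 */
static struct scsi_host_template foo_sht = {
	ATA_NCQ_SHT("foo"),
	.can_queue	= 31,
	.sg_tablesize	= 128,
	.dma_boundary	= 0xffffffffUL,
};

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,	/* pick up base + SATA defaults */
	.hardreset	= sata_std_hardreset,	/* override only what differs */
};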
/*
* PMP helpers
*/
#ifdef CONFIG_SATA_PMP
static inline bool sata_pmp_supported(struct ata_port *ap)
{
return ap->flags & ATA_FLAG_PMP;
}
static inline bool sata_pmp_attached(struct ata_port *ap)
{
return ap->nr_pmp_links != 0;
}
static inline int ata_is_host_link(const struct ata_link *link)
{
return link == &link->ap->link;
}
#else /* CONFIG_SATA_PMP */
static inline bool sata_pmp_supported(struct ata_port *ap)
{
return false;
}
static inline bool sata_pmp_attached(struct ata_port *ap)
{
return false;
}
static inline int ata_is_host_link(const struct ata_link *link)
{
return 1;
}
#endif /* CONFIG_SATA_PMP */
static inline int sata_srst_pmp(struct ata_link *link)
{
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
return SATA_PMP_CTRL_PORT;
return link->pmp;
}
/*
* printk helpers
@@ -1074,7 +1138,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
#define ata_link_printk(link, lv, fmt, args...) do { \
if ((link)->ap->nr_pmp_links) \
if (sata_pmp_attached((link)->ap)) \
printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
(link)->pmp , ##args); \
else \
@@ -1094,18 +1158,11 @@ extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern void ata_ehi_clear_desc(struct ata_eh_info *ehi);
static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi)
{
ehi->flags |= ATA_EHI_RESUME_LINK;
ehi->action |= ATA_EH_SOFTRESET;
ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
ata_ehi_schedule_probe(ehi);
ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
ehi->flags |= ATA_EHI_HOTPLUGGED;
ehi->action |= ATA_EH_ENABLE_LINK;
ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK;
ehi->err_mask |= AC_ERR_ATA_BUS;
}
@@ -1126,7 +1183,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
static inline unsigned int ata_tag_internal(unsigned int tag)
{
return tag == ATA_MAX_QUEUE - 1;
return tag == ATA_TAG_INTERNAL;
}
/*
@@ -1167,11 +1224,6 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
/*
* link helpers
*/
static inline int ata_is_host_link(const struct ata_link *link)
{
return link == &link->ap->link;
}
static inline int ata_link_max_devices(const struct ata_link *link)
{
if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS)
@@ -1186,7 +1238,7 @@ static inline int ata_link_active(struct ata_link *link)
static inline struct ata_link *ata_port_first_link(struct ata_port *ap)
{
if (ap->nr_pmp_links)
if (sata_pmp_attached(ap))
return ap->pmp_link;
return &ap->link;
}
@@ -1195,8 +1247,8 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link)
{
struct ata_port *ap = link->ap;
if (link == &ap->link) {
if (!ap->nr_pmp_links)
if (ata_is_host_link(link)) {
if (!sata_pmp_attached(ap))
return NULL;
return ap->pmp_link;
}
@@ -1222,11 +1274,6 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link)
for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \
(dev) >= (link)->device || ((dev) = NULL); (dev)--)
static inline u8 ata_chk_status(struct ata_port *ap)
{
return ap->ops->check_status(ap);
}
/**
* ata_ncq_enabled - Test whether NCQ is enabled
* @dev: ATA device to test for
@@ -1243,74 +1290,6 @@ static inline int ata_ncq_enabled(struct ata_device *dev)
ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
/**
* ata_pause - Flush writes and pause 400 nanoseconds.
* @ap: Port to wait for.
*
* LOCKING:
* Inherited from caller.
*/
static inline void ata_pause(struct ata_port *ap)
{
ata_altstatus(ap);
ndelay(400);
}
/**
* ata_busy_wait - Wait for a port status register
* @ap: Port to wait for.
* @bits: bits that must be clear
* @max: number of 10uS waits to perform
*
* Waits up to max*10 microseconds for the selected bits in the port's
* status register to be cleared.
* Returns final value of status register.
*
* LOCKING:
* Inherited from caller.
*/
static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
unsigned int max)
{
u8 status;
do {
udelay(10);
status = ata_chk_status(ap);
max--;
} while (status != 0xff && (status & bits) && (max > 0));
return status;
}
/**
* ata_wait_idle - Wait for a port to be idle.
* @ap: Port to wait for.
*
* Waits up to 10ms for port's BUSY and DRQ signals to clear.
* Returns final value of status register.
*
* LOCKING:
* Inherited from caller.
*/
static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
status);
#endif
return status;
}
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
qc->tf.ctl |= ATA_NIEN;
@@ -1403,4 +1382,171 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
return *(struct ata_port **)&host->hostdata[0];
}
/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
*/
#ifdef CONFIG_SATA_PMP
extern const struct ata_port_operations sata_pmp_port_ops;
extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
extern void sata_pmp_error_handler(struct ata_port *ap);
#else /* CONFIG_SATA_PMP */
#define sata_pmp_port_ops sata_port_ops
#define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer
#define sata_pmp_error_handler ata_std_error_handler
#endif /* CONFIG_SATA_PMP */
/**************************************************************************
* SFF - drivers/ata/libata-sff.c
*/
#ifdef CONFIG_ATA_SFF
extern const struct ata_port_operations ata_sff_port_ops;
extern const struct ata_port_operations ata_bmdma_port_ops;
/* PIO only, sg_tablesize and dma_boundary limits can be removed */
#define ATA_PIO_SHT(drv_name) \
ATA_BASE_SHT(drv_name), \
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
#define ATA_BMDMA_SHT(drv_name) \
ATA_BASE_SHT(drv_name), \
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern u8 ata_sff_altstatus(struct ata_port *ap);
extern int ata_sff_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf);
extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw);
extern u8 ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_host_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
extern void ata_sff_freeze(struct ata_port *ap);
extern void ata_sff_thaw(struct ata_port *ap);
extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline);
extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
u8 *r_err);
extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
unsigned long deadline);
extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
extern void ata_sff_error_handler(struct ata_port *ap);
extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_sff_port_start(struct ata_port *ap);
extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
unsigned long xfer_mask);
extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
extern void ata_bmdma_start(struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
#ifdef CONFIG_PCI
extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
extern int ata_pci_bmdma_init(struct ata_host *host);
extern int ata_pci_sff_init_host(struct ata_host *host);
extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
struct scsi_host_template *sht, void *host_priv);
#endif /* CONFIG_PCI */
/**
* ata_sff_pause - Flush writes and pause 400 nanoseconds.
* @ap: Port to wait for.
*
* LOCKING:
* Inherited from caller.
*/
static inline void ata_sff_pause(struct ata_port *ap)
{
ata_sff_altstatus(ap);
ndelay(400);
}
/**
* ata_sff_busy_wait - Wait for a port status register
* @ap: Port to wait for.
* @bits: bits that must be clear
* @max: number of 10uS waits to perform
*
* Waits up to max*10 microseconds for the selected bits in the port's
* status register to be cleared.
* Returns final value of status register.
*
* LOCKING:
* Inherited from caller.
*/
static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits,
unsigned int max)
{
u8 status;
do {
udelay(10);
status = ap->ops->sff_check_status(ap);
max--;
} while (status != 0xff && (status & bits) && (max > 0));
return status;
}
/**
* ata_wait_idle - Wait for a port to be idle.
* @ap: Port to wait for.
*
* Waits up to 10ms for port's BUSY and DRQ signals to clear.
* Returns final value of status register.
*
* LOCKING:
* Inherited from caller.
*/
static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
status);
#endif
return status;
}
#endif /* CONFIG_ATA_SFF */
#endif /* __LINUX_LIBATA_H__ */


@@ -21,9 +21,15 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
* modify the filesystem. The lock module shouldn't assign a journal to the FS
* mount. It shouldn't send recovery callbacks to the FS mount. If the node
* dies or withdraws, all locks can be wiped immediately.
*
* LM_MFLAG_CONV_NODROP
* Do not allow the dlm to internally resolve conversion deadlocks by demoting
* the lock to unlocked and then reacquiring it in the requested mode. Instead,
* it should cancel the request and return LM_OUT_CONV_DEADLK.
*/
#define LM_MFLAG_SPECTATOR 0x00000001
#define LM_MFLAG_CONV_NODROP 0x00000002
/*
* lm_lockstruct flags
@@ -110,6 +116,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
*
* LM_OUT_ASYNC
* The result of the request will be returned in an LM_CB_ASYNC callback.
*
* LM_OUT_CONV_DEADLK
* The lock request was canceled due to a conversion deadlock.
*/
#define LM_OUT_ST_MASK 0x00000003
@@ -117,6 +126,7 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ASYNC 0x00000080
#define LM_OUT_ERROR 0x00000100
#define LM_OUT_CONV_DEADLK 0x00000200
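/*
 * Illustrative sketch only: how a filesystem that mounted the lock module
 * with LM_MFLAG_CONV_NODROP might react to the new return flag in its
 * asynchronous reply handler. The callback and recovery strategy below are
 * hypothetical; this interface does not prescribe them.
 */
static void example_async_reply(void *fsdata, unsigned int lck_ret)
{
	if (lck_ret & LM_OUT_CANCELED)
		return;			/* plain cancel, caller will retry */

	if (lck_ret & LM_OUT_CONV_DEADLK) {
		/*
		 * The lock module canceled the conversion instead of
		 * demoting and reacquiring the lock; break the deadlock at
		 * the fs level, e.g. drop to unlocked and request again.
		 */
	}
}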
/*
* lm_callback_t types


@@ -34,7 +34,7 @@ void mb_cache_destroy(struct mb_cache *);
/* Functions on cache entries */
struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *);
struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
sector_t, unsigned int[]);
void mb_cache_entry_release(struct mb_cache_entry *);


@@ -81,7 +81,7 @@ enum {
MLX4_CMD_SW2HW_CQ = 0x16,
MLX4_CMD_HW2SW_CQ = 0x17,
MLX4_CMD_QUERY_CQ = 0x18,
MLX4_CMD_RESIZE_CQ = 0x2c,
MLX4_CMD_MODIFY_CQ = 0x2c,
/* SRQ commands */
MLX4_CMD_SW2HW_SRQ = 0x35,


@@ -45,11 +45,11 @@ struct mlx4_cqe {
u8 sl;
u8 reserved1;
__be16 rlid;
u32 reserved2;
__be32 ipoib_status;
__be32 byte_cnt;
__be16 wqe_index;
__be16 checksum;
u8 reserved3[3];
u8 reserved2[3];
u8 owner_sr_opcode;
};
@@ -85,6 +85,16 @@ enum {
MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
};
enum {
MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22,
MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23,
MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24,
MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25,
MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26,
MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27,
MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28,
};
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
void __iomem *uar_page,
spinlock_t *doorbell_lock)
@@ -120,4 +130,9 @@ enum {
MLX4_CQ_DB_REQ_NOT = 2 << 24
};
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
u16 count, u16 period);
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
int entries, struct mlx4_mtt *mtt);
#endif /* MLX4_CQ_H */


@@ -186,6 +186,7 @@ struct mlx4_caps {
u32 flags;
u16 stat_rate_support;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
int max_gso_sz;
};
struct mlx4_buf_list {


@@ -48,8 +48,7 @@ struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, int subtype,
int port);
enum mlx4_dev_event event, int port);
struct list_head list;
};


@@ -158,10 +158,12 @@ struct mlx4_qp_context {
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
enum {
MLX4_WQE_CTRL_NEC = 1 << 29,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_NEC = 1 << 29,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
};
struct mlx4_wqe_ctrl_seg {
@@ -217,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
__be32 reservd[2];
};
struct mlx4_lso_seg {
__be32 mss_hdr_size;
__be32 header[0];
};
struct mlx4_wqe_bind_seg {
__be32 flags1;
__be32 flags2;


@@ -2,7 +2,11 @@
#define __LINUX_MROUTE_H
#include <linux/sockios.h>
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/in.h>
#endif
#include <linux/pim.h>
/*
* Based on the MROUTING 3.5 defines primarily to keep
@@ -210,27 +214,6 @@ struct mfc_cache
#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
#ifdef __KERNEL__
#define PIM_V1_VERSION __constant_htonl(0x10000000)
#define PIM_V1_REGISTER 1
#define PIM_VERSION 2
#define PIM_REGISTER 1
#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
struct pimreghdr
{
__u8 type;
__u8 reserved;
__be16 csum;
__be32 flags;
};
extern int pim_rcv_v1(struct sk_buff *);
struct rtmsg;
extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
#endif

include/linux/mroute6.h (new file, 228 lines)

@@ -0,0 +1,228 @@
#ifndef __LINUX_MROUTE6_H
#define __LINUX_MROUTE6_H
#include <linux/types.h>
#include <linux/sockios.h>
/*
* Based on the MROUTING 3.5 defines primarily to keep
* source compatibility with BSD.
*
* See the pim6sd code for the original history.
*
* Protocol Independent Multicast (PIM) data structures included
* Carlos Picoto (cap@di.fc.ul.pt)
*
*/
#define MRT6_BASE 200
#define MRT6_INIT (MRT6_BASE) /* Activate the kernel mroute code */
#define MRT6_DONE (MRT6_BASE+1) /* Shutdown the kernel mroute */
#define MRT6_ADD_MIF (MRT6_BASE+2) /* Add a virtual interface */
#define MRT6_DEL_MIF (MRT6_BASE+3) /* Delete a virtual interface */
#define MRT6_ADD_MFC (MRT6_BASE+4) /* Add a multicast forwarding entry */
#define MRT6_DEL_MFC (MRT6_BASE+5) /* Delete a multicast forwarding entry */
#define MRT6_VERSION (MRT6_BASE+6) /* Get the kernel multicast version */
#define MRT6_ASSERT (MRT6_BASE+7) /* Activate PIM assert mode */
#define MRT6_PIM (MRT6_BASE+8) /* enable PIM code */
#define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */
#define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1)
#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
#define MAXMIFS 32
typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */
typedef unsigned short mifi_t;
#define ALL_MIFS ((mifi_t)(-1))
#ifndef IF_SETSIZE
#define IF_SETSIZE 256
#endif
typedef __u32 if_mask;
#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
#if !defined(__KERNEL__) && !defined(DIV_ROUND_UP)
#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y))
#endif
typedef struct if_set {
if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
} if_set;
#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS)))
#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS)))
#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f)))
#define IF_ZERO(p) bzero(p, sizeof(*(p)))
/*
* Passed by mrouted for an MRT_ADD_MIF - again we use the
* mrouted 3.6 structures for compatibility
*/
struct mif6ctl {
mifi_t mif6c_mifi; /* Index of MIF */
unsigned char mif6c_flags; /* MIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
u_short mif6c_pifi; /* the index of the physical IF */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
};
#define MIFF_REGISTER 0x1 /* register vif */
/*
* Cache manipulation structures for mrouted and PIMd
*/
struct mf6cctl
{
struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */
struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */
mifi_t mf6cc_parent; /* Where it arrived */
struct if_set mf6cc_ifset; /* Where it is going */
};
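/*
 * Illustrative user-space sketch (pim6sd-style) of installing a forwarding
 * entry with the if_set macros above. The helper name, the MIF indices and
 * the IPPROTO_IPV6 socket level are assumptions, not part of this header;
 * assumes <sys/socket.h>, <netinet/in.h>, <string.h> and <strings.h>, plus
 * a socket on which MRT6_INIT has already been issued.
 */
static int example_add_mfc(int mrt_sock, mifi_t input_mif, mifi_t output_mif)
{
	struct mf6cctl mfc;

	memset(&mfc, 0, sizeof(mfc));
	mfc.mf6cc_origin.sin6_family   = AF_INET6;	/* source S (addr omitted) */
	mfc.mf6cc_mcastgrp.sin6_family = AF_INET6;	/* group G (addr omitted) */
	mfc.mf6cc_parent = input_mif;			/* arrival MIF */

	IF_ZERO(&mfc.mf6cc_ifset);
	IF_SET(output_mif, &mfc.mf6cc_ifset);		/* forward out of this MIF */

	return setsockopt(mrt_sock, IPPROTO_IPV6, MRT6_ADD_MFC,
			  &mfc, sizeof(mfc));
}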
/*
* Group count retrieval for pim6sd
*/
struct sioc_sg_req6
{
struct sockaddr_in6 src;
struct sockaddr_in6 grp;
unsigned long pktcnt;
unsigned long bytecnt;
unsigned long wrong_if;
};
/*
* To get vif packet counts
*/
struct sioc_mif_req6
{
mifi_t mifi; /* Which iface */
unsigned long icount; /* In packets */
unsigned long ocount; /* Out packets */
unsigned long ibytes; /* In bytes */
unsigned long obytes; /* Out bytes */
};
/*
* That's all usermode folks
*/
#ifdef __KERNEL__
#include <linux/skbuff.h> /* for struct sk_buff_head */
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
{
return (opt >= MRT6_BASE) && (opt <= MRT6_BASE + 10);
}
#else
static inline int ip6_mroute_opt(int opt)
{
return 0;
}
#endif
struct sock;
extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int);
extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern void ip6_mr_init(void);
struct mif_device
{
struct net_device *dev; /* Device we are using */
unsigned long bytes_in,bytes_out;
unsigned long pkt_in,pkt_out; /* Statistics */
unsigned long rate_limit; /* Traffic shaping (NI) */
unsigned char threshold; /* TTL threshold */
unsigned short flags; /* Control flags */
int link; /* Physical interface index */
};
#define VIFF_STATIC 0x8000
struct mfc6_cache
{
struct mfc6_cache *next; /* Next entry on cache line */
struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
struct in6_addr mf6c_origin; /* Source of packet */
mifi_t mf6c_parent; /* Source interface */
int mfc_flags; /* Flags on line */
union {
struct {
unsigned long expires;
struct sk_buff_head unresolved; /* Unresolved buffers */
} unres;
struct {
unsigned long last_assert;
int minvif;
int maxvif;
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
unsigned char ttls[MAXMIFS]; /* TTL thresholds */
} res;
} mfc_un;
};
#define MFC_STATIC 1
#define MFC_NOTIFY 2
#define MFC6_LINES 64
#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
(__force u32)(a)->s6_addr32[1] ^ \
(__force u32)(a)->s6_addr32[2] ^ \
(__force u32)(a)->s6_addr32[3] ^ \
(__force u32)(g)->s6_addr32[0] ^ \
(__force u32)(g)->s6_addr32[1] ^ \
(__force u32)(g)->s6_addr32[2] ^ \
(__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
#endif
#ifdef __KERNEL__
struct rtmsg;
extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket;
extern int ip6mr_sk_done(struct sock *sk);
#else
#define mroute6_socket NULL
static inline int ip6mr_sk_done(struct sock *sk) { return 0; }
#endif
#endif
/*
* Structure used to communicate from kernel to multicast router.
* We'll overlay the structure onto an MLD header (not an IPv6 header like igmpmsg{}
* used for the IPv4 implementation). This is because this structure will be passed via an
* IPv6 raw socket, on which an application will only receive the payload, i.e. the data after
* the IPv6 header and all the extension headers. (See section 3 of RFC 3542)
*/
struct mrt6msg {
#define MRT6MSG_NOCACHE 1
#define MRT6MSG_WRONGMIF 2
#define MRT6MSG_WHOLEPKT 3 /* used for user level encap */
__u8 im6_mbz; /* must be zero */
__u8 im6_msgtype; /* what type of message */
__u16 im6_mif; /* mif rec'd on */
__u32 im6_pad; /* padding for 64 bit arch */
struct in6_addr im6_src, im6_dst;
};
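/*
 * Illustrative user-space sketch of the receiving side described above:
 * upcalls arrive as the payload of the raw ICMPv6 socket on which the
 * daemon issued MRT6_INIT. The buffer size and handling policy are
 * assumptions; assumes <unistd.h> for read().
 */
static void example_poll_upcall(int mrt_sock)
{
	unsigned char buf[1500];
	ssize_t len = read(mrt_sock, buf, sizeof(buf));

	if (len >= (ssize_t)sizeof(struct mrt6msg)) {
		const struct mrt6msg *msg = (const struct mrt6msg *)buf;

		if (msg->im6_mbz == 0 && msg->im6_msgtype == MRT6MSG_NOCACHE) {
			/* No (S,G) route yet: resolve it and install one,
			 * e.g. via MRT6_ADD_MFC as sketched earlier. */
		}
	}
}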
#endif


@@ -192,6 +192,7 @@ struct mtpos {
#define MT_ST_SCSI2LOGICAL 0x800
#define MT_ST_SYSV 0x1000
#define MT_ST_NOWAIT 0x2000
#define MT_ST_SILI 0x4000
/* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
#define MT_ST_CLEAR_DEFAULT 0xfffff


@@ -19,6 +19,7 @@
#define _LINUX_NET_H
#include <linux/wait.h>
#include <linux/socket.h>
#include <asm/socket.h>
struct poll_table_struct;
@@ -26,7 +27,7 @@ struct pipe_inode_info;
struct inode;
struct net;
#define NPROTO 34 /* should be enough for now.. */
#define NPROTO AF_MAX
#define SYS_SOCKET 1 /* sys_socket(2) */
#define SYS_BIND 2 /* sys_bind(2) */


@@ -710,8 +710,10 @@ struct net_device
void (*poll_controller)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
struct net *nd_net;
#endif
/* bridge stuff */
struct net_bridge_port *br_port;
@@ -726,6 +728,10 @@ struct net_device
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
/* The TX queue control structures */
unsigned int egress_subqueue_count;
struct net_device_subqueue egress_subqueue[1];
@@ -735,6 +741,28 @@ struct net_device
#define NETDEV_ALIGN 32
#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
/*
* Net namespace inlines
*/
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
return dev->nd_net;
#else
return &init_net;
#endif
}
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
release_net(dev->nd_net);
dev->nd_net = hold_net(net);
#endif
}
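/*
 * Illustrative sketch (the helper is hypothetical): callers should go
 * through the accessors above rather than dereferencing nd_net directly,
 * so the same code builds with and without CONFIG_NET_NS.
 */
static inline bool example_in_init_net(const struct net_device *dev)
{
	return dev_net(dev) == &init_net;
}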
/**
* netdev_priv - access network device private data
* @dev: network device
@@ -811,7 +839,7 @@ static inline struct net_device *next_net_device(struct net_device *dev)
struct list_head *lh;
struct net *net;
net = dev->nd_net;
net = dev_net(dev);
lh = dev->dev_list.next;
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
@@ -1479,6 +1507,12 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
dev->gso_max_size = size;
}
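/*
 * Illustrative sketch (driver and limit are invented): a device whose DMA
 * engine cannot segment a full GSO_MAX_SIZE frame would clamp the limit
 * from its setup/probe path with the new helper.
 */
static void example_setup(struct net_device *netdev)
{
	netdev->features |= NETIF_F_TSO;
	netif_set_gso_max_size(netdev, 16384);	/* hw TSO limit: 16 KB */
}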
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.


@@ -6,11 +6,13 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#endif
#include <linux/compiler.h>
@@ -61,13 +63,21 @@ union nf_inet_addr {
#ifdef __KERNEL__
#ifdef CONFIG_NETFILTER
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
return a1->all[0] == a2->all[0] &&
a1->all[1] == a2->all[1] &&
a1->all[2] == a2->all[2] &&
a1->all[3] == a2->all[3];
}
extern void netfilter_init(void);
/* Largest hook number + 1 */
#define NF_MAX_HOOKS 8
struct sk_buff;
struct net_device;
typedef unsigned int nf_hookfn(unsigned int hooknum,
struct sk_buff *skb,
@@ -224,6 +234,11 @@ struct nf_afinfo {
unsigned short family;
__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
__sum16 (*checksum_partial)(struct sk_buff *skb,
unsigned int hook,
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
int (*route)(struct dst_entry **dst, struct flowi *fl);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
@@ -253,6 +268,23 @@ nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
return csum;
}
static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum_partial(skb, hook, dataoff, len,
protocol);
rcu_read_unlock();
return csum;
}
extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
@@ -311,5 +343,56 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *);
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
static inline struct net *nf_pre_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_in_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_forward_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
BUG_ON(in->nd_net != out->nd_net);
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_out_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_post_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
#endif /*__KERNEL__*/
#endif /*__LINUX_NETFILTER_H*/


@@ -0,0 +1,40 @@
#ifndef _NF_CONNTRACK_DCCP_H
#define _NF_CONNTRACK_DCCP_H
/* Exposed to userspace over nfnetlink */
enum ct_dccp_states {
CT_DCCP_NONE,
CT_DCCP_REQUEST,
CT_DCCP_RESPOND,
CT_DCCP_PARTOPEN,
CT_DCCP_OPEN,
CT_DCCP_CLOSEREQ,
CT_DCCP_CLOSING,
CT_DCCP_TIMEWAIT,
CT_DCCP_IGNORE,
CT_DCCP_INVALID,
__CT_DCCP_MAX
};
#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
enum ct_dccp_roles {
CT_DCCP_ROLE_CLIENT,
CT_DCCP_ROLE_SERVER,
__CT_DCCP_ROLE_MAX
};
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
#ifdef __KERNEL__
#include <net/netfilter/nf_conntrack_tuple.h>
struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
u_int8_t state;
u_int8_t last_pkt;
u_int8_t last_dir;
u_int64_t handshake_seq;
};
#endif /* __KERNEL__ */
#endif /* _NF_CONNTRACK_DCCP_H */


@@ -5,37 +5,164 @@
#define SIP_PORT 5060
#define SIP_TIMEOUT 3600
enum sip_header_pos {
POS_REG_REQ_URI,
POS_REQ_URI,
POS_FROM,
POS_TO,
POS_VIA,
POS_CONTACT,
POS_CONTENT,
POS_MEDIA,
POS_OWNER_IP4,
POS_CONNECTION_IP4,
POS_OWNER_IP6,
POS_CONNECTION_IP6,
POS_SDP_HEADER,
struct nf_ct_sip_master {
unsigned int register_cseq;
};
enum sip_expectation_classes {
SIP_EXPECT_SIGNALLING,
SIP_EXPECT_AUDIO,
SIP_EXPECT_VIDEO,
__SIP_EXPECT_MAX
};
#define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1)
struct sdp_media_type {
const char *name;
unsigned int len;
enum sip_expectation_classes class;
};
#define SDP_MEDIA_TYPE(__name, __class) \
{ \
.name = (__name), \
.len = sizeof(__name) - 1, \
.class = (__class), \
}
struct sip_handler {
const char *method;
unsigned int len;
int (*request)(struct sk_buff *skb,
const char **dptr, unsigned int *datalen,
unsigned int cseq);
int (*response)(struct sk_buff *skb,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code);
};
#define SIP_HANDLER(__method, __request, __response) \
{ \
.method = (__method), \
.len = sizeof(__method) - 1, \
.request = (__request), \
.response = (__response), \
}
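/*
 * Illustrative sketch of a method-dispatch table built with SIP_HANDLER();
 * the callback functions are hypothetical stubs, not part of this header.
 */
static int example_invite_request(struct sk_buff *skb, const char **dptr,
				  unsigned int *datalen, unsigned int cseq)
{
	return NF_ACCEPT;	/* stub */
}

static int example_invite_response(struct sk_buff *skb, const char **dptr,
				   unsigned int *datalen, unsigned int cseq,
				   unsigned int code)
{
	return NF_ACCEPT;	/* stub */
}

static const struct sip_handler example_sip_handlers[] = {
	SIP_HANDLER("INVITE", example_invite_request, example_invite_response),
};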
struct sip_header {
const char *name;
const char *cname;
const char *search;
unsigned int len;
unsigned int clen;
unsigned int slen;
int (*match_len)(const struct nf_conn *ct,
const char *dptr, const char *limit,
int *shift);
};
#define __SIP_HDR(__name, __cname, __search, __match) \
{ \
.name = (__name), \
.len = sizeof(__name) - 1, \
.cname = (__cname), \
.clen = (__cname) ? sizeof(__cname) - 1 : 0, \
.search = (__search), \
.slen = (__search) ? sizeof(__search) - 1 : 0, \
.match_len = (__match), \
}
#define SIP_HDR(__name, __cname, __search, __match) \
__SIP_HDR(__name, __cname, __search, __match)
#define SDP_HDR(__name, __search, __match) \
__SIP_HDR(__name, NULL, __search, __match)
enum sip_header_types {
SIP_HDR_CSEQ,
SIP_HDR_FROM,
SIP_HDR_TO,
SIP_HDR_CONTACT,
SIP_HDR_VIA,
SIP_HDR_EXPIRES,
SIP_HDR_CONTENT_LENGTH,
};
enum sdp_header_types {
SDP_HDR_UNSPEC,
SDP_HDR_VERSION,
SDP_HDR_OWNER_IP4,
SDP_HDR_CONNECTION_IP4,
SDP_HDR_OWNER_IP6,
SDP_HDR_CONNECTION_IP6,
SDP_HDR_MEDIA,
};
extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conn *ct,
const char **dptr);
extern unsigned int (*nf_nat_sdp_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_expect *exp,
const char *dptr);
const char **dptr,
unsigned int *datalen);
extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *exp,
unsigned int matchoff,
unsigned int matchlen);
extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int dataoff,
unsigned int *datalen,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int *datalen,
unsigned int matchoff,
unsigned int matchlen,
u_int16_t port);
extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int dataoff,
unsigned int *datalen,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp,
unsigned int mediaoff,
unsigned int medialen,
union nf_inet_addr *rtp_addr);
extern int ct_sip_parse_request(const struct nf_conn *ct,
const char *dptr, unsigned int datalen,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port);
extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sip_header_types type,
unsigned int *matchoff, unsigned int *matchlen);
extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
unsigned int *dataoff, unsigned int datalen,
enum sip_header_types type, int *in_header,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port);
extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr);
extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
unsigned int off, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
unsigned int *val);
extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sdp_header_types type,
enum sdp_header_types term,
unsigned int *matchoff, unsigned int *matchlen);
extern int ct_sip_get_info(const struct nf_conn *ct, const char *dptr,
size_t dlen, unsigned int *matchoff,
unsigned int *matchlen, enum sip_header_pos pos);
extern int ct_sip_lnlen(const char *line, const char *limit);
extern const char *ct_sip_search(const char *needle, const char *haystack,
size_t needle_len, size_t haystack_len,
int case_sensitive);
#endif /* __KERNEL__ */
#endif /* __NF_CONNTRACK_SIP_H__ */


@@ -80,6 +80,7 @@ enum ctattr_l4proto {
enum ctattr_protoinfo {
CTA_PROTOINFO_UNSPEC,
CTA_PROTOINFO_TCP,
CTA_PROTOINFO_DCCP,
__CTA_PROTOINFO_MAX
};
#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
@@ -95,6 +96,13 @@ enum ctattr_protoinfo_tcp {
};
#define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)
enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_UNSPEC,
CTA_PROTOINFO_DCCP_STATE,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* old 64bit counters */


@@ -430,13 +430,13 @@ extern int xt_compat_add_offset(int af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(int af);
extern short xt_compat_calc_jump(int af, unsigned int offset);
extern int xt_compat_match_offset(struct xt_match *match);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
extern int xt_compat_target_offset(struct xt_target *target);
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,


@@ -37,68 +37,54 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] |= \
(chunkmap)[type / bytes(u_int32_t)] |= \
1 << (type % bytes(u_int32_t)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] &= \
(chunkmap)[type / bytes(u_int32_t)] &= \
~(1 << (type % bytes(u_int32_t))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
(chunkmap[type / bytes (u_int32_t)] & \
((chunkmap)[type / bytes (u_int32_t)] & \
(1 << (type % bytes (u_int32_t)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = 0; \
} while (0)
#define SCTP_CHUNKMAP_RESET(chunkmap) \
memset((chunkmap), 0, sizeof(chunkmap))
#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = ~0; \
} while (0)
#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
memset((chunkmap), ~0U, sizeof(chunkmap))
#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(srcmap); i++) \
destmap[i] = srcmap[i]; \
} while (0)
#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
memcpy((destmap), (srcmap), sizeof(srcmap))
#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i]) { \
flag = 0; \
break; \
} \
} \
flag; \
})
#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
__sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i])
return false;
return true;
}
#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i] != ~0) { \
flag = 0; \
break; \
} \
} \
flag; \
})
#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
__sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i] != ~0U)
return false;
return true;
}
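/*
 * Illustrative sketch of the (unchanged) calling convention, operating on
 * the chunkmap[] member of struct xt_sctp_info defined above this hunk;
 * chunk type 1 (SCTP INIT) is just an example value.
 */
static inline bool example_wants_init(struct xt_sctp_info *info)
{
	SCTP_CHUNKMAP_RESET(info->chunkmap);
	SCTP_CHUNKMAP_SET(info->chunkmap, 1);	/* 1 == SCTP INIT chunk type */

	return !SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap) &&
	       SCTP_CHUNKMAP_IS_SET(info->chunkmap, 1);
}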
#endif /* _XT_SCTP_H_ */


@@ -23,8 +23,6 @@
#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define arpt_target xt_target
#define arpt_table xt_table
#define ARPT_DEV_ADDR_LEN_MAX 16
@@ -266,20 +264,15 @@ struct arpt_error
.target.errorname = "ERROR", \
}
#define arpt_register_target(tgt) \
({ (tgt)->family = NF_ARP; \
xt_register_target(tgt); })
#define arpt_unregister_target(tgt) xt_unregister_target(tgt)
extern struct arpt_table *arpt_register_table(struct net *net,
struct arpt_table *table,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct arpt_table *table);
extern struct xt_table *arpt_register_table(struct net *net,
struct xt_table *table,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
struct arpt_table *table);
struct xt_table *table);
#define ARPT_ALIGN(s) XT_ALIGN(s)


@@ -0,0 +1,21 @@
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H
#define EBT_NFLOG_MASK 0x0
#define EBT_NFLOG_PREFIX_SIZE 64
#define EBT_NFLOG_WATCHER "nflog"
#define EBT_NFLOG_DEFAULT_GROUP 0x1
#define EBT_NFLOG_DEFAULT_THRESHOLD 1
struct ebt_nflog_info {
u_int32_t len;
u_int16_t group;
u_int16_t threshold;
u_int16_t flags;
u_int16_t pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};
#endif /* __LINUX_BRIDGE_EBT_NFLOG_H */


@@ -62,8 +62,6 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = INT_MAX - 2,
NF_IP_PRI_NAT_SEQ_ADJUST = INT_MAX - 1,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
};


@@ -78,6 +78,18 @@
* or, if no MAC address given, all stations, on the interface identified
* by %NL80211_ATTR_IFINDEX.
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
* %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
* %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_NEW_MPATH: Add a mesh path with given attributes to the
* interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_MPATH: Remove a mesh path identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all mesh paths, on the interface identified
* by %NL80211_ATTR_IFINDEX.
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
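/*
 * Illustrative user-space sketch: driving the new mesh path commands over
 * generic netlink with libnl-3. The library calls, the error handling and
 * the resolved "nl80211" family id are assumptions about a typical client,
 * not part of nl80211 itself; assumes <netlink/genl/genl.h> and <errno.h>.
 */
static int example_get_mpath(struct nl_sock *sock, int nl80211_id,
			     int ifindex, const unsigned char dst[6])
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	if (!msg)
		return -ENOMEM;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_GET_MPATH, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, dst);

	err = nl_send_auto(sock, msg);	/* reply carries NL80211_ATTR_MPATH_INFO */
	nlmsg_free(msg);
	return err;
}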
@@ -112,6 +124,11 @@ enum nl80211_commands {
/* add commands here */
NL80211_CMD_GET_MPATH,
NL80211_CMD_SET_MPATH,
NL80211_CMD_NEW_MPATH,
NL80211_CMD_DEL_MPATH,
/* used to define NL80211_CMD_MAX below */
__NL80211_CMD_AFTER_LAST,
NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
@@ -157,9 +174,23 @@ enum nl80211_commands {
* restriction (at most %NL80211_MAX_SUPP_RATES).
* @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station
* to, or the AP interface the station was originally added to.
* @NL80211_ATTR_STA_STATS: statistics for a station, part of station info
* @NL80211_ATTR_STA_INFO: information about a station, part of station info
* given for %NL80211_CMD_GET_STATION, nested attribute containing
* info as possible, see &enum nl80211_sta_stats.
* info as possible, see &enum nl80211_sta_info.
*
* @NL80211_ATTR_WIPHY_BANDS: Information about the operating bands,
* consisting of a nested array.
*
* @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes).
* @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link.
* @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path.
* @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path
* info given for %NL80211_CMD_GET_MPATH, nested attribute described at
* &enum nl80211_mpath_info.
*
* @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of
* &enum nl80211_mntr_flags.
*
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -193,10 +224,19 @@ enum nl80211_attrs {
NL80211_ATTR_STA_LISTEN_INTERVAL,
NL80211_ATTR_STA_SUPPORTED_RATES,
NL80211_ATTR_STA_VLAN,
NL80211_ATTR_STA_STATS,
NL80211_ATTR_STA_INFO,
NL80211_ATTR_WIPHY_BANDS,
NL80211_ATTR_MNTR_FLAGS,
/* add attributes here, update the policy in nl80211.c */
NL80211_ATTR_MESH_ID,
NL80211_ATTR_STA_PLINK_ACTION,
NL80211_ATTR_MPATH_NEXT_HOP,
NL80211_ATTR_MPATH_INFO,
__NL80211_ATTR_AFTER_LAST,
NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
};
@@ -213,6 +253,7 @@ enum nl80211_attrs {
* @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points
* @NL80211_IFTYPE_WDS: wireless distribution interface
* @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames
* @NL80211_IFTYPE_MESH_POINT: mesh point
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @__NL80211_IFTYPE_AFTER_LAST: internal use
*
@@ -228,6 +269,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_AP_VLAN,
NL80211_IFTYPE_WDS,
NL80211_IFTYPE_MONITOR,
NL80211_IFTYPE_MESH_POINT,
/* keep last */
__NL80211_IFTYPE_AFTER_LAST,
@@ -257,27 +299,167 @@ enum nl80211_sta_flags {
};
/**
* enum nl80211_sta_stats - station statistics
* enum nl80211_sta_info - station information
*
* These attribute types are used with %NL80211_ATTR_STA_STATS
* These attribute types are used with %NL80211_ATTR_STA_INFO
* when getting information about a station.
*
* @__NL80211_STA_STAT_INVALID: attribute number 0 is reserved
* @NL80211_STA_STAT_INACTIVE_TIME: time since last activity (u32, msecs)
* @NL80211_STA_STAT_RX_BYTES: total received bytes (u32, from this station)
* @NL80211_STA_STAT_TX_BYTES: total transmitted bytes (u32, to this station)
* @__NL80211_STA_STAT_AFTER_LAST: internal
* @NL80211_STA_STAT_MAX: highest possible station stats attribute
* @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved
* @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
* @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
* @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
enum nl80211_sta_stats {
__NL80211_STA_STAT_INVALID,
NL80211_STA_STAT_INACTIVE_TIME,
NL80211_STA_STAT_RX_BYTES,
NL80211_STA_STAT_TX_BYTES,
enum nl80211_sta_info {
__NL80211_STA_INFO_INVALID,
NL80211_STA_INFO_INACTIVE_TIME,
NL80211_STA_INFO_RX_BYTES,
NL80211_STA_INFO_TX_BYTES,
NL80211_STA_INFO_LLID,
NL80211_STA_INFO_PLID,
NL80211_STA_INFO_PLINK_STATE,
/* keep last */
__NL80211_STA_STAT_AFTER_LAST,
NL80211_STA_STAT_MAX = __NL80211_STA_STAT_AFTER_LAST - 1
__NL80211_STA_INFO_AFTER_LAST,
NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1
};
/**
* enum nl80211_mpath_flags - nl80211 mesh path flags
*
* @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active
* @NL80211_MPATH_FLAG_RESOLVING: the mesh path discovery process is running
* @NL80211_MPATH_FLAG_DSN_VALID: the mesh path contains a valid DSN
* @NL80211_MPATH_FLAG_FIXED: the mesh path has been manually set
* @NL80211_MPATH_FLAG_RESOLVED: the mesh path discovery process succeeded
*/
enum nl80211_mpath_flags {
NL80211_MPATH_FLAG_ACTIVE = 1<<0,
NL80211_MPATH_FLAG_RESOLVING = 1<<1,
NL80211_MPATH_FLAG_DSN_VALID = 1<<2,
NL80211_MPATH_FLAG_FIXED = 1<<3,
NL80211_MPATH_FLAG_RESOLVED = 1<<4,
};
/**
* enum nl80211_mpath_info - mesh path information
*
* These attribute types are used with %NL80211_ATTR_MPATH_INFO when getting
* information about a mesh path.
*
* @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved
 * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination
 * @NL80211_MPATH_INFO_DSN: destination sequence number
 * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path
 * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now
 * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in
 * &enum nl80211_mpath_flags
 * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec
 * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries
*/
enum nl80211_mpath_info {
__NL80211_MPATH_INFO_INVALID,
NL80211_MPATH_INFO_FRAME_QLEN,
NL80211_MPATH_INFO_DSN,
NL80211_MPATH_INFO_METRIC,
NL80211_MPATH_INFO_EXPTIME,
NL80211_MPATH_INFO_FLAGS,
NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
NL80211_MPATH_INFO_DISCOVERY_RETRIES,
/* keep last */
__NL80211_MPATH_INFO_AFTER_LAST,
NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1
};
/**
* enum nl80211_band_attr - band attributes
* @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_BAND_ATTR_FREQS: supported frequencies in this band,
* an array of nested frequency attributes
* @NL80211_BAND_ATTR_RATES: supported bitrates in this band,
* an array of nested bitrate attributes
*/
enum nl80211_band_attr {
__NL80211_BAND_ATTR_INVALID,
NL80211_BAND_ATTR_FREQS,
NL80211_BAND_ATTR_RATES,
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_frequency_attr - frequency attributes
* @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
* @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current
* regulatory domain.
* @NL80211_FREQUENCY_ATTR_PASSIVE_SCAN: Only passive scanning is
* permitted on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_IBSS: IBSS networks are not permitted
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory
* on this channel in current regulatory domain.
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
NL80211_FREQUENCY_ATTR_FREQ,
NL80211_FREQUENCY_ATTR_DISABLED,
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN,
NL80211_FREQUENCY_ATTR_NO_IBSS,
NL80211_FREQUENCY_ATTR_RADAR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_bitrate_attr - bitrate attributes
* @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
* @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported
* in 2.4 GHz band.
*/
enum nl80211_bitrate_attr {
__NL80211_BITRATE_ATTR_INVALID,
NL80211_BITRATE_ATTR_RATE,
NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE,
/* keep last */
__NL80211_BITRATE_ATTR_AFTER_LAST,
NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_mntr_flags - monitor configuration flags
*
* Monitor configuration flags.
*
* @__NL80211_MNTR_FLAG_INVALID: reserved
*
* @NL80211_MNTR_FLAG_FCSFAIL: pass frames with bad FCS
* @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @NL80211_MNTR_FLAG_CONTROL: pass control frames
* @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering
 * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing;
 * overrides all other flags.
*
* @__NL80211_MNTR_FLAG_AFTER_LAST: internal use
* @NL80211_MNTR_FLAG_MAX: highest possible monitor flag
*/
enum nl80211_mntr_flags {
__NL80211_MNTR_FLAG_INVALID,
NL80211_MNTR_FLAG_FCSFAIL,
NL80211_MNTR_FLAG_PLCPFAIL,
NL80211_MNTR_FLAG_CONTROL,
NL80211_MNTR_FLAG_OTHER_BSS,
NL80211_MNTR_FLAG_COOK_FRAMES,
/* keep last */
__NL80211_MNTR_FLAG_AFTER_LAST,
NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1
};
#endif /* __LINUX_NL80211_H */

View file

@ -1,74 +0,0 @@
#ifndef __LINUX_PCOUNTER_H
#define __LINUX_PCOUNTER_H
/*
 * Using a dynamic percpu 'int' variable has a cost:
* 1) Extra dereference
* Current per_cpu_ptr() implementation uses an array per 'percpu variable'.
* 2) memory cost of NR_CPUS*(32+sizeof(void *)) instead of num_possible_cpus()*4
*
* This pcounter implementation is an abstraction to be able to use
* either a static or a dynamic per cpu variable.
 * Once dynamic per cpu variables get a fast & cheap implementation, we can
 * change the pcounter implementation too.
*/
struct pcounter {
#ifdef CONFIG_SMP
void (*add)(struct pcounter *self, int inc);
int (*getval)(const struct pcounter *self, int cpu);
int *per_cpu_values;
#else
int val;
#endif
};
#ifdef CONFIG_SMP
#include <linux/percpu.h>
#define DEFINE_PCOUNTER(NAME) \
static DEFINE_PER_CPU(int, NAME##_pcounter_values); \
static void NAME##_pcounter_add(struct pcounter *self, int val) \
{ \
__get_cpu_var(NAME##_pcounter_values) += val; \
} \
static int NAME##_pcounter_getval(const struct pcounter *self, int cpu) \
{ \
return per_cpu(NAME##_pcounter_values, cpu); \
} \
#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER) \
MEMBER = { \
.add = NAME##_pcounter_add, \
.getval = NAME##_pcounter_getval, \
}
static inline void pcounter_add(struct pcounter *self, int inc)
{
self->add(self, inc);
}
extern int pcounter_getval(const struct pcounter *self);
extern int pcounter_alloc(struct pcounter *self);
extern void pcounter_free(struct pcounter *self);
#else /* CONFIG_SMP */
static inline void pcounter_add(struct pcounter *self, int inc)
{
self->val += inc;
}
static inline int pcounter_getval(const struct pcounter *self)
{
return self->val;
}
#define DEFINE_PCOUNTER(NAME)
#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER)
#define pcounter_alloc(self) 0
#define pcounter_free(self)
#endif /* CONFIG_SMP */
#endif /* __LINUX_PCOUNTER_H */
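For context on what this removal drops, here is a hedged sketch of how a subsystem might have used the pcounter API via its dynamic-allocation path; it assumes pcounter_alloc() wires up the add/getval callbacks (as lib/pcounter.c did) and that getval aggregates across CPUs on SMP. Names are hypothetical.

/* Illustrative only: an "in use" counter built on the removed pcounter API. */
static struct pcounter my_inuse;

static int my_counter_init(void)
{
	return pcounter_alloc(&my_inuse);	/* expands to 0 on !SMP builds */
}

static void my_obj_get(void)
{
	pcounter_add(&my_inuse, 1);		/* cheap per-CPU increment on SMP */
}

static int my_inuse_count(void)
{
	return pcounter_getval(&my_inuse);	/* aggregates all CPUs on SMP */
}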

View file

@ -39,7 +39,8 @@
SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full)
/* Set phydev->irq to PHY_POLL if interrupts are not supported,
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
* or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
* the attached driver handles the interrupt
*/
@ -63,8 +64,6 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI
} phy_interface_t;
#define MII_BUS_MAX 4
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
@ -74,20 +73,30 @@ typedef enum {
#define PHY_MAX_ADDR 32
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%x:%02x"
#define PHY_ID_FMT "%s:%02x"
/* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure */
/*
 * Needs to be a little smaller than phydev->dev.bus_id to leave room
* for the ":%02x"
*/
#define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3)
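With the bus id now a string, the match string handed to the PHY connect/attach helpers is built from the mii_bus id plus the PHY address. A hedged sketch, with the helper name and callers assumed:

/* Illustrative only: build the "<mii bus id>:<phy addr>" match string. */
static void fill_phy_bus_id(char *bus_id, size_t len,
			    struct mii_bus *bus, int phy_addr)
{
	/* e.g. "e0000000.mdio:01" with the new string-typed bus->id */
	snprintf(bus_id, len, PHY_ID_FMT, bus->id, phy_addr);
}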
/*
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
struct mii_bus {
const char *name;
int id;
char id[MII_BUS_ID_SIZE];
void *priv;
int (*read)(struct mii_bus *bus, int phy_id, int regnum);
int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
int (*reset)(struct mii_bus *bus);
/* A lock to ensure that only one thing can read/write
* the MDIO bus at a time */
/*
* A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
struct device *dev;
@ -98,8 +107,10 @@ struct mii_bus {
/* Phy addresses to be ignored when probing */
u32 phy_mask;
/* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address */
/*
* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address
*/
int *irq;
};
@ -251,7 +262,8 @@ struct phy_device {
/* Bus address of the PHY (0-32) */
int addr;
/* forced speed & duplex (no autoneg)
/*
* forced speed & duplex (no autoneg)
* partner speed & duplex & pause (autoneg)
*/
int speed;
@ -274,8 +286,10 @@ struct phy_device {
int link_timeout;
/* Interrupt number for this PHY
* -1 means no interrupt */
/*
* Interrupt number for this PHY
* -1 means no interrupt
*/
int irq;
/* private data pointer */
@ -325,22 +339,28 @@ struct phy_driver {
u32 features;
u32 flags;
/* Called to initialize the PHY,
* including after a reset */
/*
* Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
/* Called during discovery. Used to set
* up device-specific structures, if any */
/*
* Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
/* PHY Power Management */
int (*suspend)(struct phy_device *phydev);
int (*resume)(struct phy_device *phydev);
/* Configures the advertisement and resets
/*
* Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off */
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
/* Determines the negotiated speed and duplex */
@ -361,6 +381,7 @@ struct phy_driver {
int phy_read(struct phy_device *phydev, u16 regnum);
int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);

45
include/linux/pim.h Normal file
View file

@ -0,0 +1,45 @@
#ifndef __LINUX_PIM_H
#define __LINUX_PIM_H
#include <asm/byteorder.h>
#ifndef __KERNEL__
struct pim {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 pim_type:4, /* PIM message type */
pim_ver:4; /* PIM version */
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 pim_ver:4, /* PIM version */
pim_type:4; /* PIM message type */
#endif
__u8 pim_rsv; /* Reserved */
__be16 pim_cksum; /* Checksum */
};
#define PIM_MINLEN 8
#endif
/* Message types - V1 */
#define PIM_V1_VERSION __constant_htonl(0x10000000)
#define PIM_V1_REGISTER 1
/* Message types - V2 */
#define PIM_VERSION 2
#define PIM_REGISTER 1
#if defined(__KERNEL__)
#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
struct pimreghdr
{
__u8 type;
__u8 reserved;
__be16 csum;
__be32 flags;
};
struct sk_buff;
extern int pim_rcv_v1(struct sk_buff *);
#endif
#endif
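The userspace-visible struct pim above maps the first bytes of a PIM message; a hedged sketch of sanity-checking a received PIMv2 register header (field and macro names come from this file, the surrounding function is an assumption):

/* Illustrative only: minimal validation of a PIMv2 register header. */
static int pim_v2_register_ok(const struct pim *hdr, size_t len)
{
	if (len < PIM_MINLEN)
		return 0;
	return hdr->pim_ver == PIM_VERSION && hdr->pim_type == PIM_REGISTER;
}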

View file

@ -160,14 +160,18 @@ enum {
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/dqblk_xfs.h>
#include <linux/dqblk_v1.h>
#include <linux/dqblk_v2.h>
#include <asm/atomic.h>
extern spinlock_t dq_data_lock;
/* Maximal numbers of writes for quota operation (insert/delete/update)

View file

@ -213,6 +213,11 @@ int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
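These helpers copy a linear buffer to or from an arbitrary scatterlist, so drivers no longer need to open-code page-by-page walks. A hedged usage sketch; the function and variable names are assumptions:

/* Illustrative only: stage a driver bounce buffer into its sg table. */
static int copy_bounce_to_sg(struct scatterlist *sgl, unsigned int nents,
			     void *bounce, size_t len)
{
	size_t copied = sg_copy_from_buffer(sgl, nents, bounce, len);

	return copied == len ? 0 : -EIO;	/* returns bytes actually copied */
}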
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.

View file

@ -36,7 +36,11 @@
extern unsigned securebits;
/* Maximum number of letters for an LSM name string */
#define SECURITY_NAME_MAX 10
struct ctl_table;
struct audit_krule;
/*
* These functions are in security/capability.c and are used
@ -136,6 +140,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
/**
* struct security_operations - main security structure
*
* Security module identifier.
*
* @name:
 * A string that acts as a unique identifier for the LSM, with a maximum
 * of SECURITY_NAME_MAX characters.
*
* Security hooks for program execution operations.
*
* @bprm_alloc_security:
@ -468,6 +478,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @dentry is the dentry being changed.
* Return 0 on success. If error is returned, then the operation
* causing setuid bit removal is failed.
* @inode_getsecid:
* Get the secid associated with the node.
* @inode contains a pointer to the inode.
* @secid contains a pointer to the location where result will be saved.
* In case of failure, @secid will be set to zero.
*
* Security hooks for file operations
*
@ -636,6 +651,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @task_getsecid:
* Retrieve the security identifier of the process @p.
* @p contains the task_struct for the process and place is into @secid.
* In case of failure, @secid will be set to zero.
*
* @task_setgroups:
* Check permission before setting the supplementary group set of the
* current process.
@ -910,24 +927,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Security hooks for XFRM operations.
*
* @xfrm_policy_alloc_security:
* @xp contains the xfrm_policy being added to Security Policy Database
* used by the XFRM system.
* @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
* Database used by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level policy update program (e.g., setkey).
* Allocate a security structure to the xp->security field; the security
* field is initialized to NULL when the xfrm_policy is allocated.
* Return 0 if operation was successful (memory to allocate, legal context)
* @xfrm_policy_clone_security:
* @old contains an existing xfrm_policy in the SPD.
* @new contains a new xfrm_policy being cloned from old.
* Allocate a security structure to the new->security field
* that contains the information from the old->security field.
* @old_ctx contains an existing xfrm_sec_ctx.
* @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
* Allocate a security structure in new_ctxp that contains the
* information from the old_ctx structure.
* Return 0 if operation was successful (memory to allocate).
* @xfrm_policy_free_security:
* @xp contains the xfrm_policy
* @ctx contains the xfrm_sec_ctx
* Deallocate xp->security.
* @xfrm_policy_delete_security:
* @xp contains the xfrm_policy.
* @ctx contains the xfrm_sec_ctx.
* Authorize deletion of xp->security.
* @xfrm_state_alloc_security:
* @x contains the xfrm_state being added to the Security Association
@ -947,7 +964,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @x contains the xfrm_state.
* Authorize deletion of x->security.
* @xfrm_policy_lookup:
* @xp contains the xfrm_policy for which the access control is being
* @ctx contains the xfrm_sec_ctx for which the access control is being
* checked.
* @fl_secid contains the flow security label that is used to authorize
* access to the policy xp.
@ -997,6 +1014,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @ipcp contains the kernel IPC permission structure
* @flag contains the desired (requested) permission set
* Return 0 if permission is granted.
* @ipc_getsecid:
* Get the secid associated with the ipc object.
* @ipcp contains the kernel IPC permission structure.
* @secid contains a pointer to the location where result will be saved.
* In case of failure, @secid will be set to zero.
*
* Security hooks for individual messages held in System V IPC message queues
* @msg_msg_alloc_security:
@ -1223,9 +1245,42 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @secdata contains the security context.
* @seclen contains the length of the security context.
*
* Security hooks for Audit
*
* @audit_rule_init:
* Allocate and initialize an LSM audit rule structure.
 * @field contains the required Audit action.
 * Field flags are defined in include/linux/audit.h.
* @op contains the operator the rule uses.
* @rulestr contains the context where the rule will be applied to.
* @lsmrule contains a pointer to receive the result.
* Return 0 if @lsmrule has been successfully set,
* -EINVAL in case of an invalid rule.
*
* @audit_rule_known:
* Specifies whether given @rule contains any fields related to current LSM.
* @rule contains the audit rule of interest.
* Return 1 in case of relation found, 0 otherwise.
*
* @audit_rule_match:
* Determine if given @secid matches a rule previously approved
* by @audit_rule_known.
* @secid contains the security id in question.
* @field contains the field which relates to current LSM.
* @op contains the operator that will be used for matching.
* @rule points to the audit rule that will be checked against.
* @actx points to the audit context associated with the check.
* Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
*
* @audit_rule_free:
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init.
* @rule contains the allocated rule
*
* This is the main security structure.
*/
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
int (*ptrace) (struct task_struct * parent, struct task_struct * child);
int (*capget) (struct task_struct * target,
kernel_cap_t * effective,
@ -1317,6 +1372,7 @@ struct security_operations {
int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc);
int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
void (*inode_getsecid)(const struct inode *inode, u32 *secid);
int (*file_permission) (struct file * file, int mask);
int (*file_alloc_security) (struct file * file);
@ -1369,6 +1425,7 @@ struct security_operations {
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag);
void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
int (*msg_msg_alloc_security) (struct msg_msg * msg);
void (*msg_msg_free_security) (struct msg_msg * msg);
@ -1454,17 +1511,17 @@ struct security_operations {
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp,
int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx);
int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new);
void (*xfrm_policy_free_security) (struct xfrm_policy *xp);
int (*xfrm_policy_delete_security) (struct xfrm_policy *xp);
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_state_alloc_security) (struct xfrm_state *x,
struct xfrm_user_sec_ctx *sec_ctx,
u32 secid);
void (*xfrm_state_free_security) (struct xfrm_state *x);
int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
struct xfrm_policy *xp, struct flowi *fl);
int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
@ -1480,10 +1537,18 @@ struct security_operations {
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule);
int (*audit_rule_known)(struct audit_krule *krule);
int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
struct audit_context *actx);
void (*audit_rule_free)(void *lsmrule);
#endif /* CONFIG_AUDIT */
};
/* prototypes */
extern int security_init (void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security (struct security_operations *ops);
extern int mod_reg_security (const char *name, struct security_operations *ops);
extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
@ -1578,6 +1643,7 @@ int security_inode_killpriv(struct dentry *dentry);
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(const struct inode *inode, u32 *secid);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@ -1622,6 +1688,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
void security_task_reparent_to_init(struct task_struct *p);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
int security_msg_queue_alloc(struct msg_queue *msq);
@ -2022,6 +2089,11 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer,
return 0;
}
static inline void security_inode_getsecid(const struct inode *inode, u32 *secid)
{
*secid = 0;
}
static inline int security_file_permission (struct file *file, int mask)
{
return 0;
@ -2137,7 +2209,9 @@ static inline int security_task_getsid (struct task_struct *p)
}
static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
{ }
{
*secid = 0;
}
static inline int security_task_setgroups (struct group_info *group_info)
{
@ -2216,6 +2290,11 @@ static inline int security_ipc_permission (struct kern_ipc_perm *ipcp,
return 0;
}
static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
*secid = 0;
}
static inline int security_msg_msg_alloc (struct msg_msg * msg)
{
return 0;
@ -2562,16 +2641,16 @@ static inline void security_inet_conn_established(struct sock *sk,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx);
int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new);
void security_xfrm_policy_free(struct xfrm_policy *xp);
int security_xfrm_policy_delete(struct xfrm_policy *xp);
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx);
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp, struct flowi *fl);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
@ -2579,21 +2658,21 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx)
static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
{
return 0;
}
static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp)
{
return 0;
}
static inline void security_xfrm_policy_free(struct xfrm_policy *xp)
static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
}
static inline int security_xfrm_policy_delete(struct xfrm_policy *xp)
static inline int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return 0;
}
@ -2619,7 +2698,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir)
static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
{
return 0;
}
@ -2672,5 +2751,38 @@ static inline int security_key_permission(key_ref_t key_ref,
#endif
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
int security_audit_rule_known(struct audit_krule *krule);
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
struct audit_context *actx);
void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
void **lsmrule)
{
return 0;
}
static inline int security_audit_rule_known(struct audit_krule *krule)
{
return 0;
}
static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
void *lsmrule, struct audit_context *actx)
{
return 0;
}
static inline void security_audit_rule_free(void *lsmrule)
{ }
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_AUDIT */
#endif /* ! __LINUX_SECURITY_H */
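The audit wrappers above are what the audit rule engine is expected to call when a rule carries an LSM field. A hedged sketch of that call site; struct audit_field comes from audit.h, while the surrounding function name and flow are assumptions:

/* Illustrative only: how an audit filter might evaluate one LSM-backed field. */
static int audit_field_lsm_match(u32 sid, struct audit_field *f,
				 struct audit_context *actx)
{
	if (!f->lsm_rule)
		return 0;	/* rule string never compiled, treat as no match */
	return security_audit_rule_match(sid, f->type, f->op,
					 f->lsm_rule, actx);
}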

View file

@ -16,98 +16,10 @@
struct selinux_audit_rule;
struct audit_context;
struct inode;
struct kern_ipc_perm;
#ifdef CONFIG_SECURITY_SELINUX
/**
* selinux_audit_rule_init - alloc/init an selinux audit rule structure.
* @field: the field this rule refers to
 * @op: the operator the rule uses
* @rulestr: the text "target" of the rule
* @rule: pointer to the new rule structure returned via this
*
* Returns 0 if successful, -errno if not. On success, the rule structure
* will be allocated internally. The caller must free this structure with
* selinux_audit_rule_free() after use.
*/
int selinux_audit_rule_init(u32 field, u32 op, char *rulestr,
struct selinux_audit_rule **rule);
/**
* selinux_audit_rule_free - free an selinux audit rule structure.
* @rule: pointer to the audit rule to be freed
*
* This will free all memory associated with the given rule.
* If @rule is NULL, no operation is performed.
*/
void selinux_audit_rule_free(struct selinux_audit_rule *rule);
/**
* selinux_audit_rule_match - determine if a context ID matches a rule.
* @sid: the context ID to check
* @field: the field this rule refers to
 * @op: the operator the rule uses
* @rule: pointer to the audit rule to check against
* @actx: the audit context (can be NULL) associated with the check
*
* Returns 1 if the context id matches the rule, 0 if it does not, and
* -errno on failure.
*/
int selinux_audit_rule_match(u32 sid, u32 field, u32 op,
struct selinux_audit_rule *rule,
struct audit_context *actx);
/**
* selinux_audit_set_callback - set the callback for policy reloads.
* @callback: the function to call when the policy is reloaded
*
 * This sets the callback function that will update the rules
* upon policy reloads. This callback should rebuild all existing rules
* using selinux_audit_rule_init().
*/
void selinux_audit_set_callback(int (*callback)(void));
/**
* selinux_sid_to_string - map a security context ID to a string
* @sid: security context ID to be converted.
* @ctx: address of context string to be returned
* @ctxlen: length of returned context string.
*
* Returns 0 if successful, -errno if not. On success, the context
* string will be allocated internally, and the caller must call
* kfree() on it after use.
*/
int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen);
/**
* selinux_get_inode_sid - get the inode's security context ID
* @inode: inode structure to get the sid from.
* @sid: pointer to security context ID to be filled in.
*
* Returns nothing
*/
void selinux_get_inode_sid(const struct inode *inode, u32 *sid);
/**
* selinux_get_ipc_sid - get the ipc security context ID
* @ipcp: ipc structure to get the sid from.
* @sid: pointer to security context ID to be filled in.
*
* Returns nothing
*/
void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid);
/**
* selinux_get_task_sid - return the SID of task
* @tsk: the task whose SID will be returned
* @sid: pointer to security context ID to be filled in.
*
* Returns nothing
*/
void selinux_get_task_sid(struct task_struct *tsk, u32 *sid);
/**
* selinux_string_to_sid - map a security context string to a security ID
* @str: the security context string to be mapped
@ -151,52 +63,6 @@ void selinux_secmark_refcount_inc(void);
void selinux_secmark_refcount_dec(void);
#else
static inline int selinux_audit_rule_init(u32 field, u32 op,
char *rulestr,
struct selinux_audit_rule **rule)
{
return -EOPNOTSUPP;
}
static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule)
{
return;
}
static inline int selinux_audit_rule_match(u32 sid, u32 field, u32 op,
struct selinux_audit_rule *rule,
struct audit_context *actx)
{
return 0;
}
static inline void selinux_audit_set_callback(int (*callback)(void))
{
return;
}
static inline int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen)
{
*ctx = NULL;
*ctxlen = 0;
return 0;
}
static inline void selinux_get_inode_sid(const struct inode *inode, u32 *sid)
{
*sid = 0;
}
static inline void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid)
{
*sid = 0;
}
static inline void selinux_get_task_sid(struct task_struct *tsk, u32 *sid)
{
*sid = 0;
}
static inline int selinux_string_to_sid(const char *str, u32 *sid)
{
*sid = 0;

51
include/linux/semaphore.h Normal file
View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
*
* Distributed under the terms of the GNU GPL, version 2
*
* Please see kernel/semaphore.c for documentation of these functions
*/
#ifndef __LINUX_SEMAPHORE_H
#define __LINUX_SEMAPHORE_H
#include <linux/list.h>
#include <linux/spinlock.h>
/* Please don't access any members of this structure directly */
struct semaphore {
spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
static inline void sema_init(struct semaphore *sem, int val)
{
static struct lock_class_key __key;
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
}
#define init_MUTEX(sem) sema_init(sem, 1)
#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
extern void down(struct semaphore *sem);
extern int __must_check down_interruptible(struct semaphore *sem);
extern int __must_check down_killable(struct semaphore *sem);
extern int __must_check down_trylock(struct semaphore *sem);
extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
extern void up(struct semaphore *sem);
#endif /* __LINUX_SEMAPHORE_H */
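A minimal usage sketch of the generic semaphore API declared above; the device and function names are assumptions:

/* Illustrative only: serialize access to a slow device with a semaphore. */
static DECLARE_MUTEX(my_dev_sem);		/* counting semaphore, count = 1 */

static int my_dev_op(void)
{
	if (down_interruptible(&my_dev_sem))	/* sleeps; fails on a signal */
		return -EINTR;
	/* ... touch the hardware ... */
	up(&my_dev_sem);
	return 0;
}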

View file

@ -62,18 +62,5 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
struct net;
struct seq_net_private {
struct net *net;
};
int seq_open_net(struct inode *, struct file *,
const struct seq_operations *, int);
int seq_release_net(struct inode *, struct file *);
static inline struct net *seq_file_net(struct seq_file *seq)
{
return ((struct seq_net_private *)seq->private)->net;
}
#endif
#endif

View file

@ -0,0 +1,27 @@
#ifndef __SEQ_FILE_NET_H__
#define __SEQ_FILE_NET_H__
#include <linux/seq_file.h>
struct net;
extern struct net init_net;
struct seq_net_private {
#ifdef CONFIG_NET_NS
struct net *net;
#endif
};
int seq_open_net(struct inode *, struct file *,
const struct seq_operations *, int);
int seq_release_net(struct inode *, struct file *);
static inline struct net *seq_file_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
return ((struct seq_net_private *)seq->private)->net;
#else
return &init_net;
#endif
}
#endif
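A hedged sketch of how a per-namespace /proc file would use these helpers; foo_seq_ops and the show callback body are assumptions:

/* Illustrative only: netns-aware seq_file open/show pair. */
static int foo_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);	/* namespace captured at open */

	seq_printf(seq, "namespace %p\n", net);
	return 0;
}

static int foo_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &foo_seq_ops,
			    sizeof(struct seq_net_private));
}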

View file

@ -213,6 +213,10 @@ struct uart_ops {
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
int (*ioctl)(struct uart_port *, unsigned int, unsigned long);
#ifdef CONFIG_CONSOLE_POLL
void (*poll_put_char)(struct uart_port *, unsigned char);
int (*poll_get_char)(struct uart_port *);
#endif
};
#define UART_CONFIG_TYPE (1 << 0)

View file

@ -256,7 +256,10 @@ struct sk_buff {
ktime_t tstamp;
struct net_device *dev;
struct dst_entry *dst;
union {
struct dst_entry *dst;
struct rtable *rtable;
};
struct sec_path *sp;
/*
@ -310,7 +313,10 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
/* 2 byte hole */
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
/* 14 bit hole */
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
@ -657,11 +663,21 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
}
/*
* Insert an sk_buff at the start of a list.
* Insert an sk_buff on a list.
*
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
list->qlen++;
}
/**
 * __skb_queue_after - queue a buffer after a given buffer in a list
@ -678,13 +694,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
struct sk_buff *next;
list->qlen++;
__skb_insert(newsk, prev, prev->next, list);
}
next = prev->next;
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
struct sk_buff_head *list);
static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
__skb_insert(newsk, next->prev, next, list);
}
/**
@ -718,66 +738,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
list->qlen++;
next = (struct sk_buff *)list;
prev = next->prev;
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
}
/**
* __skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. This function does not take any locks
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *next, *prev, *result;
prev = (struct sk_buff *) list;
next = prev->next;
result = NULL;
if (next != prev) {
result = next;
next = next->next;
list->qlen--;
next->prev = prev;
prev->next = next;
result->next = result->prev = NULL;
}
return result;
}
/*
* Insert a packet on a list.
*/
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
list->qlen++;
}
/*
* Place a packet after a given packet in a list.
*/
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
__skb_insert(newsk, old, old->next, list);
__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
/*
@ -797,8 +758,22 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
prev->next = next;
}
/* XXX: more streamlined implementation */
/**
* __skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. This function does not take any locks
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
/**
* __skb_dequeue_tail - remove from the tail of the queue
@ -889,6 +864,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
/*
* Add data to an sk_buff
*/
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
@ -898,26 +874,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
return tmp;
}
/**
* skb_put - add data to a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer. If this would
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
if (unlikely(skb->tail > skb->end))
skb_over_panic(skb, len, current_text_addr());
return tmp;
}
extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
@ -925,24 +882,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
return skb->data;
}
/**
* skb_push - add data to the start of a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer at the buffer
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
if (unlikely(skb->data<skb->head))
skb_under_panic(skb, len, current_text_addr());
return skb->data;
}
extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
@ -950,21 +890,6 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
return skb->data += len;
}
/**
* skb_pull - remove data from the start of a buffer
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the next data in the buffer
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
@ -1205,21 +1130,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
/**
* skb_trim - remove end from a buffer
* @skb: buffer to alter
* @len: new length
*
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len)
__skb_trim(skb, len);
}
extern void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
@ -1302,22 +1213,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
return skb;
}
/**
* dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
return __dev_alloc_skb(length, GFP_ATOMIC);
}
extern struct sk_buff *dev_alloc_skb(unsigned int length);
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask);

View file

@ -45,9 +45,9 @@ struct kmem_cache_cpu {
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
struct list_head full;
#endif
};

13
include/linux/smc91x.h Normal file
View file

@ -0,0 +1,13 @@
#ifndef __SMC91X_H__
#define __SMC91X_H__
#define SMC91X_USE_8BIT (1 << 0)
#define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2)
struct smc91x_platdata {
unsigned long flags;
unsigned long irq_flags; /* IRQF_... */
};
#endif /* __SMC91X_H__ */

View file

@ -295,43 +295,6 @@ do { \
1 : ({ local_irq_restore(flags); 0; }); \
})
/*
* Locks two spinlocks l1 and l2.
* l1_first indicates if spinlock l1 should be taken first.
*/
static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
bool l1_first)
__acquires(l1)
__acquires(l2)
{
if (l1_first) {
spin_lock(l1);
spin_lock(l2);
} else {
spin_lock(l2);
spin_lock(l1);
}
}
/*
* Unlocks two spinlocks l1 and l2.
* l1_taken_first indicates if spinlock l1 was taken first and therefore
* should be released after spinlock l2.
*/
static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
bool l1_taken_first)
__releases(l1)
__releases(l2)
{
if (l1_taken_first) {
spin_unlock(l2);
spin_unlock(l1);
} else {
spin_unlock(l1);
spin_unlock(l2);
}
}
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)

View file

@ -72,10 +72,18 @@ struct ssb_device;
/* Lowlevel read/write operations on the device MMIO.
* Internal, don't use that outside of ssb. */
struct ssb_bus_ops {
u8 (*read8)(struct ssb_device *dev, u16 offset);
u16 (*read16)(struct ssb_device *dev, u16 offset);
u32 (*read32)(struct ssb_device *dev, u16 offset);
void (*write8)(struct ssb_device *dev, u16 offset, u8 value);
void (*write16)(struct ssb_device *dev, u16 offset, u16 value);
void (*write32)(struct ssb_device *dev, u16 offset, u32 value);
#ifdef CONFIG_SSB_BLOCKIO
void (*block_read)(struct ssb_device *dev, void *buffer,
size_t count, u16 offset, u8 reg_width);
void (*block_write)(struct ssb_device *dev, const void *buffer,
size_t count, u16 offset, u8 reg_width);
#endif
};
@ -129,6 +137,10 @@ struct ssb_device {
const struct ssb_bus_ops *ops;
struct device *dev;
/* Pointer to the device that has to be used for
* any DMA related operation. */
struct device *dma_dev;
struct ssb_bus *bus;
struct ssb_device_id id;
@ -243,9 +255,9 @@ struct ssb_bus {
/* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */
struct pcmcia_device *host_pcmcia;
#ifdef CONFIG_SSB_PCIHOST
#ifdef CONFIG_SSB_SPROM
/* Mutex to protect the SPROM writing. */
struct mutex pci_sprom_mutex;
struct mutex sprom_mutex;
#endif
/* ID information about the Chip. */
@ -258,9 +270,6 @@ struct ssb_bus {
struct ssb_device devices[SSB_MAX_NR_CORES];
u8 nr_devices;
/* Reference count. Number of suspended devices. */
u8 suspend_cnt;
/* Software ID number for this bus. */
unsigned int busnumber;
@ -332,6 +341,13 @@ extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
extern void ssb_bus_unregister(struct ssb_bus *bus);
/* Suspend a SSB bus.
* Call this from the parent bus suspend routine. */
extern int ssb_bus_suspend(struct ssb_bus *bus);
/* Resume a SSB bus.
* Call this from the parent bus resume routine. */
extern int ssb_bus_resume(struct ssb_bus *bus);
extern u32 ssb_clockspeed(struct ssb_bus *bus);
/* Is the device enabled in hardware? */
@ -344,6 +360,10 @@ void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags);
/* Device MMIO register read/write functions. */
static inline u8 ssb_read8(struct ssb_device *dev, u16 offset)
{
return dev->ops->read8(dev, offset);
}
static inline u16 ssb_read16(struct ssb_device *dev, u16 offset)
{
return dev->ops->read16(dev, offset);
@ -352,6 +372,10 @@ static inline u32 ssb_read32(struct ssb_device *dev, u16 offset)
{
return dev->ops->read32(dev, offset);
}
static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value)
{
dev->ops->write8(dev, offset, value);
}
static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value)
{
dev->ops->write16(dev, offset, value);
@ -360,6 +384,19 @@ static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value)
{
dev->ops->write32(dev, offset, value);
}
#ifdef CONFIG_SSB_BLOCKIO
static inline void ssb_block_read(struct ssb_device *dev, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
dev->ops->block_read(dev, buffer, count, offset, reg_width);
}
static inline void ssb_block_write(struct ssb_device *dev, const void *buffer,
size_t count, u16 offset, u8 reg_width)
{
dev->ops->block_write(dev, buffer, count, offset, reg_width);
}
#endif /* CONFIG_SSB_BLOCKIO */
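Block I/O lets a driver move a whole buffer through the host bus ops in one call instead of looping over ssb_read32(). A hedged sketch; the FIFO offset and function name are assumptions:

/* Illustrative only: burst-read a device FIFO on SSB_BLOCKIO-enabled builds. */
static void my_fetch_rx_fifo(struct ssb_device *dev, u32 *buf, size_t words)
{
	ssb_block_read(dev, buf, words * sizeof(u32),
		       0x20 /* assumed FIFO offset */, sizeof(u32));
}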
/* Translation (routing) bits that need to be ORed to DMA
@ -412,5 +449,12 @@ extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl);
extern u32 ssb_admatch_base(u32 adm);
extern u32 ssb_admatch_size(u32 adm);
/* PCI device mapping and fixup routines.
* Called from the architecture pcibios init code.
* These are only available on SSB_EMBEDDED configurations. */
#ifdef CONFIG_SSB_EMBEDDED
int ssb_pcibios_plat_dev_init(struct pci_dev *dev);
int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
#endif /* CONFIG_SSB_EMBEDDED */
#endif /* LINUX_SSB_H_ */

View file

@ -367,8 +367,7 @@ static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
extern void ssb_chipcommon_init(struct ssb_chipcommon *cc);
#include <linux/pm.h>
extern void ssb_chipco_suspend(struct ssb_chipcommon *cc, pm_message_t state);
extern void ssb_chipco_suspend(struct ssb_chipcommon *cc);
extern void ssb_chipco_resume(struct ssb_chipcommon *cc);
extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc,
@ -390,6 +389,10 @@ extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc,
u32 ticks);
void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask);
/* Chipcommon GPIO pin access. */
u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask);
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value);

View file

@ -0,0 +1,174 @@
#ifndef LINUX_SSB_DRIVER_GIGE_H_
#define LINUX_SSB_DRIVER_GIGE_H_
#include <linux/ssb/ssb.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#ifdef CONFIG_SSB_DRIVER_GIGE
#define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */
#define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */
#define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */
#define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */
#define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */
#define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */
#define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */
#define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */
#define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */
/* TM Status High flags */
#define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */
/* TM Status Low flags */
#define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */
#define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */
#define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */
/* Boardflags (low) */
#define SSB_GIGE_BFL_ROBOSWITCH 0x0010
#define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory"
#define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O"
struct ssb_gige {
struct ssb_device *dev;
spinlock_t lock;
/* True, if the device has an RGMII bus.
* False, if the device has a GMII bus. */
bool has_rgmii;
/* The PCI controller device. */
struct pci_controller pci_controller;
struct pci_ops pci_ops;
struct resource mem_resource;
struct resource io_resource;
};
/* Check whether a PCI device is a SSB Gigabit Ethernet core. */
extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev);
/* Convert a pci_dev pointer to a ssb_gige pointer. */
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
if (!pdev_is_ssb_gige_core(pdev))
return NULL;
return container_of(pdev->bus->ops, struct ssb_gige, pci_ops);
}
/* Returns whether the PHY is connected by an RGMII bus. */
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
return (dev ? dev->has_rgmii : 0);
}
/* Returns whether we have a Roboswitch. */
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return !!(dev->dev->bus->sprom.boardflags_lo &
SSB_GIGE_BFL_ROBOSWITCH);
return 0;
}
/* Returns whether we can only do one DMA at once. */
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return ((dev->dev->bus->chip_id == 0x4785) &&
(dev->dev->bus->chip_rev < 2));
return 0;
}
/* Returns whether we must flush posted writes. */
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return (dev->dev->bus->chip_id == 0x4785);
return 0;
}
extern char * nvram_get(const char *name);
/* Get the device MAC address */
static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
#ifdef CONFIG_BCM947XX
char *res = nvram_get("et0macaddr");
if (res)
memcpy(macaddr, res, 6);
#endif
}
extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
struct pci_dev *pdev);
extern int ssb_gige_map_irq(struct ssb_device *sdev,
const struct pci_dev *pdev);
/* The GigE driver is not a standalone module, because we don't have support
* for unregistering the driver. So we could not unload the module anyway. */
extern int ssb_gige_init(void);
static inline void ssb_gige_exit(void)
{
/* Currently we can not unregister the GigE driver,
* because we can not unregister the PCI bridge. */
BUG();
}
#else /* CONFIG_SSB_DRIVER_GIGE */
/* Gigabit Ethernet driver disabled */
static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
struct pci_dev *pdev)
{
return -ENOSYS;
}
static inline int ssb_gige_map_irq(struct ssb_device *sdev,
const struct pci_dev *pdev)
{
return -ENOSYS;
}
static inline int ssb_gige_init(void)
{
return 0;
}
static inline void ssb_gige_exit(void)
{
}
static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
return 0;
}
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
return NULL;
}
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
return 0;
}
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
return 0;
}
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
return 0;
}
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
return 0;
}
#endif /* CONFIG_SSB_DRIVER_GIGE */
#endif /* LINUX_SSB_DRIVER_GIGE_H_ */

View file

@ -1,6 +1,11 @@
#ifndef LINUX_SSB_PCICORE_H_
#define LINUX_SSB_PCICORE_H_
#include <linux/types.h>
struct pci_dev;
#ifdef CONFIG_SSB_DRIVER_PCICORE
/* PCI core registers. */
@ -88,6 +93,9 @@ extern void ssb_pcicore_init(struct ssb_pcicore *pc);
extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
struct ssb_device *dev);
int ssb_pcicore_plat_dev_init(struct pci_dev *d);
int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
#else /* CONFIG_SSB_DRIVER_PCICORE */
@ -107,5 +115,16 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
return 0;
}
static inline
int ssb_pcicore_plat_dev_init(struct pci_dev *d)
{
return -ENODEV;
}
static inline
int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return -ENODEV;
}
#endif /* CONFIG_SSB_DRIVER_PCICORE */
#endif /* LINUX_SSB_PCICORE_H_ */

View file

@ -239,6 +239,11 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
return (struct tcp_request_sock *)req;
}
struct tcp_deferred_accept_info {
struct sock *listen_sk;
struct request_sock *request;
};
struct tcp_sock {
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
@ -374,6 +379,8 @@ struct tcp_sock {
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
struct tcp_deferred_accept_info defer_tcp_accept;
unsigned long last_synq_overflow;
u32 tso_deferred;

View file

@ -50,7 +50,7 @@ struct thermal_cooling_device_ops {
};
#define THERMAL_TRIPS_NONE -1
#define THERMAL_MAX_TRIPS 10
#define THERMAL_MAX_TRIPS 12
#define THERMAL_NAME_LENGTH 20
struct thermal_cooling_device {
int id;

View file

@ -9,6 +9,9 @@
#include <linux/types.h>
struct timespec;
struct compat_timespec;
/*
* System call restart block.
*/
@ -26,6 +29,15 @@ struct restart_block {
u32 bitset;
u64 time;
} futex;
/* For nanosleep */
struct {
clockid_t index;
struct timespec __user *rmtp;
#ifdef CONFIG_COMPAT
struct compat_timespec __user *compat_rmtp;
#endif
u64 expires;
} nanosleep;
};
};

View file

@ -86,9 +86,10 @@ static inline int transport_container_register(struct transport_container *tc)
return attribute_container_register(&tc->ac);
}
static inline int transport_container_unregister(struct transport_container *tc)
static inline void transport_container_unregister(struct transport_container *tc)
{
return attribute_container_unregister(&tc->ac);
if (unlikely(attribute_container_unregister(&tc->ac)))
BUG();
}
int transport_class_register(struct transport_class *);

View file

@ -125,6 +125,7 @@
#include <linux/cdev.h>
struct tty_struct;
struct tty_driver;
struct tty_operations {
int (*open)(struct tty_struct * tty, struct file * filp);
@ -157,6 +158,11 @@ struct tty_operations {
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
};
struct tty_driver {
@ -220,6 +226,11 @@ struct tty_driver {
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
struct list_head tty_drivers;
};
@ -230,6 +241,7 @@ struct tty_driver *alloc_tty_driver(int lines);
void put_tty_driver(struct tty_driver *driver);
void tty_set_operations(struct tty_driver *driver,
const struct tty_operations *op);
extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
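The poll hooks exist so a debugger front end (e.g. kgdb over a serial console) can do character I/O with interrupts off. A hedged sketch of how such a front end might attach; the function name and flow are assumptions:

/* Illustrative only: locate a polling-capable console and emit one byte. */
static struct tty_driver *attach_poll_console(char *name)
{
	int line = 0;
	struct tty_driver *drv = tty_find_polling_driver(name, &line);

	if (!drv || !drv->poll_init || drv->poll_init(drv, line, NULL))
		return NULL;
	drv->poll_put_char(drv, line, '!');	/* works with IRQs disabled */
	return drv;
}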
/* tty driver magic number */
#define TTY_DRIVER_MAGIC 0x5402

View file

@ -84,4 +84,26 @@ static inline unsigned long __copy_from_user_nocache(void *to,
ret; \
})
/*
* probe_kernel_read(): safely attempt to read from a location
* @dst: pointer to the buffer that shall take the data
* @src: address to read from
* @size: size of the data chunk
*
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
* @size: size of the data chunk
*
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_write(void *dst, void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
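probe_kernel_read()/probe_kernel_write() run the access under a fault fixup, so a bad address returns -EFAULT instead of oopsing. A hedged usage sketch; the wrapper name is an assumption:

/* Illustrative only: peek at a possibly-invalid kernel address. */
static int read_word_safely(void *addr, unsigned long *out)
{
	unsigned long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		return -EFAULT;		/* fault was caught, nothing copied */
	*out = val;
	return 0;
}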

View file

@ -26,15 +26,6 @@ struct udphdr {
__sum16 check;
};
#ifdef __KERNEL__
#include <linux/skbuff.h>
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
return (struct udphdr *)skb_transport_header(skb);
}
#endif
/* UDP socket options */
#define UDP_CORK 1 /* Never send partially complete segments */
#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
@ -45,9 +36,14 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#ifdef __KERNEL__
#include <linux/types.h>
#include <net/inet_sock.h>
#include <linux/skbuff.h>
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
return (struct udphdr *)skb_transport_header(skb);
}
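udp_hdr() simply reinterprets the transport header pointer, so it is only meaningful once skb->transport_header has been set by the receive path. A hedged sketch; the logging function is an assumption:

/* Illustrative only: read the port pair once the transport header is set. */
static void log_udp_ports(const struct sk_buff *skb)
{
	const struct udphdr *uh = udp_hdr(skb);

	printk(KERN_DEBUG "udp %u -> %u\n",
	       ntohs(uh->source), ntohs(uh->dest));
}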
#define UDP_HTABLE_SIZE 128
struct udp_sock {
@ -82,6 +78,7 @@ static inline struct udp_sock *udp_sk(const struct sock *sk)
{
return (struct udp_sock *)sk;
}
#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
#endif

View file

@ -455,6 +455,7 @@
#define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */
#define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */
#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */
#define IW_MODE_MESH 7 /* Mesh (IEEE 802.11s) network */
/* Statistics flags (bitmask in updated) */
#define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */

View file

@ -113,7 +113,8 @@ enum
{
XFRM_POLICY_TYPE_MAIN = 0,
XFRM_POLICY_TYPE_SUB = 1,
XFRM_POLICY_TYPE_MAX = 2
XFRM_POLICY_TYPE_MAX = 2,
XFRM_POLICY_TYPE_ANY = 255
};
enum