Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
This commit is contained in: commit cf9fe114e3
745 changed files with 30239 additions and 9943 deletions
@@ -119,7 +119,7 @@ extern int pci_mmcfg_config_num;
 extern int sbf_port;
 extern unsigned long acpi_realmode_flags;
 
-int acpi_register_gsi (u32 gsi, int triggering, int polarity);
+int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
 
 #ifdef CONFIG_X86_IO_APIC
@@ -159,6 +159,7 @@
 #define UART01x_FR_MODEM_ANY	(UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
 
 #ifndef __ASSEMBLY__
+struct amba_device; /* in uncompress this is included but amba/bus.h is not */
 struct amba_pl010_data {
 	void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
 };
@@ -14,13 +14,12 @@
 #ifndef _LINUX_AUTO_FS_H
 #define _LINUX_AUTO_FS_H
 
+#include <linux/types.h>
 #ifdef __KERNEL__
 #include <linux/fs.h>
 #include <linux/limits.h>
-#include <linux/types.h>
 #include <linux/ioctl.h>
 #else
-#include <asm/types.h>
 #include <sys/ioctl.h>
 #endif /* __KERNEL__ */
 
@@ -116,9 +116,9 @@ struct blk_io_trace {
  * The remap event
  */
 struct blk_io_trace_remap {
-	__be32 device;
 	__be32 device_from;
-	__be64 sector;
+	__be32 device_to;
+	__be64 sector_from;
 };
 
 enum {
@@ -165,8 +165,9 @@ struct blk_trace {
 
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q,
-	char *name, dev_t dev, struct blk_user_trace_setup *buts);
+extern int do_blk_trace_setup(struct request_queue *q, char *name,
+			      dev_t dev, struct block_device *bdev,
+			      struct blk_user_trace_setup *buts);
 extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 
 /**
@@ -193,22 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
 				void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+			   struct block_device *bdev,
 			   char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
+extern int blk_trace_init_sysfs(struct device *dev);
+
+extern struct attribute_group blk_trace_attr_group;
 
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
-#define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
-#define blk_trace_shutdown(q)			do { } while (0)
-#define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
-#define blk_add_driver_data(q, rq, data, len)	do {} while (0)
-#define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
-#define blk_trace_startstop(q, start)		(-ENOTTY)
-#define blk_trace_remove(q)			(-ENOTTY)
-#define blk_add_trace_msg(q, fmt, ...)		do { } while (0)
+# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
+# define blk_trace_shutdown(q)				do { } while (0)
+# define do_blk_trace_setup(q, name, dev, bdev, buts)	(-ENOTTY)
+# define blk_add_driver_data(q, rq, data, len)		do {} while (0)
+# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
+# define blk_trace_startstop(q, start)			(-ENOTTY)
+# define blk_trace_remove(q)				(-ENOTTY)
+# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
+static inline int blk_trace_init_sysfs(struct device *dev)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
+#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+
+static inline int blk_cmd_buf_len(struct request *rq)
+{
+	return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+}
+
+extern void blk_dump_cmd(char *buf, struct request *rq);
+extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
+
+#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+
 #endif /* __KERNEL__ */
 #endif
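Note: in the !CONFIG_BLK_DEV_IO_TRACE branch above, every entry point collapses to either a constant error code or an empty statement. The do { } while (0) form is the standard kernel idiom for a no-op statement macro: it swallows the trailing semicolon and stays safe in unbraced if/else bodies. A minimal sketch of the idiom, using hypothetical foo_* names that are not part of this patch:

    #include <linux/errno.h>

    #ifdef CONFIG_FOO
    extern void foo_start(int id);
    extern int  foo_setup(int id);
    #else
    /* Stubs: foo_start() becomes a single empty statement, and
     * foo_setup() still yields an error code callers can test. */
    # define foo_start(id)	do { } while (0)
    # define foo_setup(id)	(-ENOTTY)
    #endif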
@@ -222,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
 int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
 int get_compat_sigevent(struct sigevent *event,
 		const struct compat_sigevent __user *u_event);
+long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+				  struct compat_siginfo __user *uinfo);
 
 static inline int compat_timeval_compare(struct compat_timeval *lhs,
 					 struct compat_timeval *rhs)
@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t;
 
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
 void free_cpumask_var(cpumask_var_t mask);
 void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
 	return true;
 }
 
+static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	cpumask_clear(*mask);
+	return true;
+}
+
+static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+					  int node)
+{
+	cpumask_clear(*mask);
+	return true;
+}
+
 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 }
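Note: the zalloc_cpumask_var()/zalloc_cpumask_var_node() variants added above fold cpumask_clear() into the allocation, so callers no longer pair an alloc with an explicit clear. A sketch of the before/after call pattern (illustrative, not taken verbatim from this merge):

    cpumask_var_t mask;

    /* old style: allocate, then zero by hand */
    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
    	return -ENOMEM;
    cpumask_clear(mask);

    /* new style: one call, the mask comes back zeroed */
    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
    	return -ENOMEM;
    /* ... use mask ... */
    free_cpumask_var(mask);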
@@ -13,6 +13,7 @@
 #define _LINUX_CRED_H
 
 #include <linux/capability.h>
+#include <linux/init.h>
 #include <linux/key.h>
 #include <asm/atomic.h>
 
@@ -32,6 +32,8 @@ extern void dma_debug_add_bus(struct bus_type *bus);
 
 extern void dma_debug_init(u32 num_entries);
 
+extern int dma_debug_resize_entries(u32 num_entries);
+
 extern void debug_dma_map_page(struct device *dev, struct page *page,
 			       size_t offset, size_t size,
 			       int direction, dma_addr_t dma_addr,
@@ -91,6 +93,11 @@ static inline void dma_debug_init(u32 num_entries)
 {
 }
 
+static inline int dma_debug_resize_entries(u32 num_entries)
+{
+	return 0;
+}
+
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction, dma_addr_t dma_addr,
@@ -108,6 +108,7 @@ struct irte {
 };
 #ifdef CONFIG_INTR_REMAP
 extern int intr_remapping_enabled;
+extern int intr_remapping_supported(void);
 extern int enable_intr_remapping(int);
 extern void disable_intr_remapping(void);
 extern int reenable_intr_remapping(int);
@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
 }
 #define irq_remapped(irq)		(0)
 #define enable_intr_remapping(mode)	(-1)
+#define disable_intr_remapping()	(0)
+#define reenable_intr_remapping(mode)	(0)
 #define intr_remapping_enabled		(0)
 #endif
 
@@ -233,8 +233,6 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
 
-extern void ftrace_release(void *start, unsigned long size);
-
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
@@ -325,13 +323,8 @@ static inline void __ftrace_enabled_restore(int enabled)
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
-extern void ftrace_init_module(struct module *mod,
-			       unsigned long *start, unsigned long *end);
 #else
 static inline void ftrace_init(void) { }
-static inline void
-ftrace_init_module(struct module *mod,
-		   unsigned long *start, unsigned long *end) { }
 #endif
 
 /*
@@ -368,6 +361,7 @@ struct ftrace_ret_stack {
 	unsigned long ret;
 	unsigned long func;
 	unsigned long long calltime;
+	unsigned long long subtime;
 };
 
 /*
@@ -379,8 +373,6 @@ extern void return_to_handler(void);
 
 extern int
 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
-extern void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
 
 /*
  * Sometimes we don't want to trace a function with the function
@@ -496,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 extern int ftrace_dump_on_oops;
 
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION		.trace_recursion = 0,
+#endif
+
 #endif /* CONFIG_TRACING */
 
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
+
 #ifdef CONFIG_HW_BRANCH_TRACER
 
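Note: the INIT_TRACE_RECURSION block above is the common fallback-define idiom: a config-dependent section may define the initializer fragment, and a later #ifndef guarantees the macro always exists (expanding to nothing when the feature is off), so INIT_TASK can reference it unconditionally. The shape of the idiom, sketched with a hypothetical INIT_FEATURE name:

    /* A feature may contribute an initializer fragment... */
    #ifdef CONFIG_FEATURE
    # define INIT_FEATURE	.feature_field = 0,
    #endif

    /* ...and the fallback keeps every configuration compiling. */
    #ifndef INIT_FEATURE
    # define INIT_FEATURE
    #endif

    #define INIT_TASK(tsk)	{ /* ... */ INIT_FEATURE /* ... */ }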
include/linux/ftrace_event.h (new file, 172 lines)
@@ -0,0 +1,172 @@
+#ifndef _LINUX_FTRACE_EVENT_H
+#define _LINUX_FTRACE_EVENT_H
+
+#include <linux/trace_seq.h>
+#include <linux/ring_buffer.h>
+#include <linux/percpu.h>
+
+struct trace_array;
+struct tracer;
+struct dentry;
+
+DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
+
+struct trace_print_flags {
+	unsigned long		mask;
+	const char		*name;
+};
+
+const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+				   unsigned long flags,
+				   const struct trace_print_flags *flag_array);
+
+const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+				     const struct trace_print_flags *symbol_array);
+
+/*
+ * The trace entry - the most basic unit of tracing. This is what
+ * is printed in the end as a single line in the trace output, such as:
+ *
+ *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
+ */
+struct trace_entry {
+	unsigned short		type;
+	unsigned char		flags;
+	unsigned char		preempt_count;
+	int			pid;
+	int			tgid;
+};
+
+#define FTRACE_MAX_EVENT						\
+	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
+
+/*
+ * Trace iterator - used by printout routines who present trace
+ * results to users and which routines might sleep, etc:
+ */
+struct trace_iterator {
+	struct trace_array	*tr;
+	struct tracer		*trace;
+	void			*private;
+	int			cpu_file;
+	struct mutex		mutex;
+	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
+	unsigned long		iter_flags;
+
+	/* The below is zeroed out in pipe_read */
+	struct trace_seq	seq;
+	struct trace_entry	*ent;
+	int			cpu;
+	u64			ts;
+
+	loff_t			pos;
+	long			idx;
+
+	cpumask_var_t		started;
+};
+
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+					      int flags);
+struct trace_event {
+	struct hlist_node	node;
+	struct list_head	list;
+	int			type;
+	trace_print_func	trace;
+	trace_print_func	raw;
+	trace_print_func	hex;
+	trace_print_func	binary;
+};
+
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+/* Return values for print_line callback */
+enum print_line_t {
+	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
+	TRACE_TYPE_HANDLED	= 1,
+	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
+};
+
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(int type, unsigned long len,
+				  unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc);
+void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+
+void tracing_record_cmdline(struct task_struct *tsk);
+
+struct ftrace_event_call {
+	struct list_head	list;
+	char			*name;
+	char			*system;
+	struct dentry		*dir;
+	struct trace_event	*event;
+	int			enabled;
+	int			(*regfunc)(void);
+	void			(*unregfunc)(void);
+	int			id;
+	int			(*raw_init)(void);
+	int			(*show_format)(struct trace_seq *s);
+	int			(*define_fields)(void);
+	struct list_head	fields;
+	int			filter_active;
+	void			*filter;
+	void			*mod;
+
+#ifdef CONFIG_EVENT_PROFILE
+	atomic_t	profile_count;
+	int		(*profile_enable)(struct ftrace_event_call *);
+	void		(*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
+#define MAX_FILTER_PRED		32
+#define MAX_FILTER_STR_VAL	128
+
+extern int init_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_current_check_discard(struct ftrace_event_call *call,
+					void *rec,
+					struct ring_buffer_event *event);
+
+extern int trace_define_field(struct ftrace_event_call *call, char *type,
+			      char *name, int offset, int size, int is_signed);
+
+#define is_signed_type(type)	(((type)(-1)) < 0)
+
+int trace_set_clr_event(const char *system, const char *event, int set);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...)				\
+do {									\
+	__trace_printk_check_format(fmt, ##args);			\
+	tracing_record_cmdline(current);				\
+	if (__builtin_constant_p(fmt)) {				\
+		static const char *trace_printk_fmt			\
+		  __attribute__((section("__trace_printk_fmt"))) =	\
+			__builtin_constant_p(fmt) ? fmt : NULL;		\
+									\
+		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
+	} else								\
+		__trace_printk(ip, fmt, ##args);			\
+} while (0)
+
+#define __common_field(type, item, is_signed)				\
+	ret = trace_define_field(event_call, #type, "common_" #item,	\
+				 offsetof(typeof(field.ent), item),	\
+				 sizeof(field.ent.item), is_signed);	\
+	if (ret)							\
+		return ret;
+
+#endif /* _LINUX_FTRACE_EVENT_H */
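Note: struct trace_event above hangs four output callbacks (human-readable, raw, hex, binary) off a single event type number, and register_ftrace_event() wires it into the output path. A hedged registration sketch; the my_event names are illustrative, and the convention that a zero .type asks the core to assign a free id is a recollection rather than something shown in this hunk:

    static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags)
    {
    	/* append one line of text for this entry */
    	if (!trace_seq_printf(&iter->seq, "my event on cpu %d\n", iter->cpu))
    		return TRACE_TYPE_PARTIAL_LINE;	/* seq full: flush and retry */
    	return TRACE_TYPE_HANDLED;
    }

    static struct trace_event my_event = {
    	.type	= 0,			/* 0: let the core pick an id */
    	.trace	= my_event_trace,	/* default text output */
    };

    /* in init code: */
    int ret = register_ftrace_event(&my_event);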
@@ -23,6 +23,8 @@ union ktime;
 #define FUTEX_TRYLOCK_PI	8
 #define FUTEX_WAIT_BITSET	9
 #define FUTEX_WAKE_BITSET	10
+#define FUTEX_WAIT_REQUEUE_PI	11
+#define FUTEX_CMP_REQUEUE_PI	12
 
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CLOCK_REALTIME	256
@@ -38,6 +40,10 @@ union ktime;
 #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAIT_BITSET_PRIVATE	(FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_BITSET_PRIVATE	(FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_REQUEUE_PI_PRIVATE	(FUTEX_WAIT_REQUEUE_PI | \
+					 FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PI_PRIVATE	(FUTEX_CMP_REQUEUE_PI | \
+					 FUTEX_PRIVATE_FLAG)
 
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
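Note: the *_PRIVATE opcodes are nothing more than the base opcode OR'd with FUTEX_PRIVATE_FLAG (so FUTEX_WAIT_REQUEUE_PI_PRIVATE is 11 | 128). A hedged userspace sketch of issuing the new requeue-PI wait; the argument placement follows the generic futex(2) calling convention and is illustrative only, with uaddr, uaddr2, and expected_val assumed to be set up by the caller:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Wait on uaddr; on wakeup the kernel may have requeued us onto the
     * PI futex at uaddr2. PRIVATE skips the cross-process key lookup. */
    long r = syscall(SYS_futex, uaddr,
    		 FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG,
    		 expected_val, NULL /* no timeout */,
    		 uaddr2, FUTEX_BITSET_MATCH_ANY);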
@@ -16,35 +16,33 @@
 struct fbd_ioat {
 	unsigned int vendor;
 	unsigned int ioat_dev;
+	unsigned int enabled;
 };
 
 /*
  * The i5000 chip-set has the same hooks as the i7300
- * but support is disabled by default because this driver
- * has not been validated on that platform.
+ * but it is not enabled by default and must be
+ * manually enabled with "forceload=1" because it is
+ * only lightly validated.
  */
-#define SUPPORT_I5000 0
 
 static const struct fbd_ioat fbd_ioat_list[] = {
-	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
-#if SUPPORT_I5000
-	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
-#endif
+	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
+	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
 	{0, 0}
 };
 
 /* table of devices that work with this driver */
 static const struct pci_device_id pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
-#if SUPPORT_I5000
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
-#endif
 	{ } /* Terminating entry */
 };
 
 /* Check for known platforms with I/O-AT */
 static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
-					    struct pci_dev **ioat_dev)
+					    struct pci_dev **ioat_dev,
+					    int enable_all)
 {
 	int i;
 	struct pci_dev *memdev, *dmadev;
@@ -69,6 +67,8 @@ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
 	for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
 		if (dmadev->vendor == fbd_ioat_list[i].vendor &&
 		    dmadev->device == fbd_ioat_list[i].ioat_dev) {
+			if (!(fbd_ioat_list[i].enabled || enable_all))
+				continue;
 			if (fbd_dev)
 				*fbd_dev = memdev;
 			if (ioat_dev)
@@ -1109,7 +1109,7 @@ void ide_fix_driveid(u16 *);
 
 extern void ide_fixstring(u8 *, const int, const int);
 
-int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
+int ide_busy_sleep(ide_drive_t *, unsigned long, int);
 
 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
 
@@ -174,6 +174,7 @@ extern struct cred init_cred;
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
+	INIT_TRACE_RECURSION						\
 }
 
 
@@ -656,6 +656,7 @@ struct input_absinfo {
 #define ABS_MT_POSITION_Y	0x36	/* Center Y ellipse position */
 #define ABS_MT_TOOL_TYPE	0x37	/* Type of touching device */
 #define ABS_MT_BLOB_ID		0x38	/* Group a set of packets as a blob */
+#define ABS_MT_TRACKING_ID	0x39	/* Unique ID of initiated contact */
 
 #define ABS_MAX			0x3f
 #define ABS_CNT			(ABS_MAX+1)
@@ -566,6 +566,6 @@ struct irq_desc;
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern int arch_init_chip_data(struct irq_desc *desc, int node);
 
 #endif
@@ -117,7 +117,7 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);
 
 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq,
+	int		(*set_affinity)(unsigned int irq,
 					const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
@@ -187,7 +187,7 @@ struct irq_desc {
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
-	unsigned int		cpu;
+	unsigned int		node;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
 #endif
@@ -201,26 +201,23 @@ struct irq_desc {
 } ____cacheline_internodealigned_in_smp;
 
 extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-					struct irq_desc *desc, int cpu);
+					struct irq_desc *desc, int node);
 extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
 #ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
-#else /* CONFIG_SPARSE_IRQ */
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-#endif /* CONFIG_SPARSE_IRQ */
-
-extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
-
-static inline struct irq_desc *
-irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-	return irq_to_desc(irq);
-#else
-	return desc;
 #endif
+
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
+#else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+	return desc;
 }
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 
 /*
  * Migration helpers for obsolete names, they will go away:
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq);
 extern void set_irq_probe(unsigned int irq);
 
 /* Handle dynamic irq creation and destruction */
-extern unsigned int create_irq_nr(unsigned int irq_want);
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
@@ -424,47 +421,48 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #ifdef CONFIG_SMP
 /**
- * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * alloc_desc_masks - allocate cpumasks for irq_desc
  * @desc:	pointer to irq_desc struct
  * @cpu:	cpu which will be handling the cpumasks
  * @boot:	true if need bootmem
  *
  * Allocates affinity and pending_mask cpumask if required.
  * Returns true if successful (or not required).
- * Side effect: affinity has all bits set, pending_mask has all bits clear.
  */
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
-								bool boot)
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+							bool boot)
 {
-	int node;
-
+#ifdef CONFIG_CPUMASK_OFFSTACK
 	if (boot) {
 		alloc_bootmem_cpumask_var(&desc->affinity);
-		cpumask_setall(desc->affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 		alloc_bootmem_cpumask_var(&desc->pending_mask);
-		cpumask_clear(desc->pending_mask);
 #endif
 		return true;
 	}
 
-	node = cpu_to_node(cpu);
-
 	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
 		return false;
-	cpumask_setall(desc->affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
 		free_cpumask_var(desc->affinity);
 		return false;
 	}
-	cpumask_clear(desc->pending_mask);
 #endif
+#endif
 	return true;
 }
 
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
 /**
  * init_copy_desc_masks - copy cpumasks for irq_desc
  * @old_desc:	pointer to old irq_desc struct
@@ -478,7 +476,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
 static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 					struct irq_desc *new_desc)
 {
-#ifdef CONFIG_CPUMASKS_OFFSTACK
+#ifdef CONFIG_CPUMASK_OFFSTACK
 	cpumask_copy(new_desc->affinity, old_desc->affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -499,12 +497,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc,
 
 #else /* !CONFIG_SMP */
 
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 								bool boot)
 {
 	return true;
 }
 
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
 static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 					struct irq_desc *new_desc)
 {
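Note: alongside the node rework, the set_affinity hook in struct irq_chip (first hunk of this file) now returns int, so a chip can report failure instead of silently ignoring the request. A hedged sketch of an implementation against the new signature, using hypothetical my_chip names:

    static int my_chip_set_affinity(unsigned int irq, const struct cpumask *dest)
    {
    	unsigned int cpu = cpumask_first(dest);

    	if (cpu >= nr_cpu_ids)	/* empty mask: refuse */
    		return -EINVAL;

    	/* ... program the interrupt controller to target 'cpu' ... */
    	return 0;		/* success is now visible to the core */
    }

    static struct irq_chip my_chip = {
    	.name		= "my-chip",
    	.set_affinity	= my_chip_set_affinity,
    };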
include/linux/kmemtrace.h (new file, 25 lines)
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <trace/events/kmem.h>
+
+#ifdef CONFIG_KMEMTRACE
+extern void kmemtrace_init(void);
+#else
+static inline void kmemtrace_init(void)
+{
+}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
@@ -19,6 +19,7 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
+struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1031,8 +1032,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1319,8 +1318,8 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern void *alloc_locked_buffer(size_t size);
-extern void free_locked_buffer(void *buffer, size_t size);
-extern void release_locked_buffer(void *buffer, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+				 size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
@@ -30,6 +30,8 @@ extern unsigned int kmmio_count;
 
 extern int register_kmmio_probe(struct kmmio_probe *p);
 extern void unregister_kmmio_probe(struct kmmio_probe *p);
+extern int kmmio_init(void);
+extern void kmmio_cleanup(void);
 
 #ifdef CONFIG_MMIOTRACE
 /* kmmio is active by some kmmio_probes? */
@@ -337,6 +337,14 @@ struct module
 	const char **trace_bprintk_fmt_start;
 	unsigned int num_trace_bprintk_fmt;
 #endif
+#ifdef CONFIG_EVENT_TRACING
+	struct ftrace_event_call *trace_events;
+	unsigned int num_trace_events;
+#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+	unsigned long *ftrace_callsites;
+	unsigned int num_ftrace_callsites;
+#endif
 
 #ifdef CONFIG_MODULE_UNLOAD
 	/* What modules depend on me? */
@@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #endif
@@ -1,6 +1,7 @@
 #ifndef __NET_DROPMON_H
 #define __NET_DROPMON_H
 
+#include <linux/types.h>
 #include <linux/netlink.h>
 
 struct net_dm_drop_point {
@@ -35,6 +35,9 @@ enum tcp_conntrack {
 /* Has unacknowledged data */
 #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED	0x10
 
+/* The field td_maxack has been set */
+#define IP_CT_TCP_FLAG_MAXACK_SET		0x20
+
 struct nf_ct_tcp_flags {
 	__u8 flags;
 	__u8 mask;
@@ -46,6 +49,7 @@ struct ip_ct_tcp_state {
 	u_int32_t	td_end;		/* max of seq + len */
 	u_int32_t	td_maxend;	/* max of ack + max(win, 1) */
 	u_int32_t	td_maxwin;	/* max(win) */
+	u_int32_t	td_maxack;	/* max of ack */
 	u_int8_t	td_scale;	/* window scale factor */
 	u_int8_t	flags;		/* per direction options */
 };
@@ -324,6 +324,10 @@ struct parport {
 	int spintime;
 	atomic_t ref_count;
 
+	unsigned long devflags;
+#define PARPORT_DEVPROC_REGISTERED	0
+	struct pardevice *proc_device;	/* Currently register proc device */
+
 	struct list_head full_list;
 	struct parport *slaves[3];
 };
@@ -1406,7 +1406,7 @@
 #define PCI_DEVICE_ID_VIA_82C598_1	0x8598
 #define PCI_DEVICE_ID_VIA_838X_1	0xB188
 #define PCI_DEVICE_ID_VIA_83_87XX_1	0xB198
-#define PCI_DEVICE_ID_VIA_C409_IDE	0XC409
+#define PCI_DEVICE_ID_VIA_VX855_IDE	0xC409
 #define PCI_DEVICE_ID_VIA_ANON		0xFFFF
 
 #define PCI_VENDOR_ID_SIEMENS		0x110A
@@ -95,7 +95,6 @@ extern void __ptrace_link(struct task_struct *child,
 				struct task_struct *new_parent);
 extern void __ptrace_unlink(struct task_struct *child);
 extern void exit_ptrace(struct task_struct *tracer);
-extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
 #define PTRACE_MODE_READ   1
 #define PTRACE_MODE_ATTACH 2
 /* Returns 0 on success, -errno on denial. */
@@ -327,15 +326,6 @@ static inline void user_enable_block_step(struct task_struct *task)
 #define arch_ptrace_untrace(task)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_fork
-/*
- * Do machine-specific work to initialize a new task.
- *
- * This is called from copy_process().
- */
-#define arch_ptrace_fork(child, clone_flags)	do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
@@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	at->prev = last;
 }
 
+/**
+ * list_entry_rcu - get the struct for this entry
+ * @ptr:        the &struct list_head pointer.
+ * @type:       the type of the struct this is embedded in.
+ * @member:     the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_entry_rcu(ptr, type, member) \
+	container_of(rcu_dereference(ptr), type, member)
+
+/**
+ * list_first_entry_rcu - get the first element from a list
+ * @ptr:        the list head to take the element from.
+ * @type:       the type of the struct this is embedded in.
+ * @member:     the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_first_entry_rcu(ptr, type, member) \
+	list_entry_rcu((ptr)->next, type, member)
+
 #define __list_for_each_rcu(pos, head) \
 	for (pos = rcu_dereference((head)->next); \
 	     pos != (head); \
@@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define list_for_each_entry_rcu(pos, head, member) \
-	for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
 		prefetch(pos->member.next), &pos->member != (head); \
-		pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
+		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
 
 /**
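Note: list_entry_rcu() folds the rcu_dereference() into the entry accessor, which is what lets list_for_each_entry_rcu() in the second hunk shrink to one helper call per step. Reader-side usage is unchanged; a sketch assuming a caller-defined struct foo and foo_list:

    struct foo {
    	int			key;
    	struct list_head	list;
    };

    struct foo *f;

    /* Readers must hold rcu_read_lock() across the whole traversal. */
    rcu_read_lock();
    list_for_each_entry_rcu(f, &foo_list, list) {
    	if (f->key == wanted)
    		break;		/* f stays valid until rcu_read_unlock() */
    }
    rcu_read_unlock();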
@@ -161,8 +161,15 @@ struct rcu_data {
 	unsigned long offline_fqs;	/* Kicked due to being offline. */
 	unsigned long resched_ipi;	/* Sent a resched IPI. */
 
-	/* 5) For future __rcu_pending statistics. */
+	/* 5) __rcu_pending() statistics. */
 	long n_rcu_pending;		/* rcu_pending() calls since boot. */
+	long n_rp_qs_pending;
+	long n_rp_cb_ready;
+	long n_rp_cpu_needs_gp;
+	long n_rp_gp_completed;
+	long n_rp_gp_started;
+	long n_rp_need_fqs;
+	long n_rp_need_nothing;
 
 	int cpu;
 };
@@ -11,7 +11,7 @@ struct ring_buffer_iter;
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
-	u32		type:2, len:3, time_delta:27;
+	u32		type_len:5, time_delta:27;
 	u32		array[];
 };
 
@@ -24,7 +24,8 @@ struct ring_buffer_event {
  *				 size is variable depending on how much
  *				 padding is needed
  *				 If time_delta is non zero:
- *				 everything else same as RINGBUF_TYPE_DATA
+ *				 array[0] holds the actual length
+ *				 size = 4 + length (bytes)
  *
  * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
  *				 array[0] = time delta (28 .. 59)
@@ -35,22 +36,23 @@ struct ring_buffer_event {
  *				array[1..2] = tv_sec
  *				 size = 16 bytes
 *
- * @RINGBUF_TYPE_DATA:	Data record
- *				 If len is zero:
+ * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
+ *				 Data record
+ *				 If type_len is zero:
 *				 array[0] holds the actual length
 *				 array[1..(length+3)/4] holds data
- *				 size = 4 + 4 + length (bytes)
+ *				 size = 4 + length (bytes)
 *				 else
- *				 length = len << 2
+ *				 length = type_len << 2
 *				 array[0..(length+3)/4-1] holds data
 *				 size = 4 + length (bytes)
 */
 enum ring_buffer_type {
+	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
 	RINGBUF_TYPE_PADDING,
 	RINGBUF_TYPE_TIME_EXTEND,
 	/* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
 	RINGBUF_TYPE_TIME_STAMP,
-	RINGBUF_TYPE_DATA,
 };
 
 unsigned ring_buffer_event_length(struct ring_buffer_event *event);
@@ -68,13 +70,54 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
 	return event->time_delta;
 }
 
+/*
+ * ring_buffer_event_discard can discard any event in the ring buffer.
+ *   it is up to the caller to protect against a reader from
+ *   consuming it or a writer from wrapping and replacing it.
+ *
+ * No external protection is needed if this is called before
+ * the event is committed. But in that case it would be better to
+ * use ring_buffer_discard_commit.
+ *
+ * Note, if an event that has not been committed is discarded
+ * with ring_buffer_event_discard, it must still be committed.
+ */
 void ring_buffer_event_discard(struct ring_buffer_event *event);
 
+/*
+ * ring_buffer_discard_commit will remove an event that has not
+ *   been committed yet. If this is used, then ring_buffer_unlock_commit
+ *   must not be called on the discarded event. This function
+ *   will try to remove the event from the ring buffer completely
+ *   if another event has not been written after it.
+ *
+ * Example use:
+ *
+ *  if (some_condition)
+ *    ring_buffer_discard_commit(buffer, event);
+ *  else
+ *    ring_buffer_unlock_commit(buffer, event);
+ */
+void ring_buffer_discard_commit(struct ring_buffer *buffer,
+				struct ring_buffer_event *event);
+
 /*
  * size is in bytes for each per CPU buffer.
  */
 struct ring_buffer *
-ring_buffer_alloc(unsigned long size, unsigned flags);
+__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc(size, flags)			\
+({							\
+	static struct lock_class_key __key;		\
+	__ring_buffer_alloc((size), (flags), &__key);	\
+})
 
 void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
@@ -122,6 +165,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
@@ -137,6 +182,11 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
 			  size_t len, int cpu, int full);
 
+struct trace_seq;
+
+int ring_buffer_print_entry_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_seq *s);
+
 enum ring_buffer_flags {
 	RB_FL_OVERWRITE		= 1 << 0,
 };
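Note: the new ring_buffer_alloc() wrapper is a statement-expression macro that plants one static struct lock_class_key per call site, so lockdep gives each buffer its own lock class and tracing the ring buffer code itself cannot produce false recursion reports. The idiom in isolation, with hypothetical my_alloc/__my_alloc names:

    /* Every textual expansion gets its own static key, hence its own
     * lockdep class; buffers allocated from different places never
     * alias. */
    #define my_alloc(size)					\
    ({							\
    	static struct lock_class_key __key;		\
    	__my_alloc((size), &__key);			\
    })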
@@ -77,6 +77,7 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/rculist.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -96,8 +97,8 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
-struct bts_tracer;
 struct fs_struct;
+struct bts_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -116,6 +117,7 @@ struct fs_struct;
  *    11 bit fractions.
  */
 extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
@@ -135,8 +137,8 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -838,7 +840,17 @@ struct sched_group {
 	 */
 	u32 reciprocal_cpu_power;
 
-	unsigned long cpumask[];
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It is also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_group' in kernel/sched.c)
	 */
+	unsigned long cpumask[0];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
@@ -924,8 +936,17 @@ struct sched_domain {
 	char *name;
 #endif
 
-	/* span of all CPUs in this domain */
-	unsigned long span[];
+	/*
+	 * Span of all CPUs in this domain.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It is also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
+	 */
+	unsigned long span[0];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -1209,18 +1230,11 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-#ifdef CONFIG_X86_PTRACE_BTS
 	/*
 	 * This is the tracer handle for the ptrace BTS extension.
 	 * This field actually belongs to the ptracer task.
 	 */
-	struct bts_tracer *bts;
-	/*
-	 * The buffer to hold the BTS data.
-	 */
-	void *bts_buffer;
-	size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+	struct bts_context *bts;
 
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
@@ -1428,7 +1442,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2001,8 +2017,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
@@ -2010,7 +2028,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -2049,8 +2068,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
-	return list_entry(rcu_dereference(p->thread_group.next),
-			  struct task_struct, thread_group);
+	return list_entry_rcu(p->thread_group.next,
+			      struct task_struct, thread_group);
 }
 
 static inline int thread_group_empty(struct task_struct *p)
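Note: the comments added to sched_group and sched_domain document the switch from a C99 flexible array member ([]) to a GNU zero-length array ([0]); only the latter may be embedded in the static wrapper structures in kernel/sched.c. The dynamic half of the pattern, sketched (illustrative; the real allocation sites live in kernel/sched.c):

    /* Allocate a sched_group with cpumask storage attached past the end. */
    struct sched_group *sg;

    sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
    if (!sg)
    	return -ENOMEM;
    cpumask_clear(sched_group_cpus(sg));	/* points at the trailing space */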
@@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig)
 extern int next_signal(struct sigpending *pending, sigset_t *mask);
 extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
 extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+				 siginfo_t *info);
 extern long do_sigpending(void __user *, unsigned long);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern int show_unhandled_signals;
@@ -14,7 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -10,7 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 #define __raw_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
 # define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
+# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
 # define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
 # define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
@@ -437,6 +437,11 @@ static inline int mem_cgroup_cache_charge_swapin(struct page *page,
 	return 0;
 }
 
+static inline void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+}
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
@@ -29,7 +29,8 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
 
 extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
 				      phys_addr_t address);
-extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
+				       dma_addr_t address);
 
 extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
 
@@ -21,13 +21,14 @@ struct restart_block {
 		struct {
 			unsigned long arg0, arg1, arg2, arg3;
 		};
-		/* For futex_wait */
+		/* For futex_wait and futex_wait_requeue_pi */
 		struct {
 			u32 *uaddr;
 			u32 val;
 			u32 flags;
 			u32 bitset;
 			u64 time;
+			u32 *uaddr2;
 		} futex;
 		/* For nanosleep */
 		struct {
include/linux/trace_seq.h (new file, 92 lines)
@@ -0,0 +1,92 @@
+#ifndef _LINUX_TRACE_SEQ_H
+#define _LINUX_TRACE_SEQ_H
+
+#include <linux/fs.h>
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use (up to a max of PAGE_SIZE).
+ */
+
+struct trace_seq {
+	unsigned char		buffer[PAGE_SIZE];
+	unsigned int		len;
+	unsigned int		readpos;
+};
+
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+	s->len = 0;
+	s->readpos = 0;
+}
+
+/*
+ * Currently only defined when tracing is enabled.
+ */
+#ifdef CONFIG_TRACING
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+	__attribute__ ((format (printf, 2, 0)));
+extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+				 size_t cnt);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+				size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+
+#else /* CONFIG_TRACING */
+static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+	return 0;
+}
+static inline int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+	return 0;
+}
+
+static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+}
+static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+					size_t cnt)
+{
+	return 0;
+}
+static inline int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+	return 0;
+}
+static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+	return 0;
+}
+static inline int
+trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+{
+	return 0;
+}
+static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+				       size_t len)
+{
+	return 0;
+}
+static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+	return NULL;
+}
+static inline int trace_seq_path(struct trace_seq *s, struct path *path)
+{
+	return 0;
+}
+#endif /* CONFIG_TRACING */
+
+#endif /* _LINUX_TRACE_SEQ_H */
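Note: a trace_seq is a fixed, PAGE_SIZE text accumulator: producers append with the trace_seq_* helpers, a zero return means the line no longer fits, and the stub branch keeps callers compiling when tracing is off. A hedged usage sketch inside a printout routine, where m is an assumed seq_file and task an assumed task_struct pointer:

    struct trace_seq s;

    trace_seq_init(&s);
    if (!trace_seq_printf(&s, "pid=%d ", task->pid))
    	return TRACE_TYPE_PARTIAL_LINE;	/* full: caller flushes and retries */
    trace_seq_puts(&s, "done\n");
    trace_print_seq(m, &s);	/* hand the accumulated text to the seq_file */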
@@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child,
 
 /**
  * tracehook_report_clone - in parent, new child is about to start running
- * @trace:		return value from tracehook_prepare_clone()
  * @regs:		parent's user register state
  * @clone_flags:	flags from parent's system call
  * @pid:		new child's PID in the parent's namespace
  * @child:		new child task
 *
- * Called after a child is set up, but before it has been started
- * running.  @trace is the value returned by tracehook_prepare_clone().
+ * Called after a child is set up, but before it has been started running.
 * This is not a good place to block, because the child has not started
 * yet.  Suspend the child here if desired, and then block in
 * tracehook_report_clone_complete().  This must prevent the child from
@@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child,
 *
 * Called with no locks held, but the child cannot run until this returns.
 */
-static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+static inline void tracehook_report_clone(struct pt_regs *regs,
 					  unsigned long clone_flags,
 					  pid_t pid, struct task_struct *child)
 {
-	if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) {
+	if (unlikely(task_ptrace(child))) {
 		/*
-		 * The child starts up with an immediate SIGSTOP.
+		 * It doesn't matter who attached/attaching to this
+		 * task, the pending SIGSTOP is right in any case.
 		 */
 		sigaddset(&child->pending.signal, SIGSTOP);
 		set_tsk_thread_flag(child, TIF_SIGPENDING);
@@ -31,6 +31,8 @@ struct tracepoint {
  * Keep in sync with vmlinux.lds.h.
  */
 
+#ifndef DECLARE_TRACE
+
 #define TP_PROTO(args...)	args
 #define TP_ARGS(args...)	args
 
@@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 					struct tracepoint *end)
 { }
 #endif /* CONFIG_TRACEPOINTS */
+#endif /* DECLARE_TRACE */
 
 /*
  * Connect a probe to a tracepoint.
@@ -154,10 +157,8 @@ static inline void tracepoint_synchronize_unregister(void)
 }
 
 #define PARAMS(args...) args
-#define TRACE_FORMAT(name, proto, args, fmt)		\
-	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-
 
+#ifndef TRACE_EVENT
 /*
  * For use with the TRACE_EVENT macro:
 *
@@ -262,5 +263,6 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define TRACE_EVENT(name, proto, args, struct, assign, print)	\
 	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#endif
 
 #endif
@@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 	list_del(&old->task_list);
 }
 
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			int nr_exclusive, int sync, void *key);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,