Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
commit 5e66dd6d66
662 changed files with 27205 additions and 9469 deletions
include/linux/Kbuild (new file, 63 lines)
@@ -0,0 +1,63 @@
header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \
	netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \
	netfilter_ipv6/

header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \
	atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \
	atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \
	atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \
	awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \
	bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \
	consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \
	dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \
	fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \
	fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \
	hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \
	if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \
	if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \
	if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \
	in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \
	ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \
	jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \
	mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \
	nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \
	pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \
	pkt_sched.h posix_types.h ppdev.h prctl.h ps2esdi.h qic117.h \
	qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \
	resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \
	sound.h stddef.h synclink.h telephony.h termios.h ticable.h \
	times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \
	utsname.h video_decoder.h video_encoder.h videotext.h vt.h \
	wavefront.h wireless.h xattr.h x25.h zorro_ids.h

unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \
	atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \
	capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \
	cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \
	dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \
	ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \
	filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \
	genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \
	i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \
	if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \
	if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \
	inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \
	ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \
	joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \
	llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \
	msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \
	netfilter_bridge.h netfilter_decnet.h netfilter.h \
	netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \
	netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \
	n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \
	pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \
	random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \
	route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \
	selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \
	signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \
	soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \
	udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \
	usbdevice_fs.h user.h videodev2.h videodev.h wait.h \
	wanrouter.h watchdog.h xfrm.h zftape.h

objhdr-y := version.h
include/linux/byteorder/Kbuild (new file, 2 lines)
@@ -0,0 +1,2 @@
unifdef-y += generic.h swabb.h swab.h
header-y += big_endian.h little_endian.h pdp_endian.h
include/linux/completion.h
@@ -21,6 +21,18 @@ struct completion {
 #define DECLARE_COMPLETION(work) \
 	struct completion work = COMPLETION_INITIALIZER(work)
 
+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+	struct completion work = ({ init_completion(&work); work; })
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
 static inline void init_completion(struct completion *x)
 {
 	x->done = 0;
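A minimal sketch of why the _ONSTACK variant exists: an on-stack completion needs a runtime init_completion() so lockdep can key the lock class properly, where a static initializer would suffice for global completions. The function and device type below are invented for illustration, not part of this commit:

/* Illustrative only: waiting for an interrupt handler to signal us. */
static int wait_for_device_ready(struct my_device *dev)
{
	/* On-stack, so the _ONSTACK variant is required under lockdep: */
	DECLARE_COMPLETION_ONSTACK(done);

	dev->ready_completion = &done;
	start_device(dev);		/* IRQ handler calls complete(&done) */
	wait_for_completion(&done);
	return 0;
}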
include/linux/dcache.h
@@ -114,6 +114,18 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN_MIN];	/* small names */
 };
 
+/*
+ * dentry->d_lock spinlock nesting subclasses:
+ *
+ * 0: normal
+ * 1: nested
+ */
+enum dentry_d_lock_class
+{
+	DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+	DENTRY_D_LOCK_NESTED
+};
+
 struct dentry_operations {
 	int (*d_revalidate)(struct dentry *, struct nameidata *);
 	int (*d_hash) (struct dentry *, struct qstr *);
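A hedged sketch of how such a subclass is consumed (illustrative code, not from this commit): when two d_locks of the same class must be held at once, the second acquisition is annotated with the nested subclass so the validator sees a deliberate nesting level rather than a recursive-deadlock candidate:

/* Illustrative only: hold a parent's d_lock, then a child's. */
static void example_lock_dentry_pair(struct dentry *parent,
				     struct dentry *dentry)
{
	spin_lock(&parent->d_lock);		/* subclass 0: normal */
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

	/* ... work on both dentries ... */

	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
}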
include/linux/debug_locks.h (new file, 69 lines)
@@ -0,0 +1,69 @@
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H

extern int debug_locks;
extern int debug_locks_silent;

/*
 * Generic 'turn off all lock debugging' function:
 */
extern int debug_locks_off(void);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we dont want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define _RET_IP_		(unsigned long)__builtin_return_address(0)
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (unlikely(c)) {						\
		if (debug_locks_off())					\
			WARN_ON(1);					\
		__ret = 1;						\
	}								\
	__ret;								\
})

#ifdef CONFIG_SMP
# define SMP_DEBUG_LOCKS_WARN_ON(c)		DEBUG_LOCKS_WARN_ON(c)
#else
# define SMP_DEBUG_LOCKS_WARN_ON(c)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern void locking_selftest(void);
#else
# define locking_selftest()	do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
extern void debug_check_no_locks_held(struct task_struct *task);
#else
static inline void debug_show_all_locks(void)
{
}

static inline void debug_show_held_locks(struct task_struct *task)
{
}

static inline void
debug_check_no_locks_freed(const void *from, unsigned long len)
{
}

static inline void
debug_check_no_locks_held(struct task_struct *task)
{
}
#endif

#endif
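A small sketch of how these helpers compose (hypothetical consumer using only what the header above declares): DEBUG_LOCKS_WARN_ON() evaluates its condition, turns all lock debugging off on the first failure via debug_locks_off(), emits a single WARN_ON, and yields 1, so callers can skip any deeper validation of already-corrupt state:

/* Illustrative only: a lock-debug check built on the helpers above. */
static void check_lock_owner(struct my_lock *lock)
{
	if (DEBUG_LOCKS_WARN_ON(lock->owner != current))
		return;		/* state is corrupt - skip further checks */

	/* _RET_IP_ records the caller's instruction pointer: */
	lock->last_check_ip = _RET_IP_;
}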
include/linux/dmaengine.h
@@ -44,7 +44,7 @@ enum dma_event {
 };
 
 /**
- * typedef dma_cookie_t
+ * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
@@ -80,14 +80,14 @@ struct dma_chan_percpu {
 
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be NULL when unused
- * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @client: ptr to the client user of this chan, will be %NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
- * @chan_id:
- * @class_dev:
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref:
- * @rcu:
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
  * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
- * @refcount:
- * @done:
- * @dev_id:
- * Other func ptrs: used to make use of this device's capabilities
+ * @refcount: reference count
+ * @done: IO completion struct
+ * @dev_id: unique device ID
+ * @device_alloc_chan_resources: allocate resources and return the
+ *	number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
+ * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
+ * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
+ * @device_memcpy_complete: poll the status of an IOAT DMA transaction
+ * @device_memcpy_issue_pending: push appended descriptors to hardware
  */
 struct dma_device {
 
@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages)
+ * user space pages).
 */
 static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 }
 
 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
 }
 
 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
- * @dest_page: destination page
+ * @dest_pg: destination page
 * @dest_off: offset in page to copy to
- * @src_page: source page
+ * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages)
+ * (kernel memory or locked user space pages).
 */
 static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 
 /**
 * dma_async_memcpy_issue_pending - flush pending copies to HW
- * @chan:
+ * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
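The cookie convention documented above (positive value = request cookie, negative value = error code) suggests usage along these lines. A hedged sketch using only the two calls whose signatures appear in this diff; the wrapper function itself is invented:

/* Illustrative only: submit an offloaded copy and check the cookie. */
static int example_offload_copy(struct dma_chan *chan, void *dst,
				void *src, size_t len)
{
	dma_cookie_t cookie;

	/* Both buffers must stay memory resident until the copy completes. */
	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		return cookie;		/* negative cookie is an error code */

	/* Descriptors are batched; this pushes them to the hardware: */
	dma_async_memcpy_issue_pending(chan);
	return 0;
}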
include/linux/dvb/Kbuild (new file, 2 lines)
@@ -0,0 +1,2 @@
header-y += ca.h frontend.h net.h osd.h version.h
unifdef-y := audio.h dmx.h video.h
include/linux/fs.h
@@ -435,6 +435,21 @@ struct block_device {
 	unsigned long		bd_private;
 };
 
+/*
+ * bdev->bd_mutex nesting subclasses for the lock validator:
+ *
+ * 0: normal
+ * 1: 'whole'
+ * 2: 'partition'
+ */
+enum bdev_bd_mutex_lock_class
+{
+	BD_MUTEX_NORMAL,
+	BD_MUTEX_WHOLE,
+	BD_MUTEX_PARTITION
+};
+
+
 /*
 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
 * radix trees
@@ -542,6 +557,25 @@ struct inode {
 #endif
 };
 
+/*
+ * inode->i_mutex nesting subclasses for the lock validator:
+ *
+ * 0: the object of the current VFS operation
+ * 1: parent
+ * 2: child/target
+ * 3: quota file
+ *
+ * The locking order between these classes is
+ * parent -> child -> normal -> quota
+ */
+enum inode_i_mutex_lock_class
+{
+	I_MUTEX_NORMAL,
+	I_MUTEX_PARENT,
+	I_MUTEX_CHILD,
+	I_MUTEX_QUOTA
+};
+
 /*
 * NOTE: in a 32bit arch with a preemptable kernel and
 * an UP compile the i_size_read/write must be atomic
@@ -1276,6 +1310,8 @@ struct file_system_type {
 	struct module *owner;
 	struct file_system_type * next;
 	struct list_head fs_supers;
+	struct lock_class_key s_lock_key;
+	struct lock_class_key s_umount_key;
 };
 
 extern int get_sb_bdev(struct file_system_type *fs_type,
@@ -1404,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct block_device *open_partition_by_devnum(dev_t, unsigned);
 extern const struct file_operations def_blk_fops;
 extern const struct address_space_operations def_blk_aops;
 extern const struct file_operations def_chr_fops;
@@ -1414,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
 extern int blkdev_get(struct block_device *, mode_t, unsigned);
 extern int blkdev_put(struct block_device *);
+extern int blkdev_put_partition(struct block_device *);
 extern int bd_claim(struct block_device *, void *);
 extern void bd_release(struct block_device *);
 #ifdef CONFIG_SYSFS
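A hedged sketch of how a VFS operation would consume the i_mutex subclasses (illustrative, not code from this commit): when a directory and an object inside it must both be locked, each acquisition is annotated with its class so the validator sees a deliberate "parent then child" order instead of a recursive acquisition of one class:

/* Illustrative only: lock a directory, then a child inode within it. */
static void example_lock_parent_child(struct inode *dir, struct inode *inode)
{
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);

	/* ... VFS operation touching both inodes ... */

	mutex_unlock(&inode->i_mutex);
	mutex_unlock(&dir->i_mutex);
}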
include/linux/hardirq.h
@@ -3,6 +3,7 @@
 
 #include <linux/preempt.h>
 #include <linux/smp_lock.h>
+#include <linux/lockdep.h>
 #include <asm/hardirq.h>
 #include <asm/system.h>
 
@@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq);
 # define synchronize_irq(irq)	barrier()
 #endif
 
-#define nmi_enter()		irq_enter()
-#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)
-
 struct task_struct;
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
 #define irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
+		trace_hardirq_enter();			\
	} while (0)
 
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit()					\
+	do {						\
+		trace_hardirq_exit();			\
+		account_system_vtime(current);		\
+		sub_preempt_count(HARDIRQ_OFFSET);	\
+	} while (0)
+
+/*
+ * Exit irq context and process softirqs if needed:
+ */
 extern void irq_exit(void);
 
+#define nmi_enter()		do { lockdep_off(); irq_enter(); } while (0)
+#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
+
 #endif /* LINUX_HARDIRQ_H */
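The new nmi_enter()/nmi_exit() pair brackets NMI handlers with lockdep_off()/lockdep_on(), since the validator cannot safely run inside non-maskable context. A hedged sketch of the intended shape of a handler (the handler itself is invented):

/* Illustrative only: an NMI handler using the redefined helpers. */
static void example_nmi_handler(struct pt_regs *regs)
{
	nmi_enter();		/* lockdep_off() + irq_enter() */
	/* ... minimal, lock-free NMI work ... */
	nmi_exit();		/* __irq_exit() + lockdep_on() */
}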
include/linux/hdlc/Kbuild (new file, 1 line)
@@ -0,0 +1 @@
header-y += ioctl.h
include/linux/hrtimer.h
@@ -91,6 +91,7 @@ struct hrtimer_base {
 	ktime_t			(*get_softirq_time)(void);
 	struct hrtimer		*curr_timer;
 	ktime_t			softirq_time;
+	struct lock_class_key	lock_key;
 };
 
 /*
include/linux/ide.h
@@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem;
 * ide_drive_t->hwif: constant, no locking
 */
 
-#define local_irq_set(flags)	do { local_save_flags((flags)); local_irq_enable(); } while (0)
+#define local_irq_set(flags)	do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
 
 extern struct bus_type ide_bus_type;
include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
 	.id_free	= NULL,					\
 	.layers		= 0,					\
 	.id_free_cnt	= 0,					\
-	.lock		= SPIN_LOCK_UNLOCKED,			\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
include/linux/init_task.h
@@ -3,6 +3,8 @@
 
 #include <linux/file.h>
 #include <linux/rcupdate.h>
+#include <linux/irqflags.h>
+#include <linux/lockdep.h>
 
 #define INIT_FDTABLE \
 {							\
@@ -21,7 +23,7 @@
 	.count		= ATOMIC_INIT(1),		\
 	.fdt		= &init_files.fdtab,		\
 	.fdtab		= INIT_FDTABLE,			\
-	.file_lock	= SPIN_LOCK_UNLOCKED,		\
+	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock), \
 	.next_fd	= 0,				\
 	.close_on_exec_init = { { 0, } },		\
 	.open_fds_init	= { { 0, } },			\
@@ -36,7 +38,7 @@
 	.user_id	= 0,				\
 	.next		= NULL,				\
 	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
-	.ctx_lock	= SPIN_LOCK_UNLOCKED,		\
+	.ctx_lock	= __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
 	.reqs_active	= 0U,				\
 	.max_reqs	= ~0U,				\
 }
@@ -48,7 +50,7 @@
 	.mm_users	= ATOMIC_INIT(2),		\
 	.mm_count	= ATOMIC_INIT(1),		\
 	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem), \
-	.page_table_lock = SPIN_LOCK_UNLOCKED,		\
+	.page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),	\
 	.cpu_vm_mask	= CPU_MASK_ALL,			\
 }
@@ -69,7 +71,7 @@
 #define INIT_SIGHAND(sighand) {				\
 	.count		= ATOMIC_INIT(1),		\
 	.action		= { { { .sa_handler = NULL, } }, }, \
-	.siglock	= SPIN_LOCK_UNLOCKED,		\
+	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock), \
 }
 
 extern struct group_info init_groups;
@@ -119,12 +121,13 @@ extern struct group_info init_groups;
 	.list = LIST_HEAD_INIT(tsk.pending.list),	\
 		.signal = {{0}}},			\
 	.blocked	= {{0}},			\
-	.alloc_lock	= SPIN_LOCK_UNLOCKED,		\
+	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),	\
 	.journal_info	= NULL,				\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers), \
 	.fs_excl	= ATOMIC_INIT(0),		\
 	.pi_lock	= SPIN_LOCK_UNLOCKED,		\
 	INIT_RT_MUTEXES(tsk)				\
+	INIT_TRACE_IRQFLAGS				\
+	INIT_LOCKDEP					\
 }
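The pattern behind all of these conversions: SPIN_LOCK_UNLOCKED is one shared anonymous initializer, so every lock initialized with it would fall into a single lockdep class, while __SPIN_LOCK_UNLOCKED(name) keys the class to the named instance. A hedged sketch (the structure is invented for illustration):

/* Illustrative only: a static lock initialized the class-aware way. */
struct my_driver_state {
	spinlock_t	lock;
	int		count;
};

static struct my_driver_state state = {
	/* Each __SPIN_LOCK_UNLOCKED(x) use gets its own lockdep class,
	 * so this lock is not conflated with unrelated locks elsewhere. */
	.lock	= __SPIN_LOCK_UNLOCKED(state.lock),
	.count	= 0,
};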
include/linux/interrupt.h
@@ -10,6 +10,7 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+/*
+ * On lockdep we dont want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we dont seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context which is disabled,
+ * and which is the only irq-context user of a lock,
+ * that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
 /* IRQ wakeup (PM) control: */
 extern int set_irq_wake(unsigned int irq, unsigned int on);
 
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
 	return set_irq_wake(irq, 0);
 }
 
-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+#  define disable_irq_lockdep(irq)		disable_irq(irq)
+#  define enable_irq_lockdep(irq)		enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
-extern void local_bh_enable(void);
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
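A hedged sketch of the intended use of the _lockdep variants (the device structure is invented): a driver that takes a lock both from its IRQ handler and from process context with that IRQ disabled can tell the validator that hardirqs are effectively off for the section:

/* Illustrative only: process-context path that excludes its own IRQ. */
static void example_sync_with_handler(struct my_dev *dev)
{
	/* Disables the device IRQ; under CONFIG_LOCKDEP it also disables
	 * local interrupts so the validator sees an irq-safe section. */
	disable_irq_lockdep(dev->irq);

	spin_lock(&dev->lock);		/* also taken by the IRQ handler */
	/* ... manipulate state shared with the handler ... */
	spin_unlock(&dev->lock);

	enable_irq_lockdep(dev->irq);
}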
include/linux/ioport.h
@@ -55,6 +55,7 @@ struct resource_list {
 #define IORESOURCE_IRQ_LOWEDGE		(1<<1)
 #define IORESOURCE_IRQ_HIGHLEVEL	(1<<2)
 #define IORESOURCE_IRQ_LOWLEVEL	(1<<3)
+#define IORESOURCE_IRQ_SHAREABLE	(1<<4)
 
 /* ISA PnP DMA specific bits (IORESOURCE_BITS) */
 #define IORESOURCE_DMA_TYPE_MASK	(3<<0)
include/linux/irqflags.h (new file, 96 lines)
@@ -0,0 +1,96 @@
/*
 * include/linux/irqflags.h
 *
 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
 * provide callbacks for transitions between ON and OFF states.
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() macros from the lowlevel headers.
 */
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H

#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
# define trace_hardirq_context(p)	((p)->hardirq_context)
# define trace_softirq_context(p)	((p)->softirq_context)
# define trace_hardirqs_enabled(p)	((p)->hardirqs_enabled)
# define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
# define trace_hardirq_enter()	do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit()	do { current->hardirq_context--; } while (0)
# define trace_softirq_enter()	do { current->softirq_context++; } while (0)
# define trace_softirq_exit()	do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
#else
# define trace_hardirqs_on()		do { } while (0)
# define trace_hardirqs_off()		do { } while (0)
# define trace_softirqs_on(ip)		do { } while (0)
# define trace_softirqs_off(ip)		do { } while (0)
# define trace_hardirq_context(p)	0
# define trace_softirq_context(p)	0
# define trace_hardirqs_enabled(p)	0
# define trace_softirqs_enabled(p)	0
# define trace_hardirq_enter()		do { } while (0)
# define trace_hardirq_exit()		do { } while (0)
# define trace_softirq_enter()		do { } while (0)
# define trace_softirq_exit()		do { } while (0)
# define INIT_TRACE_IRQFLAGS
#endif

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT

#include <asm/irqflags.h>

#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
#define local_irq_save(flags) \
	do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)

#define local_irq_restore(flags)				\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_restore(flags);		\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_restore(flags);		\
		}						\
	} while (0)
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
/*
 * The local_irq_*() APIs are equal to the raw_local_irq*()
 * if !TRACE_IRQFLAGS.
 */
# define raw_local_irq_disable()	local_irq_disable()
# define raw_local_irq_enable()		local_irq_enable()
# define raw_local_irq_save(flags)	local_irq_save(flags)
# define raw_local_irq_restore(flags)	local_irq_restore(flags)
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define safe_halt()		\
	do {			\
		trace_hardirqs_on(); \
		raw_safe_halt(); \
	} while (0)

#define local_save_flags(flags)	raw_local_save_flags(flags)

#define irqs_disabled()				\
({						\
	unsigned long flags;			\
						\
	raw_local_save_flags(flags);		\
	raw_irqs_disabled_flags(flags);		\
})

#define irqs_disabled_flags(flags)	raw_irqs_disabled_flags(flags)
#endif		/* CONFIG_X86 */

#endif
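Callers keep using the same API; the tracing hooks fire transparently through the wrappers above. A hedged sketch of an ordinary critical section under the wrapped macros (the function is invented):

/* Illustrative only: unchanged caller code, now traced by lockdep. */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raw save + trace_hardirqs_off() */
	/* ... interrupts are off here ... */
	WARN_ON(!irqs_disabled());
	local_irq_restore(flags);	/* trace_hardirqs_on() if re-enabling */
}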
include/linux/isdn/Kbuild (new file, 1 line)
@@ -0,0 +1 @@
header-y += capicmd.h
include/linux/kallsyms.h
@@ -57,10 +57,25 @@ do { \
 #define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
 #endif
 
-#define print_symbol(fmt, addr)			\
-do {						\
-	__check_printsym_format(fmt, "");	\
-	__print_symbol(fmt, addr);		\
+static inline void print_symbol(const char *fmt, unsigned long addr)
+{
+	__check_printsym_format(fmt, "");
+	__print_symbol(fmt, (unsigned long)
+		       __builtin_extract_return_addr((void *)addr));
+}
+
+#ifndef CONFIG_64BIT
+#define print_ip_sym(ip)		\
+do {					\
+	printk("[<%08lx>]", ip);	\
+	print_symbol(" %s\n", ip);	\
 } while(0)
+#else
+#define print_ip_sym(ip)		\
+do {					\
+	printk("[<%016lx>]", ip);	\
+	print_symbol(" %s\n", ip);	\
+} while(0)
+#endif
 
 #endif /*_LINUX_KALLSYMS_H*/
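The new print_ip_sym() helper combines both steps that lockdep reports need, emitting the raw address and its resolved symbol on one line. A short usage sketch (the caller is invented):

/* Illustrative only: report an instruction pointer with its symbol. */
static void example_report(unsigned long ip)
{
	/* Prints e.g. "[<c0123456>] some_function+0x12/0x40" */
	print_ip_sym(ip);
}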
include/linux/lockdep.h (new file, 353 lines)
@@ -0,0 +1,353 @@
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

#ifdef CONFIG_LOCKDEP

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
	const char			*name;
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest ontop of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also keep do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);
extern int lockdep_internal(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key)

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline int lockdep_internal(void)
{
	return 0;
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
# define early_init_irq_lock_class()		do { } while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
#else
# define early_boot_irqs_off()			do { } while (0)
# define early_boot_irqs_on()			do { } while (0)
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */
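A hedged sketch of how a locking primitive wires into these hooks (the toy lock type is invented; the lockdep calls are the ones declared above): embed a lockdep_map, initialize it once per call site with a static key, then bracket acquire/release with lock_acquire()/lock_release():

/* Illustrative only: a toy lock type instrumented for the validator. */
struct toy_lock {
	int			locked;
	struct lockdep_map	dep_map;	/* instance -> class mapping */
};

#define toy_lock_init(l)					\
do {								\
	static struct lock_class_key __key;			\
								\
	lockdep_init_map(&(l)->dep_map, #l, &__key);		\
	(l)->locked = 0;					\
} while (0)

static inline void toy_lock(struct toy_lock *l)
{
	/* subclass 0, no trylock, write-acquire, full validation (check=2) */
	lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
	/* ... actually take the lock ... */
}

static inline void toy_unlock(struct toy_lock *l)
{
	/* ... actually release the lock ... */
	lock_release(&l->dep_map, 0, _RET_IP_);
}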
include/linux/mm.h
@@ -14,6 +14,7 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */
 
-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	mutex_debug_check_no_locks_freed(from, len);
-	rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
include/linux/mmzone.h
@@ -150,6 +150,10 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
 #ifdef CONFIG_NUMA
+	/*
+	 * zone reclaim becomes active if more unmapped pages exist.
+	 */
+	unsigned long		min_unmapped_ratio;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
@@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
include/linux/module.h
@@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod)
 /* Is this address in a module? (second is with no locks, for oops) */
 struct module *module_text_address(unsigned long addr);
 struct module *__module_text_address(unsigned long addr);
+int is_module_address(unsigned long addr);
 
 /* Returns module and fills in value, defined and namebuf, or NULL if
    symnum out of range. */
@@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr)
 	return NULL;
 }
 
+static inline int is_module_address(unsigned long addr)
+{
+	return 0;
+}
+
 /* Get/put a kernel symbol (calls should be symmetric) */
 #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
 #define symbol_put(x) do { } while(0)
include/linux/mtd/bbm.h
@@ -19,21 +19,21 @@
 
 /**
 * struct nand_bbt_descr - bad block table descriptor
- * @param options	options for this descriptor
- * @param pages	the page(s) where we find the bbt, used with
+ * @options:	options for this descriptor
+ * @pages:	the page(s) where we find the bbt, used with
 *		option BBT_ABSPAGE when bbt is searched,
 *		then we store the found bbts pages here.
 *		Its an array and supports up to 8 chips now
- * @param offs	offset of the pattern in the oob area of the page
- * @param veroffs	offset of the bbt version counter in the oob are of the page
- * @param version	version read from the bbt page during scan
- * @param len	length of the pattern, if 0 no pattern check is performed
- * @param maxblocks	maximum number of blocks to search for a bbt. This number of
- *		blocks is reserved at the end of the device
+ * @offs:	offset of the pattern in the oob area of the page
+ * @veroffs:	offset of the bbt version counter in the oob area of the page
+ * @version:	version read from the bbt page during scan
+ * @len:	length of the pattern, if 0 no pattern check is performed
+ * @maxblocks:	maximum number of blocks to search for a bbt. This
+ *		number of blocks is reserved at the end of the device
 *		where the tables are written.
- * @param reserved_block_code	if non-0, this pattern denotes a reserved
+ * @reserved_block_code:	if non-0, this pattern denotes a reserved
 *		(rather than bad) block in the stored bbt
- * @param pattern	pattern to identify bad block table or factory marked
+ * @pattern:	pattern to identify bad block table or factory marked
 *		good / bad blocks, can be NULL, if len = 0
 *
 * Descriptor for the bad block table marker and the descriptor for the
@@ -93,12 +93,15 @@ struct nand_bbt_descr {
 #define ONENAND_BADBLOCK_POS	0
 
 /**
- * struct bbt_info - [GENERIC] Bad Block Table data structure
- * @param bbt_erase_shift	[INTERN] number of address bits in a bbt entry
- * @param badblockpos	[INTERN] position of the bad block marker in the oob area
- * @param bbt	[INTERN] bad block table pointer
- * @param badblock_pattern	[REPLACEABLE] bad block scan pattern used for initial bad block scan
- * @param priv	[OPTIONAL] pointer to private bbm date
+ * struct bbm_info - [GENERIC] Bad Block Table data structure
+ * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
+ * @badblockpos:	[INTERN] position of the bad block marker in the oob area
+ * @options:		options for this descriptor
+ * @bbt:		[INTERN] bad block table pointer
+ * @isbad_bbt:		function to determine if a block is bad
+ * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for
+ *			initial bad block scan
+ * @priv:		[OPTIONAL] pointer to private bbm date
 */
 struct bbm_info {
 	int bbt_erase_shift;
include/linux/mtd/mtd.h
@@ -77,11 +77,11 @@ typedef enum {
 *
 * @len:	number of bytes to write/read. When a data buffer is given
 *		(datbuf != NULL) this is the number of data bytes. When
-+		no data buffer is available this is the number of oob bytes.
+ *		no data buffer is available this is the number of oob bytes.
 *
 * @retlen:	number of bytes written/read. When a data buffer is given
 *		(datbuf != NULL) this is the number of data bytes. When
-+		no data buffer is available this is the number of oob bytes.
+ *		no data buffer is available this is the number of oob bytes.
 *
 * @ooblen:	number of oob bytes per page
 * @ooboffs:	offset of oob data in the oob area (only relevant when
include/linux/mtd/nand.h
@@ -202,7 +202,7 @@ typedef enum {
 struct nand_chip;
 
 /**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices
+ * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
 * @lock:	protection lock
 * @active:	the mtd device which holds the controller currently
 * @wq:		wait queue to sleep on if a NAND operation is in progress
@@ -223,12 +223,15 @@ struct nand_hw_control {
 * @total:	total number of ecc bytes per page
 * @prepad:	padding information for syndrome based ecc generators
 * @postpad:	padding information for syndrome based ecc generators
+ * @layout:	ECC layout control struct pointer
 * @hwctl:	function to control hardware ecc generator. Must only
 *		be provided if an hardware ECC is available
 * @calculate:	function for ecc calculation or readback from ecc hardware
 * @correct:	function for ecc correction, matching to ecc generator (sw/hw)
 * @read_page:	function to read a page according to the ecc generator requirements
 * @write_page:	function to write a page according to the ecc generator requirements
+ * @read_oob:	function to read chip OOB data
+ * @write_oob:	function to write chip OOB data
 */
 struct nand_ecc_ctrl {
	nand_ecc_modes_t	mode;
@@ -300,11 +303,15 @@ struct nand_buffers {
 * @cmdfunc:		[REPLACEABLE] hardwarespecific function for writing commands to the chip
 * @waitfunc:		[REPLACEABLE] hardwarespecific function for wait on ready
 * @ecc:		[BOARDSPECIFIC] ecc control ctructure
+ * @buffers:		buffer structure for read/write
+ * @hwcontrol:		platform-specific hardware control structure
+ * @ops:		oob operation operands
 * @erase_cmd:		[INTERN] erase command write function, selectable due to AND support
 * @scan_bbt:		[REPLACEABLE] function to scan bad block table
 * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
 * @wq:			[INTERN] wait queue to sleep on if a NAND operation is in progress
 * @state:		[INTERN] the current state of the NAND device
+ * @oob_poi:		poison value buffer
 * @page_shift:		[INTERN] number of address bits in a page (column address bits)
 * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
 * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
@@ -400,7 +407,6 @@ struct nand_chip {
 
 /**
 * struct nand_flash_dev - NAND Flash Device ID Structure
- *
 * @name:	Identify the device type
 * @id:		device ID code
 * @pagesize:	Pagesize in bytes. Either 256 or 512 or 0
@@ -519,9 +525,8 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
 
 /**
 * struct platform_nand_chip - chip level device structure
- *
 * @nr_chips:		max. number of chips to scan for
- * @chip_offs:		chip number offset
+ * @chip_offset:	chip number offset
 * @nr_partitions:	number of partitions pointed to by partitions (or zero)
 * @partitions:		mtd partition list
 * @chip_delay:		R/B delay value in us
@@ -542,11 +547,10 @@ struct platform_nand_chip {
 
 /**
 * struct platform_nand_ctrl - controller level device structure
- *
 * @hwcontrol:		platform specific hardware control structure
 * @dev_ready:		platform specific function to read ready/busy pin
 * @select_chip:	platform specific chip select function
- * @priv_data:		private data to transport driver specific settings
+ * @priv:		private data to transport driver specific settings
 *
 * All fields are optional and depend on the hardware driver requirements
 */
include/linux/mtd/onenand.h
@@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
 /* Free resources held by the OneNAND device */
 extern void onenand_release(struct mtd_info *mtd);
 
-/**
+/*
 * onenand_state_t - chip states
 * Enumeration for OneNAND flash chip state
 */
@@ -42,9 +42,9 @@ typedef enum {
 
 /**
 * struct onenand_bufferram - OneNAND BufferRAM Data
- * @param block	block address in BufferRAM
- * @param page	page address in BufferRAM
- * @param valid	valid flag
+ * @block:	block address in BufferRAM
+ * @page:	page address in BufferRAM
+ * @valid:	valid flag
 */
 struct onenand_bufferram {
	int	block;
@@ -54,32 +54,43 @@ struct onenand_bufferram {
 
 /**
 * struct onenand_chip - OneNAND Private Flash Chip Data
- * @param base		[BOARDSPECIFIC] address to access OneNAND
- * @param chipsize	[INTERN] the size of one chip for multichip arrays
- * @param device_id	[INTERN] device ID
- * @param verstion_id	[INTERN] version ID
- * @param options	[BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about
- * @param erase_shift	[INTERN] number of address bits in a block
- * @param page_shift	[INTERN] number of address bits in a page
- * @param ppb_shift	[INTERN] number of address bits in a pages per block
- * @param page_mask	[INTERN] a page per block mask
- * @param bufferam_index	[INTERN] BufferRAM index
- * @param bufferam	[INTERN] BufferRAM info
- * @param readw		[REPLACEABLE] hardware specific function for read short
- * @param writew	[REPLACEABLE] hardware specific function for write short
- * @param command	[REPLACEABLE] hardware specific function for writing commands to the chip
- * @param wait		[REPLACEABLE] hardware specific function for wait on ready
- * @param read_bufferram	[REPLACEABLE] hardware specific function for BufferRAM Area
- * @param write_bufferram	[REPLACEABLE] hardware specific function for BufferRAM Area
- * @param read_word	[REPLACEABLE] hardware specific function for read register of OneNAND
- * @param write_word	[REPLACEABLE] hardware specific function for write register of OneNAND
- * @param scan_bbt	[REPLACEALBE] hardware specific function for scaning Bad block Table
- * @param chip_lock	[INTERN] spinlock used to protect access to this structure and the chip
- * @param wq		[INTERN] wait queue to sleep on if a OneNAND operation is in progress
- * @param state		[INTERN] the current state of the OneNAND device
- * @param ecclayout	[REPLACEABLE] the default ecc placement scheme
- * @param bbm		[REPLACEABLE] pointer to Bad Block Management
- * @param priv		[OPTIONAL] pointer to private chip date
+ * @base:		[BOARDSPECIFIC] address to access OneNAND
+ * @chipsize:		[INTERN] the size of one chip for multichip arrays
+ * @device_id:		[INTERN] device ID
+ * @density_mask:	chip density, used for DDP devices
+ * @verstion_id:	[INTERN] version ID
+ * @options:		[BOARDSPECIFIC] various chip options. They can
+ *			partly be set to inform onenand_scan about
+ * @erase_shift:	[INTERN] number of address bits in a block
+ * @page_shift:		[INTERN] number of address bits in a page
+ * @ppb_shift:		[INTERN] number of address bits in a pages per block
+ * @page_mask:		[INTERN] a page per block mask
+ * @bufferram_index:	[INTERN] BufferRAM index
+ * @bufferram:		[INTERN] BufferRAM info
+ * @readw:		[REPLACEABLE] hardware specific function for read short
+ * @writew:		[REPLACEABLE] hardware specific function for write short
+ * @command:		[REPLACEABLE] hardware specific function for writing
+ *			commands to the chip
+ * @wait:		[REPLACEABLE] hardware specific function for wait on ready
+ * @read_bufferram:	[REPLACEABLE] hardware specific function for BufferRAM Area
+ * @write_bufferram:	[REPLACEABLE] hardware specific function for BufferRAM Area
+ * @read_word:		[REPLACEABLE] hardware specific function for read
+ *			register of OneNAND
+ * @write_word:		[REPLACEABLE] hardware specific function for write
+ *			register of OneNAND
+ * @mmcontrol:		sync burst read function
+ * @block_markbad:	function to mark a block as bad
+ * @scan_bbt:		[REPLACEALBE] hardware specific function for scanning
+ *			Bad block Table
+ * @chip_lock:		[INTERN] spinlock used to protect access to this
+ *			structure and the chip
+ * @wq:			[INTERN] wait queue to sleep on if a OneNAND
+ *			operation is in progress
+ * @state:		[INTERN] the current state of the OneNAND device
+ * @page_buf:		data buffer
+ * @ecclayout:		[REPLACEABLE] the default ecc placement scheme
+ * @bbm:		[REPLACEABLE] pointer to Bad Block Management
+ * @priv:		[OPTIONAL] pointer to private chip date
 */
 struct onenand_chip {
	void __iomem		*base;
@@ -147,9 +158,9 @@ struct onenand_chip {
 #define ONENAND_MFR_SAMSUNG	0xec
 
 /**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
- * @param name:	Manufacturer name
- * @param id:	manufacturer ID code of device.
+ * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
+ * @name:	Manufacturer name
+ * @id:		manufacturer ID code of device.
 */
 struct onenand_manufacturers {
	int	id;
@@ -2,22 +2,22 @@
#define __LINUX_MUTEX_DEBUG_H

#include <linux/linkage.h>
#include <linux/lockdep.h>

/*
 * Mutexes - debugging helpers:
 */

#define __DEBUG_MUTEX_INITIALIZER(lockname) \
	, .held_list = LIST_HEAD_INIT(lockname.held_list), \
	.name = #lockname , .magic = &lockname
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
	, .magic = &lockname

#define mutex_init(sem)		__mutex_init(sem, __FUNCTION__)
#define mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
\
	__mutex_init((mutex), #mutex, &__key); \
} while (0)

extern void FASTCALL(mutex_destroy(struct mutex *lock));

extern void mutex_debug_show_all_locks(void);
extern void mutex_debug_show_held_locks(struct task_struct *filter);
extern void mutex_debug_check_no_locks_held(struct task_struct *task);
extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);

#endif
@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <asm/atomic.h>

@@ -50,11 +51,12 @@ struct mutex {
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	struct thread_info	*owner;
	struct list_head	held_list;
	unsigned long		acquire_ip;
	const char		*name;
	void			*magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

/*
@@ -74,24 +76,34 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
# define mutex_init(mutex)		__mutex_init(mutex, NULL)
# define mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
\
	__mutex_init((mutex), #mutex, &__key); \
} while (0)
# define mutex_destroy(mutex)				do { } while (0)
# define mutex_debug_show_all_locks()			do { } while (0)
# define mutex_debug_show_held_locks(p)			do { } while (0)
# define mutex_debug_check_no_locks_held(task)		do { } while (0)
# define mutex_debug_check_no_locks_freed(from, len)	do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = SPIN_LOCK_UNLOCKED \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) }
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void fastcall __mutex_init(struct mutex *lock, const char *name);
extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);

/***
 * mutex_is_locked - is the mutex locked
@@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 */
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
#else
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
#endif

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 * not the down_trylock() convention!
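A minimal usage sketch of the lockdep-aware mutex API declared above; DEFINE_MUTEX and the key-based mutex_init both come from the diff, while the variable and function names here are illustrative:

#include <linux/mutex.h>

static DEFINE_MUTEX(cache_lock);	/* static init: the lock gets its own class */
static int cache_generation;

static void cache_invalidate(void)
{
	mutex_lock(&cache_lock);	/* may sleep; lockdep records the acquisition */
	cache_generation++;
	mutex_unlock(&cache_lock);
}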
11
include/linux/netfilter/Kbuild
Normal file
@@ -0,0 +1,11 @@
header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \
	nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \
	xt_CLASSIFY.h xt_comment.h xt_connbytes.h xt_connmark.h \
	xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \
	xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \
	xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \
	xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \
	xt_tcpmss.h xt_tcpudp.h

unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \
	nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h
2
include/linux/netfilter_arp/Kbuild
Normal file
@@ -0,0 +1,2 @@
header-y := arpt_mangle.h
unifdef-y := arp_tables.h
4
include/linux/netfilter_bridge/Kbuild
Normal file
@@ -0,0 +1,4 @@
header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \
	ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \
	ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h
unifdef-y := ebtables.h ebt_802_3.h
21
include/linux/netfilter_ipv4/Kbuild
Normal file
@@ -0,0 +1,21 @@

header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \
	ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \
	ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \
	ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \
	ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \
	ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \
	ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \
	ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \
	ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \
	ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \
	ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \
	ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \
	ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \
	ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \
	ipt_ULOG.h

unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \
	ip_conntrack_pptp.h ip_conntrack_proto_gre.h \
	ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \
	ip_tables.h
6
include/linux/netfilter_ipv6/Kbuild
Normal file
@@ -0,0 +1,6 @@
header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \
	ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \
	ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \
	ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \
	ip6t_physdev.h ip6t_rt.h
unifdef-y := ip6_tables.h
2
include/linux/nfsd/Kbuild
Normal file
@@ -0,0 +1,2 @@
unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
	} while (0)

#define ATOMIC_NOTIFIER_INIT(name) {				\
		.lock = SPIN_LOCK_UNLOCKED,			\
		.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
		.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) {				\
		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\
@@ -44,6 +44,11 @@

/********** drivers/atm/ **********/
#define ATM_POISON_FREE		0x12
#define ATM_POISON		0xdeadbeef

/********** net/ **********/
#define NEIGHBOR_DEAD		0xdeadbeef
#define NETFILTER_LINK_POISON	0xdead57ac

/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT	0x11
1
include/linux/raid/Kbuild
Normal file
@@ -0,0 +1 @@
header-y += md_p.h md_u.h
@@ -29,8 +29,6 @@ struct rt_mutex {
	struct task_struct	*owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	int			save_state;
	struct list_head	held_list_entry;
	unsigned long		acquire_ip;
	const char		*name, *file;
	int			line;
	void			*magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);

extern void rt_mutex_unlock(struct rt_mutex *lock);

#ifdef CONFIG_DEBUG_RT_MUTEXES
# define INIT_RT_MUTEX_DEBUG(tsk)					\
	.held_list_head	= LIST_HEAD_INIT(tsk.held_list_head),		\
	.held_list_lock	= SPIN_LOCK_UNLOCKED
#else
# define INIT_RT_MUTEX_DEBUG(tsk)
#endif

#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk)						\
	.pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\
@@ -32,30 +32,37 @@ struct rw_semaphore {
	__s32			activity;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
#define __RWSEM_DEBUG_INIT	/* */
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

extern void FASTCALL(__down_read(struct rw_semaphore *sem));
extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__down_write(struct rw_semaphore *sem));
extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__up_read(struct rw_semaphore *sem));
extern void FASTCALL(__up_write(struct rw_semaphore *sem));
@@ -9,8 +9,6 @@

#include <linux/linkage.h>

#define RWSEM_DEBUG 0

#ifdef __KERNEL__

#include <linux/types.h>

@@ -26,89 +24,58 @@ struct rw_semaphore;
#include <asm/rwsem.h> /* use an arch-specific implementation */
#endif

#ifndef rwsemtrace
#if RWSEM_DEBUG
extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
#else
#define rwsemtrace(SEM,FMT)
#endif
#endif

/*
 * lock for reading
 */
static inline void down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsemtrace(sem,"Entering down_read");
	__down_read(sem);
	rwsemtrace(sem,"Leaving down_read");
}
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int down_read_trylock(struct rw_semaphore *sem)
{
	int ret;
	rwsemtrace(sem,"Entering down_read_trylock");
	ret = __down_read_trylock(sem);
	rwsemtrace(sem,"Leaving down_read_trylock");
	return ret;
}
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
static inline void down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsemtrace(sem,"Entering down_write");
	__down_write(sem);
	rwsemtrace(sem,"Leaving down_write");
}
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int down_write_trylock(struct rw_semaphore *sem)
{
	int ret;
	rwsemtrace(sem,"Entering down_write_trylock");
	ret = __down_write_trylock(sem);
	rwsemtrace(sem,"Leaving down_write_trylock");
	return ret;
}
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
static inline void up_read(struct rw_semaphore *sem)
{
	rwsemtrace(sem,"Entering up_read");
	__up_read(sem);
	rwsemtrace(sem,"Leaving up_read");
}
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
static inline void up_write(struct rw_semaphore *sem)
{
	rwsemtrace(sem,"Entering up_write");
	__up_write(sem);
	rwsemtrace(sem,"Leaving up_write");
}
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
static inline void downgrade_write(struct rw_semaphore *sem)
{
	rwsemtrace(sem,"Entering downgrade_write");
	__downgrade_write(sem);
	rwsemtrace(sem,"Leaving downgrade_write");
}
extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking:
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
/*
 * Take/release a lock when not the owner will release it:
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)	down_read(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
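A minimal sketch of the now out-of-line rwsem API declared above; down_read/up_read and down_write/up_write are from the diff, the protected data is illustrative:

#include <linux/rwsem.h>

static DECLARE_RWSEM(config_sem);
static int config_value;

static int config_get(void)
{
	int v;

	down_read(&config_sem);		/* shared: concurrent readers allowed */
	v = config_value;
	up_read(&config_sem);
	return v;
}

static void config_set(int v)
{
	down_write(&config_sem);	/* exclusive: blocks readers and writers */
	config_value = v;
	up_write(&config_sem);
}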
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

typedef struct task_struct task_t;
struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(task_t *idle, int cpu);
extern void init_idle(struct task_struct *idle, int cpu);

extern cpumask_t nohz_cpu_mask;

@@ -383,7 +383,7 @@ struct signal_struct {
	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	task_t			*curr_target;
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)

typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;

@@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct*);
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

@@ -715,6 +714,8 @@ enum sleep_type {
	SLEEP_INTERRUPTED,
};

struct prio_array;

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;

@@ -732,7 +733,7 @@ struct task_struct {
	int load_weight;	/* for niceness load balancing purposes */
	int prio, static_prio, normal_prio;
	struct list_head run_list;
	prio_array_t *array;
	struct prio_array *array;

	unsigned short ioprio;
	unsigned int btrace_seq;

@@ -865,16 +866,34 @@ struct task_struct {
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
# ifdef CONFIG_DEBUG_RT_MUTEXES
	spinlock_t held_list_lock;
	struct list_head held_list_head;
# endif
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
	u64 curr_chain_key;
	int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	unsigned int lockdep_recursion;
#endif

/* journalling filesystem info */
	void *journal_info;

@@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	if (!cpu_isset(0, new_mask))
		return -EINVAL;

@@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
#endif

extern unsigned long long sched_clock(void);
extern unsigned long long current_sched_time(const task_t *current_task);
extern unsigned long long
current_sched_time(const struct task_struct *current_task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP

@@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {}
extern void sched_idle_next(void);

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(task_t *p);
extern void rt_mutex_setprio(task_t *p, int prio);
extern void rt_mutex_adjust_pi(task_t *p);
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(task_t *p)
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(task_t *p, long nice);
extern int task_prio(const task_t *p);
extern int task_nice(const task_t *p);
extern int can_nice(const task_t *p, const int nice);
extern int task_curr(const task_t *p);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern task_t *idle_task(int cpu);
extern task_t *curr_task(int cpu);
extern void set_curr_task(int cpu, task_t *p);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

@@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
extern void FASTCALL(sched_exit(task_t * p));
extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
extern void FASTCALL(sched_exit(struct task_struct * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

@@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
extern task_t *child_reaper;
extern struct task_struct *child_reaper;

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
task_t *fork_idle(int);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

@@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

static inline task_t *next_thread(const task_t *p)
static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  task_t, thread_group);
			  struct task_struct, thread_group);
}

static inline int thread_group_empty(task_t *p)
static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}
@@ -38,9 +38,17 @@ typedef struct {
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x)	do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define SEQLOCK_UNLOCKED \
		 __SEQLOCK_UNLOCKED(old_style_seqlock_init)

#define seqlock_init(x) \
		do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
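A minimal reader/writer sketch against the seqlock initializers above; DEFINE_SEQLOCK is from the diff, read_seqbegin/read_seqretry are the standard access pattern, and the timestamp variable is illustrative:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(ts_lock);
static unsigned long long last_ns;

static void ts_update(unsigned long long ns)
{
	write_seqlock(&ts_lock);	/* writers serialize and bump the count */
	last_ns = ns;
	write_sequnlock(&ts_lock);
}

static unsigned long long ts_read(void)
{
	unsigned seq;
	unsigned long long ns;

	do {				/* lockless read, retried if a writer overlapped */
		seq = read_seqbegin(&ts_lock);
		ns = last_ns;
	} while (read_seqretry(&ts_lock, seq));

	return ns;
}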
@@ -216,10 +216,11 @@ struct uart_port {
	unsigned char __iomem	*membase;	/* read/write[bwl] */
	unsigned int		irq;		/* irq number */
	unsigned int		uartclk;	/* base uart clock */
	unsigned char		fifosize;	/* tx fifo size */
	unsigned int		fifosize;	/* tx fifo size */
	unsigned char		x_char;		/* xon/xoff char */
	unsigned char		regshift;	/* reg offset shift */
	unsigned char		iotype;		/* io access style */
	unsigned char		unused1;

#define UPIO_PORT		(0)
#define UPIO_HUB6		(1)
@@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
	return list_->qlen;
}

extern struct lock_class_key skb_queue_lock_key;

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	lockdep_set_class(&list->lock, &skb_queue_lock_key);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
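A small sketch of the initializer shown above; skb_queue_head_init is from the diff and now assigns the shared lockdep class, while the enqueue/dequeue calls are standard sk_buff_head usage shown only for context:

#include <linux/skbuff.h>

static struct sk_buff_head rx_queue;

static void rx_init(void)
{
	/* sets up list head, qlen, and the queue lock's lockdep class */
	skb_queue_head_init(&rx_queue);
}

static void rx_push(struct sk_buff *skb)
{
	skb_queue_tail(&rx_queue, skb);		/* takes the queue lock internally */
}

static struct sk_buff *rx_pop(void)
{
	return skb_dequeue(&rx_queue);		/* NULL when the queue is empty */
}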
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
 */
#if defined(CONFIG_SMP)
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#define spin_lock_init(lock)	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#define rwlock_init(lock)	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __spin_lock_init(spinlock_t *lock, const char *name,
			     struct lock_class_key *key);
# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define spin_lock_init(lock)					\
	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
			  struct lock_class_key *key);
# define rwlock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__rwlock_init((lock), #lock, &__key);			\
} while (0)
#else
# define rwlock_init(lock)					\
	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#endif

#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)

@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);

@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
#endif

#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)

@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define write_trylock(lock)		__cond_lock(_write_trylock(lock))

#define spin_lock(lock)			_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
#endif

#define write_lock(lock)		_write_lock(lock)
#define read_lock(lock)			_read_lock(lock)

@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
 * We inline the unlock functions in the nondebug case:
 */
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
	!defined(CONFIG_SMP)
# define spin_unlock(lock)		_spin_unlock(lock)
# define read_unlock(lock)		_read_unlock(lock)
# define write_unlock(lock)		_write_unlock(lock)
#else
# define spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define read_unlock(lock)		__raw_read_unlock(&(lock)->raw_lock)
# define write_unlock(lock)		__raw_write_unlock(&(lock)->raw_lock)
#endif

#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
# define read_unlock_irq(lock)		_read_unlock_irq(lock)
# define write_unlock_irq(lock)		_write_unlock_irq(lock)
#else
# define spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define read_unlock(lock)		__raw_read_unlock(&(lock)->raw_lock)
# define write_unlock(lock)		__raw_write_unlock(&(lock)->raw_lock)
# define spin_unlock_irq(lock) \
    do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
# define read_unlock_irq(lock) \
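Two short sketches for the APIs changed above: dynamic initialization via the new key-generating spin_lock_init(), and the spin_lock_nested() annotation for deliberately nested locks of the same class. SINGLE_DEPTH_NESTING comes from the lockdep infrastructure this merge introduces; the struct and functions are illustrative:

#include <linux/spinlock.h>

struct node {
	spinlock_t lock;
	struct node *parent;
};

static void node_init(struct node *n)
{
	/* under CONFIG_DEBUG_SPINLOCK this expands to __spin_lock_init()
	 * with a static lock_class_key unique to this init site */
	spin_lock_init(&n->lock);
}

static void node_reparent(struct node *child, struct node *parent)
{
	spin_lock(&parent->lock);
	/* same lock class taken twice; the subclass tells lockdep
	 * this parent->child nesting order is intentional */
	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
	child->parent = parent;
	spin_unlock(&child->lock);
	spin_unlock(&parent->lock);
}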
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))

void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
@@ -49,6 +49,7 @@
  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)

#define _spin_lock(lock)			__LOCK(lock)
#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
#define _read_lock(lock)			__LOCK(lock)
#define _write_lock(lock)			__LOCK(lock)
#define _spin_lock_bh(lock)			__LOCK_BH(lock)
@@ -9,6 +9,8 @@
 * Released under the General Public License (GPL).
 */

#include <linux/lockdep.h>

#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else

@@ -24,6 +26,9 @@ typedef struct {
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC		0xdead4ead
@@ -37,31 +42,53 @@ typedef struct {
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} rwlock_t;

#define RWLOCK_MAGIC		0xdeaf1eed

#define SPINLOCK_OWNER_INIT	((void *)-1L)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
# define SPIN_LOCK_UNLOCKED \
# define __SPIN_LOCK_UNLOCKED(lockname) \
	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
				.magic = SPINLOCK_MAGIC,		\
				.owner = SPINLOCK_OWNER_INIT,		\
				.owner_cpu = -1 }
#define RW_LOCK_UNLOCKED \
				.owner_cpu = -1,			\
				SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
				.magic = RWLOCK_MAGIC,			\
				.owner = SPINLOCK_OWNER_INIT,		\
				.owner_cpu = -1 }
				.owner_cpu = -1,			\
				RW_DEP_MAP_INIT(lockname) }
#else
# define SPIN_LOCK_UNLOCKED \
	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
#define RW_LOCK_UNLOCKED \
	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
# define __SPIN_LOCK_UNLOCKED(lockname) \
	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
				SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
				RW_DEP_MAP_INIT(lockname) }
#endif

#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)

#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)

#endif /* __LINUX_SPINLOCK_TYPES_H */
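The net effect of the initializer changes above, as a short sketch: static definitions keep working, but each lock now carries its variable name into its lockdep class (the variable names here are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);	/* expands to __SPIN_LOCK_UNLOCKED(stats_lock) */
static DEFINE_RWLOCK(table_lock);	/* expands to __RW_LOCK_UNLOCKED(table_lock) */

/* bare SPIN_LOCK_UNLOCKED still compiles, but all of its users share the
 * one "old_style_spin_init" class, so the named macros above are preferred. */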
@@ -12,10 +12,14 @@
 * Released under the General Public License (GPL).
 */

#ifdef CONFIG_DEBUG_SPINLOCK
#if defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_DEBUG_LOCK_ALLOC)

typedef struct {
	volatile unsigned int slock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 1 }

@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;

typedef struct {
	/* no debug version on UP */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} raw_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED { }
@@ -18,7 +18,6 @@
 */

#ifdef CONFIG_DEBUG_SPINLOCK

#define __raw_spin_is_locked(x)		((x)->slock == 0)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
20
include/linux/stacktrace.h
Normal file
@@ -0,0 +1,20 @@
#ifndef __LINUX_STACKTRACE_H
#define __LINUX_STACKTRACE_H

#ifdef CONFIG_STACKTRACE
struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
};

extern void save_stack_trace(struct stack_trace *trace,
			     struct task_struct *task, int all_contexts,
			     unsigned int skip);

extern void print_stack_trace(struct stack_trace *trace, int spaces);
#else
# define save_stack_trace(trace, task, all, skip)	do { } while (0)
# define print_stack_trace(trace)			do { } while (0)
#endif

#endif
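A minimal sketch of the new stacktrace API as declared above; the call signature matches the diff, while the buffer size and function are illustrative and assume CONFIG_STACKTRACE=y:

#include <linux/sched.h>
#include <linux/stacktrace.h>

static unsigned long trace_entries[16];

static void dump_current_stack(void)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= 16,
		.entries	= trace_entries,
	};

	/* capture the current task only (all_contexts=0), skipping no frames */
	save_stack_trace(&trace, current, 0, 0);
	print_stack_trace(&trace, 0);
}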
1
include/linux/sunrpc/Kbuild
Normal file
@@ -0,0 +1 @@
unifdef-y := debug.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
@@ -188,7 +188,7 @@ enum
	VM_DROP_PAGECACHE=29,	/* int: nuke lots of pagecache */
	VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
	VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
	VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
};
1
include/linux/tc_act/Kbuild
Normal file
@@ -0,0 +1 @@
header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h
1
include/linux/tc_ematch/Kbuild
Normal file
@@ -0,0 +1 @@
headers-y := tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h
@@ -1,4 +1,4 @@
#include <linux/version.h>
#include <linux/utsrelease.h>
#include <linux/module.h>

/* Simply sanity version stamp for modules. */
@@ -68,7 +68,7 @@ struct task_struct;
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= SPIN_LOCK_UNLOCKED,				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \

@@ -77,9 +77,15 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

/*
 * lockdep: we want one lock-class for all waitqueue locks.
 */
extern struct lock_class_key waitqueue_lock_key;

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, &waitqueue_lock_key);
	INIT_LIST_HEAD(&q->task_list);
}
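A short sketch of the waitqueue pattern these initializers serve; DECLARE_WAIT_QUEUE_HEAD is from the diff, wait_event/wake_up are its standard companions, and the flag and functions are illustrative:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(io_wait);	/* static: lock named via __SPIN_LOCK_UNLOCKED(name.lock) */
static int io_done;

static void io_wait_for_completion(void)
{
	wait_event(io_wait, io_done);		/* sleeps until the condition holds */
}

static void io_complete(void)
{
	io_done = 1;
	wake_up(&io_wait);			/* wakes sleepers to recheck io_done */
}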
@@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq);

extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
	struct work_struct *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));

extern int FASTCALL(schedule_work(struct work_struct *work));
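A sketch of the new queue_delayed_work_on() declared above, using the 2.6.18-era work API (three-argument DECLARE_WORK with a void * callback argument); the names, the CPU choice, and the one-second delay are illustrative:

#include <linux/workqueue.h>

static void poll_fn(void *data);
static DECLARE_WORK(poll_work, poll_fn, NULL);
static struct workqueue_struct *poll_wq;

static void poll_fn(void *data)
{
	/* periodic housekeeping would go here */
}

static int poll_start(void)
{
	poll_wq = create_workqueue("poller");
	if (!poll_wq)
		return -ENOMEM;
	/* pin the delayed work to CPU 0, firing HZ jiffies (one second) from now */
	return queue_delayed_work_on(0, poll_wq, &poll_work, HZ);
}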