Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
include/asm-sh/semaphore-helper.h (89 lines, 2.1 KiB, C)
#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * Here wake_one_more() publishes a wakeup by bumping sem->sleepers with
 * an atomic_inc(), and the waking_non_zero*() variants consume it under
 * semaphore_wake_lock.
 */
static __inline__ void wake_one_more(struct semaphore *sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}

static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * The atomic_inc() that undoes the sem->count decrement made by
 * down_interruptible() must happen while we hold the spinlock, so that
 * it is atomic with respect to the sem->sleepers update in
 * wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * The atomic_inc() that undoes the sem->count decrement made by
 * down_trylock() must happen while we hold the spinlock, so that it is
 * atomic with respect to the sem->sleepers update in wake_one_more();
 * otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
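For context, these helpers are only half of the picture: the architecture's semaphore slow path (historically a separate semaphore.c, not part of this header) drives them from up() and the down_*() variants. The sketch below is a minimal reconstruction of that caller pattern, assuming the 2.6-era struct semaphore fields (an atomic_t count, the sleepers count above, and a wait queue named wait) and the wait-queue API of that era; the names __up_sketch and __down_interruptible_sketch are illustrative, not taken from this commit.

/*
 * Hypothetical caller sketch, not part of this header: how a 2.6-era
 * slow path would typically drive the helpers above. Assumes the
 * era's <linux/sched.h> / <linux/wait.h> and a struct semaphore with
 * a wait-queue field named "wait".
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/semaphore.h>

/* Wake side: publish one wakeup, then kick a sleeper off the queue. */
static void __up_sketch(struct semaphore *sem)
{
	wake_one_more(sem);	/* bump sem->sleepers */
	wake_up(&sem->wait);	/* wake a task blocked in the loop below */
}

/* Sleep side: loop until waking_non_zero_interruptible() decides. */
static int __down_interruptible_sketch(struct semaphore *sem)
{
	int ret = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue(&sem->wait, &wait);

	for (;;) {
		int waking = waking_non_zero_interruptible(sem, tsk);
		if (waking == 1)		/* consumed a wakeup: lock acquired */
			break;
		if (waking == -EINTR) {		/* signal: sem->count already undone */
			ret = -EINTR;
			break;
		}
		schedule();			/* 0: no wakeup pending, sleep */
		tsk->state = TASK_INTERRUPTIBLE;
	}

	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}

The property the helpers give this loop is that a wakeup published by wake_one_more() is consumed exactly once, and that the -EINTR path restores sem->count under the same lock that guards the sleepers count, so a signal cannot eat a wakeup meant for another sleeper.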