
File: [Development] / linux-2.4-xfs / include / asm-i386 / smplock.h

Revision 1.1, Wed Dec 31 00:54:49 2003 UTC, by cattelan
Branch: MAIN
CVS Tags: HEAD

Initial Import 2.4.24pre2

#ifndef __ASM_SMPLOCK_H
#define __ASM_SMPLOCK_H

/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

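/*
 * kernel_flag is the "big kernel lock" (BKL).  It lives in a
 * spinlock_cacheline_t so that this heavily contended lock sits
 * in its own cache line.
 */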
extern spinlock_cacheline_t kernel_flag_cacheline;  
#define kernel_flag kernel_flag_cacheline.lock      

#define kernel_locked()		spin_is_locked(&kernel_flag)

/*
 * Release the global kernel lock and the global interrupt lock.
 * Used when a task is switched out: lock_depth >= 0 means the
 * task currently holds the BKL.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock when the task is switched back in,
 * if it held the BKL when it was switched out.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)


/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously (the BKL is never taken
 * from interrupt context), so we only need to worry about
 * other CPUs.
 */
static __inline__ void lock_kernel(void)
{
#if 1
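	/*
	 * lock_depth starts at -1: the first (outermost) lock_kernel()
	 * takes the spinlock, nested calls only bump the count.
	 */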
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
#else
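	/*
	 * Disabled alternative: the same check-and-lock done in
	 * hand-written assembly; compiled out by the #if 1 above.
	 */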
	__asm__ __volatile__(
		"incl %1\n\t"
		"jne 9f"
		spin_lock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}

static __inline__ void unlock_kernel(void)
{
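	/* Catch an unbalanced unlock_kernel(): the caller does not hold the BKL. */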
	if (current->lock_depth < 0)
		out_of_line_bug();
#if 1
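	/* Drop the spinlock only when the outermost unlock brings lock_depth back to -1. */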
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
#else
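	/* Same disabled hand-written assembly path as in lock_kernel(). */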
	__asm__ __volatile__(
		"decl %1\n\t"
		"jns 9f\n\t"
		spin_unlock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}

#endif /* __ASM_SMPLOCK_H */
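
Usage sketch (not part of the header; the helper name below is hypothetical): in 2.4, code that still depends on the BKL brackets its critical section with lock_kernel()/unlock_kernel(), normally via <linux/smp_lock.h>, which pulls in this file on SMP builds. The calls may nest, since lock_depth counts recursion per task.

#include <linux/smp_lock.h>

static void do_something_under_bkl(void)	/* hypothetical helper */
{
	lock_kernel();		/* outermost call: takes kernel_flag */
	/* ... touch state still protected by the BKL ... */
	lock_kernel();		/* nested call: only bumps lock_depth */
	unlock_kernel();	/* lock_depth back to 0, lock still held */
	unlock_kernel();	/* outermost unlock: releases kernel_flag */
}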