[BACK] Return to U3copy_to_user.S CVS log [TXT][DIR] Up to [Development] / linux-2.6-xfs / arch / sparc64 / lib

File: [Development] / linux-2.6-xfs / arch / sparc64 / lib / U3copy_to_user.S (download)

Revision 1.2, Mon Aug 16 03:52:41 2004 UTC (13 years, 2 months ago) by nathans
Branch: MAIN
Changes since 1.1: +245 -377 lines

Merge up to 2.6.8.1

/* U3copy_to_user.S: UltraSparc-III optimized memcpy.
 *
 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>

/* Shorthand so conditional branches can spell the 64-bit integer
 * condition codes uniformly as %XCC.
 */
#define XCC xcc

/* EXNV(insn, ops, a, b): emit the user-access instruction "insn ops" at
 * local label 98 with an __ex_table entry pointing at fixup label 99.
 * On a fault the fixup returns to the caller with %o0 = result of
 * "a, b, %o0" -- the residual (not-yet-copied) byte count.  Unlike EX
 * below, the fixup does not run VISExitHalf, so this form is only used
 * on paths where the FPU half is not live.
 */
#define EXNV(x,y,a,b)	\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	retl;				\
	 a, b, %o0;			\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXNV2: as EXNV, but the fixup returns ("a, b, %o0") + 1.  Used with
 * single-byte stores (stba), where the byte that faulted must itself be
 * counted as not copied.
 */
#define EXNV2(x,y,a,b)	\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	a, b, %o0;			\
	retl;				\
	 add %o0, 1, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXNV3: as EXNV, but the fixup returns ("a, b, %o0") + 4.  Used with
 * 32-bit word stores (stwa), counting the faulting word as not copied.
 */
#define EXNV3(x,y,a,b)	\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	a, b, %o0;		\
	retl;				\
	 add %o0, 4, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXNV4: as EXNV, but the fixup returns ("a, b, %o0") + 8.  Used with
 * 64-bit doubleword stores (stxa), counting the faulting doubleword as
 * not copied.
 */
#define EXNV4(x,y,a,b)	\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	a, b, %o0;		\
	retl;				\
	 add %o0, 8, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EX: as EXNV, but the faulting access occurs while the lower FPU half
 * is live, so the fixup runs VISExitHalf to restore FPU state before
 * returning %o0 = "a, b, %o0" (the residual byte count).
 */
#define EX(x,y,a,b)			\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	VISExitHalf;			\
	retl;				\
	 a, b, %o0;			\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXBLK1: block-store fault fixup.  Restores FPU state, then returns
 * %o0 = (%o4 + 0x1c0) + (%o2 & 0x3f) as the residual byte count.
 * NOTE(review): this macro is not referenced anywhere in this file --
 * presumably kept for symmetry with the other U3 copy variants; confirm.
 */
#define EXBLK1(x,y)			\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	VISExitHalf;			\
	add %o4, 0x1c0, %o1;		\
	and %o2, (0x40 - 1), %o2;	\
	retl;				\
	 add %o1, %o2, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXBLK2: fixup for the block store inside the main 64-byte loop.
 * Restores FPU state, then returns
 *   %o0 = (%o3 << 6) + 0x80 + (%o2 & 0x3f)
 * i.e. the remaining loop iterations (%o3) times 64, plus 0x80 for the
 * two blocks still in flight in the software pipeline, plus the
 * sub-64-byte tail of the length.
 */
#define EXBLK2(x,y)			\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	VISExitHalf;			\
	sll %o3, 6, %o3;		\
	and %o2, (0x40 - 1), %o2;	\
	add %o3, 0x80, %o1;		\
	retl;				\
	 add %o1, %o2, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXBLK3: fixup for the second-to-last block store (after the main
 * loop).  Restores FPU state, then returns
 *   %o0 = (%o2 & 0x3f) + 0x80
 * -- the sub-64-byte tail plus the two blocks not yet stored.
 */
#define EXBLK3(x,y)			\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	VISExitHalf;			\
	and %o2, (0x40 - 1), %o2;	\
	retl;				\
	 add %o2, 0x80, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;
/* EXBLK4: fixup for the final block store.  Restores FPU state, then
 * returns
 *   %o0 = (%o2 & 0x3f) + 0x40
 * -- the sub-64-byte tail plus the one block not yet stored.
 */
#define EXBLK4(x,y)			\
98:	x,y;				\
	.section .fixup;		\
	.align 4;			\
99:	VISExitHalf;			\
	and %o2, (0x40 - 1), %o2;	\
	retl;				\
	 add %o2, 0x40, %o0;		\
	.section __ex_table;		\
	.align 4;			\
	.word 98b, 99b;			\
	.text;				\
	.align 4;

	/* Declare %g2/%g3 as scratch so the assembler accepts their use
	 * (sparc64 GAS requires declaring the application registers).
	 */
	.register	%g2,#scratch
	.register	%g3,#scratch

	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 */

	.text
	.align	32

	/* The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */

	.globl	U3copy_to_user
U3copy_to_user:	/* %o0=dst, %o1=src, %o2=len */
	/* Writing to %asi is _expensive_ so we hardcode it.
	 * Reading %asi to check for KERNEL_DS is comparatively
	 * cheap.
	 */
	/* Bail to the generic stub unless %asi selects the user
	 * secondary ASI (ASI_AIUS).
	 */
	rd		%asi, %g1
	cmp		%g1, ASI_AIUS
	bne,pn		%icc, U3memcpy_user_stub
	 nop

	/* len == 0: nothing to copy, return 0 via 85. */
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, %o3
	/* len <= 16: tiny copy at 80; the annulled delay slot folds
	 * len into %o3 only when the branch is taken.
	 */
	cmp		%o2, 16
	bleu,a,pn	%XCC, 80f
	 or		%o3, %o2, %o3

	/* 16 < len < 256: medium copy at 70; the delay slot leaves
	 * (dst | src) & 7 in the condition codes for it.
	 */
	cmp		%o2, 256
	blu,pt		%XCC, 70f
	 andcc		%o3, 0x7, %g0

	/* len >= 256: VIS block-copy path. */
	ba,pt		%xcc, 1f
	 andcc		%o0, 0x3f, %g2

	/* Here len >= 256 and condition codes reflect execution
	 * of "andcc %o0, 0x3f, %g2", done by caller.
	 */
	.align		64
1:
	/* Is 'dst' already aligned on an 64-byte boundary? */
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub		[%o1 + 0x00], %o3
	add		%o1, 0x1, %o1
	add		%o0, 0x1, %o0
	subcc		%g2, 0x1, %g2

	bg,pt		%XCC, 1b
	 EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)

	/* Enable the lower FPU half.  %g1 = src & 7 is kept for the
	 * tail, while alignaddr rounds %o1 down to an 8-byte boundary.
	 */
2:	VISEntryHalf
	and		%o1, 0x7, %g1
	ba,pt		%xcc, 1f
	 alignaddr	%o1, %g0, %o1

	.align		64
1:
	/* Order all earlier stores before the loads/stores below. */
	membar		#StoreLoad | #StoreStore | #LoadStore
	/* Prime the software pipeline: prefetch the first 0x1c0 bytes
	 * of src and load+align the first 64-byte block into %f16-%f30.
	 * %o4 = len rounded down to a multiple of 64.
	 */
	prefetch	[%o1 + 0x000], #one_read
	prefetch	[%o1 + 0x040], #one_read
	andn		%o2, (0x40 - 1), %o4
	prefetch	[%o1 + 0x080], #one_read
	prefetch	[%o1 + 0x0c0], #one_read
	ldd		[%o1 + 0x000], %f0
	prefetch	[%o1 + 0x100], #one_read
	ldd		[%o1 + 0x008], %f2
	prefetch	[%o1 + 0x140], #one_read
	ldd		[%o1 + 0x010], %f4
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x018], %f6
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x020], %f8
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x028], %f10
	faligndata	%f6, %f8, %f22

	ldd		[%o1 + 0x030], %f12
	faligndata	%f8, %f10, %f24
	ldd		[%o1 + 0x038], %f14
	faligndata	%f10, %f12, %f26
	ldd		[%o1 + 0x040], %f0

	/* Two blocks are now in flight through the pipeline, so the
	 * main loop runs %o3 = (full blocks - 2) iterations.
	 */
	sub		%o4, 0x80, %o4
	add		%o1, 0x40, %o1
	ba,pt		%xcc, 1f
	 srl		%o4, 6, %o3

	.align		64
1:
	/* Main loop: load+align the next 64 src bytes while block-
	 * storing the previously aligned block to userspace.
	 */
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16

	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24

	ldd		[%o1 + 0x040], %f0
	prefetch	[%o1 + 0x180], #one_read
	faligndata	%f10, %f12, %f26
	subcc		%o3, 0x01, %o3
	add		%o1, 0x40, %o1
	bg,pt		%XCC, 1b
	 add		%o0, 0x40, %o0

	/* Finally we copy the last full 64-byte block. */
	ldd		[%o1 + 0x008], %f2
	faligndata	%f12, %f14, %f28
	ldd		[%o1 + 0x010], %f4
	faligndata	%f14, %f0, %f30
	EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
	ldd		[%o1 + 0x018], %f6
	faligndata	%f0, %f2, %f16
	ldd		[%o1 + 0x020], %f8
	faligndata	%f2, %f4, %f18
	ldd		[%o1 + 0x028], %f10
	faligndata	%f4, %f6, %f20
	ldd		[%o1 + 0x030], %f12
	faligndata	%f6, %f8, %f22
	ldd		[%o1 + 0x038], %f14
	faligndata	%f8, %f10, %f24
	/* Only load the doubleword past this block when src was
	 * misaligned (%g1 != 0); never read past the end of src.
	 */
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	ldd		[%o1 + 0x040], %f0
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1

	/* Wait for the block stores above to complete. */
	membar		#Sync

	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
	/* %g2 = whole 8-byte words left (len & 0x38).  If it is zero,
	 * or only a single word remains, that word is handled by the
	 * integer tail at 2 instead of the FPU loop.
	 */
	and		%o2, 0x3f, %o2
	andcc		%o2, 0x38, %g2
	be,pn		%XCC, 2f
	 subcc		%g2, 0x8, %g2
	be,pn		%XCC, 2f
	 cmp		%g1, 0

	/* When src was aligned (%g1 == 0), %f0 was not reloaded at the
	 * end of the block loop; fetch the current doubleword here
	 * (the delay slot is annulled when the branch is not taken).
	 */
	be,a,pt		%XCC, 1f
	 ldd		[%o1 + 0x00], %f0

1:	ldd		[%o1 + 0x08], %f2
	add		%o1, 0x8, %o1
	sub		%o2, 0x8, %o2
	subcc		%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
	be,pn		%XCC, 2f
	 add		%o0, 0x8, %o0
	ldd		[%o1 + 0x08], %f0
	add		%o1, 0x8, %o1
	sub		%o2, 0x8, %o2
	subcc		%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
	bne,pn		%XCC, 1b
	 add		%o0, 0x8, %o0

	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
2:
	cmp		%o2, 0
	/* Undo the alignaddr rounding: restore the real src pointer. */
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, 85f
	 sub		%o0, %o1, %o3

	/* Integer tail: %o3 = dst - src, so stores below address
	 * [%o1 + %o3].  If src is not 8-byte aligned, byte copy at 90;
	 * otherwise move the remaining 8/4/2/1-byte pieces of len.
	 */
	andcc		%g1, 0x7, %g0
	bne,pn		%icc, 90f
	 andcc		%o2, 0x8, %g0
	be,pt		%icc, 1f
	 nop
	ldx		[%o1], %o5
	EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
	add		%o1, 0x8, %o1

1:	andcc		%o2, 0x4, %g0
	be,pt		%icc, 1f
	 nop
	lduw		[%o1], %o5
	EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
	add		%o1, 0x4, %o1

1:	andcc		%o2, 0x2, %g0
	be,pt		%icc, 1f
	 nop
	lduh		[%o1], %o5
	EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
	add		%o1, 0x2, %o1

1:	andcc		%o2, 0x1, %g0
	be,pt		%icc, 85f
	 nop
	ldub		[%o1], %o5
	ba,pt		%xcc, 85f
	 EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)

70: /* 16 < len < 256 */
	/* Condition codes still hold (dst | src) & 7 from the branch
	 * delay slot at the top; misaligned goes to the byte loop.
	 */
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

	/* Copy 8 bytes at a time, then an optional trailing word;
	 * any remainder falls through to the byte loop at 90.
	 */
	andn		%o2, 0x7, %o4
	and		%o2, 0x7, %o2
1:	subcc		%o4, 0x8, %o4
	ldx		[%o1], %o5
	EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	lduw		[%o1], %o5
	EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop

80: /* 0 < len <= 16 */
	/* Here %o3 = dst | src | len; if all three are 4-byte aligned
	 * we can copy whole words, else fall back to the byte loop.
	 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:
	subcc		%o2, 4, %o2
	lduw		[%o1], %g1
	EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

	/* Success: return 0 (no bytes left uncopied). */
85:	retl
	 clr		%o0

	.align	32
90:
	/* Fallback: copy %o2 bytes one at a time; %o3 = dst - src. */
	subcc		%o2, 1, %o2
	ldub		[%o1], %g1
	EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 clr		%o0