/*
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

	.align	7
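/*
 * Entry per the PPC64 calling convention: r3 = dest, r4 = src,
 * r5 = number of bytes to copy.  memcpy() must return the original
 * destination pointer in r3.
 */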
_GLOBAL_TOC(memcpy)
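/*
 * The feature section below is resolved at boot: CPUs with
 * CPU_FTR_VMX_COPY take the FTR_SECTION_ELSE path (a branch to
 * memcpy_power7), all others fall through to the generic code.
 */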
BEGIN_FTR_SECTION
#ifdef __LITTLE_ENDIAN__
	cmpdi	cr7,r5,0
#else
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* save destination pointer for return value */
#endif
FTR_SECTION_ELSE
#ifndef SELFTEST
	b	memcpy_power7
#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#ifdef __LITTLE_ENDIAN__
	/* dumb little-endian memcpy that will get replaced at runtime */
	addi r9,r3,-1
	addi r4,r4,-1
	beqlr cr7
	mtctr r5
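	/* Byte-at-a-time loop: both pointers were biased by -1 above so
	 * the update forms lbzu/stbu advance them as they go, and r3 is
	 * left untouched to serve as the return value.  CTR holds the
	 * byte count; the beqlr above returned early for a zero length. */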
1:	lbzu r10,1(r4)
	stbu r10,1(r9)
	bdnz 1b
	blr
#else
	PPC_MTOCRF(0x01,r5)
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4
	blt	cr1,.Lshort_copy
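/*
 * PPC_MTOCRF(0x01,r5) above copied the low four bits of the length
 * into cr7, so cr7 bits 0..3 (tested below as cr7*4+0..3) say whether
 * an 8-, 4-, 2- or 1-byte tail piece remains.  cr1 still holds the
 * "length < 16" comparison.
 */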
/* Below we want to nop out the bne if we're on a CPU that has the
   CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
   cleared.
   At the time of writing the only CPU that has this combination of bits
   set is Power6. */
BEGIN_FTR_SECTION
	nop
FTR_SECTION_ELSE
	bne	.Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
                    CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
	addi	r3,r3,-16
BEGIN_FTR_SECTION
	andi.	r0,r4,7
	bne	.Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
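/*
 * Main aligned copy: CTR is set to len/16 below and each iteration
 * moves 16 bytes.  The ld into r9 and the ldu into r8 alternate with
 * the matching std/stdu, so each store writes the value loaded on the
 * previous iteration (a simple software pipeline).
 */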
	srdi	r7,r5,4
	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,2f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)
	std	r8,8(r3)
2:	ldu	r8,16(r4)
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)
	beq	3f
	addi	r3,r3,16
.Ldo_tail:
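	/* Store the final 1-7 bytes.  r4 was left pointing 8 bytes
	 * before the remaining source data, hence the 8(r4) offsets
	 * with r4 bumped after each piece. */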
	bf	cr7*4+1,1f
	lwz	r9,8(r4)
	addi	r4,r4,4
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	lhz	r9,8(r4)
	addi	r4,r4,2
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	lbz	r9,8(r4)
	stb	r9,0(r3)
3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr

.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0
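/*
 * The source is not 8-byte aligned: r4 was rounded down to an aligned
 * address (r0 = src & 7) and each destination doubleword is built from
 * two successive source doublewords, shifted left by r10 = 8*r0 bits
 * and right by r11 = 64-r10 bits and OR-ed together.
 */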

	bt	cr7*4+0,0f

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)
	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f

	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

3:	std	r12,8(r3)
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beq	4f
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,6f
	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
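	/* The remaining tail bytes now sit at the high-order end of r9;
	 * each rotldi below rotates the next piece into the low-order
	 * bytes so stw/sth/stb store them in (big-endian) order. */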
6:
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr

.Ldst_unaligned:
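/*
 * Destination is not 8-byte aligned: copy 1, 2 and/or 4 bytes until it
 * is, then rejoin .Ldst_aligned.  The PPC_MTOCRF below moves the low
 * bits of r6 (bytes needed to reach the boundary) into cr7, and r7
 * accumulates the offset copied so far.
 */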
	PPC_MTOCRF(0x01,r6)		# put #bytes to 8B bdry into cr7
	subf	r5,r6,r5
	li	r7,0
	cmpldi	cr1,r5,16
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4
	stwx	r0,r7,r3
3:	PPC_MTOCRF(0x01,r5)
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned

.Lshort_copy:
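	/* Fewer than 16 bytes in total: cr7 still holds the low four
	 * bits of the original length, so store 8/4/2/1-byte pieces as
	 * the corresponding bits indicate. */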
	bf	cr7*4+0,1f
	lwz	r0,0(r4)
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
	blr
#endif