c8adfeccee
In 2fae7cdb60 ("powerpc: Fix VMX in
interrupt check in POWER7 copy loops"), Anton inadvertently
introduced a regression for memcpy on POWER7 machines. copyuser and
memcpy diverge slightly in their use of cr1 (copyuser doesn't use it,
but memcpy does) and you end up clobbering that register with your fix.
That results in (taken from an FC18 kernel):
[   18.824604] Unrecoverable VMX/Altivec Unavailable Exception f20 at c000000000052f40
[   18.824618] Oops: Unrecoverable VMX/Altivec Unavailable Exception, sig: 6 [#1]
[   18.824623] SMP NR_CPUS=1024 NUMA pSeries
[   18.824633] Modules linked in: tg3(+) be2net(+) cxgb4(+) ipr(+) sunrpc xts lrw gf128mul dm_crypt dm_round_robin dm_multipath linear raid10 raid456 async_raid6_recov async_memcpy async_pq raid6_pq async_xor xor async_tx raid1 raid0 scsi_dh_rdac scsi_dh_hp_sw scsi_dh_emc scsi_dh_alua squashfs cramfs
[   18.824705] NIP: c000000000052f40 LR: c00000000020b874 CTR: 0000000000000512
[   18.824709] REGS: c000001f1fef7790 TRAP: 0f20   Not tainted  (3.6.0-0.rc6.git0.2.fc18.ppc64)
[   18.824713] MSR: 8000000000009032 <SF,EE,ME,IR,DR,RI>  CR: 4802802e  XER: 20000010
[   18.824726] SOFTE: 0
[   18.824728] CFAR: 0000000000000f20
[   18.824731] TASK = c000000fa7128400[0] 'swapper/24' THREAD: c000000fa7480000 CPU: 24
GPR00: 00000000ffffffc0 c000001f1fef7a10 c00000000164edc0 c000000f9b9a8120
GPR04: c000000f9b9a8124 0000000000001438 0000000000000060 03ffffff064657ee
GPR08: 0000000080000000 0000000000000010 0000000000000020 0000000000000030
GPR12: 0000000028028022 c00000000ff25400 0000000000000001 0000000000000000
GPR16: 0000000000000000 7fffffffffffffff c0000000016b2180 c00000000156a500
GPR20: c000000f968c7a90 c0000000131c31d8 c000001f1fef4000 c000000001561d00
GPR24: 000000000000000a 0000000000000000 0000000000000001 0000000000000012
GPR28: c000000fa5c04f80 00000000000008bc c0000000015c0a28 000000000000022e
[   18.824792] NIP [c000000000052f40] .memcpy_power7+0x5a0/0x7c4
[   18.824797] LR [c00000000020b874] .pcpu_free_area+0x174/0x2d0
[   18.824800] Call Trace:
[   18.824803] [c000001f1fef7a10] [c000000000052c14] .memcpy_power7+0x274/0x7c4 (unreliable)
[   18.824809] [c000001f1fef7b10] [c00000000020b874] .pcpu_free_area+0x174/0x2d0
[   18.824813] [c000001f1fef7bb0] [c00000000020ba88] .free_percpu+0xb8/0x1b0
[   18.824819] [c000001f1fef7c50] [c00000000043d144] .throtl_pd_exit+0x94/0xd0
[   18.824824] [c000001f1fef7cf0] [c00000000043acf8] .blkg_free+0x88/0xe0
[   18.824829] [c000001f1fef7d90] [c00000000018c048] .rcu_process_callbacks+0x2e8/0x8a0
[   18.824835] [c000001f1fef7e90] [c0000000000a8ce8] .__do_softirq+0x158/0x4d0
[   18.824840] [c000001f1fef7f90] [c000000000025ecc] .call_do_softirq+0x14/0x24
[   18.824845] [c000000fa7483650] [c000000000010e80] .do_softirq+0x160/0x1a0
[   18.824850] [c000000fa74836f0] [c0000000000a94a4] .irq_exit+0xf4/0x120
[   18.824854] [c000000fa7483780] [c000000000020c44] .timer_interrupt+0x154/0x4d0
[   18.824859] [c000000fa7483830] [c000000000003be0] decrementer_common+0x160/0x180
[   18.824866] --- Exception: 901 at .plpar_hcall_norets+0x84/0xd4
[   18.824866]     LR = .check_and_cede_processor+0x48/0x80
[   18.824871] [c000000fa7483b20] [c00000000007f018] .check_and_cede_processor+0x18/0x80 (unreliable)
[   18.824877] [c000000fa7483b90] [c00000000007f104] .dedicated_cede_loop+0x84/0x150
[   18.824883] [c000000fa7483c50] [c0000000006bc030] .cpuidle_enter+0x30/0x50
[   18.824887] [c000000fa7483cc0] [c0000000006bc9f4] .cpuidle_idle_call+0x104/0x720
[   18.824892] [c000000fa7483d80] [c000000000070af8] .pSeries_idle+0x18/0x40
[   18.824897] [c000000fa7483df0] [c000000000019084] .cpu_idle+0x1a4/0x380
[   18.824902] [c000000fa7483ec0] [c0000000008a4c18] .start_secondary+0x520/0x528
[   18.824907] [c000000fa7483f90] [c0000000000093f0] .start_secondary_prolog+0x10/0x14
[   18.824911] Instruction dump:
[   18.824914] 38840008 90030000 90e30004 38630008 7ca62850 7cc300d0 78c7e102 7cf01120
[   18.824923] 78c60660 39200010 39400020 39600030 <7e00200c> 7c0020ce 38840010 409f001c
[   18.824935] ---[ end trace 0bb95124affaaa45 ]---
[   18.825046] Unrecoverable VMX/Altivec Unavailable Exception f20 at c000000000052d08
I believe the right fix is to make memcpy match usercopy and not use
cr1.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: <stable@kernel.org> [v3.6]
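
For reference, these are the places cr1 shows up in the attached
memcpy_power7.S (an orientation excerpt only; the actual fix is not
shown here):

	_GLOBAL(memcpy_power7)
	#ifdef CONFIG_ALTIVEC
		cmpldi	r5,16
		cmpldi	cr1,r5,4096	/* memcpy's own use of cr1 */
		...
		bgt	cr1,.Lvmx_copy
		...
	.Lvmx_copy:
		...
		bl	.enter_vmx_copy
		cmpwi	cr1,r3,0	/* the in-interrupt check that, per the
					   analysis above, came in with 2fae7cdb60 */
		...
		beq	cr1,.Lunwind_stack_nonvmx_copy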
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Anton Blanchard <anton@au.ibm.com>
 */
#include <asm/ppc_asm.h>

_GLOBAL(memcpy_power7)
#ifdef CONFIG_ALTIVEC
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,48(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,48(r1)

	blt	.Lshort_copy
#endif

.Lnonvmx_copy:
	/* Get the source 8B aligned */
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	sub	r5,r5,r6
	cmpldi	r5,128
	blt	5f

	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	srdi	r6,r5,7
	mtctr	r6

	/* Now do cacheline (128B) sized loads and stores. */
	.align	5
4:
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	ld	r14,64(r4)
	ld	r15,72(r4)
	ld	r16,80(r4)
	ld	r17,88(r4)
	ld	r18,96(r4)
	ld	r19,104(r4)
	ld	r20,112(r4)
	ld	r21,120(r4)
	addi	r4,r4,128
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	std	r14,64(r3)
	std	r15,72(r3)
	std	r16,80(r3)
	std	r17,88(r3)
	std	r18,96(r3)
	std	r19,104(r3)
	std	r20,112(r3)
	std	r21,120(r3)
	addi	r3,r3,128
	bdnz	4b

	clrldi	r5,r5,(64-7)

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Up to 127B to go */
5:	srdi	r6,r5,4
	mtocrf	0x01,r6

6:	bf	cr7*4+1,7f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	addi	r4,r4,64
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	addi	r3,r3,64

	/* Up to 63B to go */
7:	bf	cr7*4+2,8f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	addi	r4,r4,32
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	addi	r3,r3,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
	ld	r0,0(r4)
	ld	r6,8(r4)
	addi	r4,r4,16
	std	r0,0(r3)
	std	r6,8(r3)
	addi	r3,r3,16

9:	clrldi	r5,r5,(64-4)

	/* Up to 15B to go */
.Lshort_copy:
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,48(r1)
	blr

.Lunwind_stack_nonvmx_copy:
	addi	r1,r1,STACKFRAMESIZE
	b	.Lnonvmx_copy

#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
	mflr	r0
	std	r4,56(r1)
	std	r5,64(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	.enter_vmx_copy
	cmpwi	cr1,r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+48(r1)
	ld	r4,STACKFRAMESIZE+56(r1)
	ld	r5,STACKFRAMESIZE+64(r1)
	mtlr	r0

	/*
	 * We prefetch both the source and destination using enhanced touch
	 * instructions. We use a stream ID of 0 for the load side and
	 * 1 for the store side.
	 */
	clrrdi	r6,r4,7
	clrrdi	r9,r3,7
	ori	r9,r9,1		/* stream=1 */

	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
	cmpldi	r7,0x3FF
	ble	1f
	li	r7,0x3FF
1:	lis	r0,0x0E00	/* depth=7 */
	sldi	r7,r7,7
	or	r7,r7,r0
	ori	r10,r7,1	/* stream=1 */

	lis	r8,0x8000	/* GO=1 */
	clrldi	r8,r8,32

.machine push
.machine "power4"
	dcbt	r0,r6,0b01000
	dcbt	r0,r7,0b01010
	dcbtst	r0,r9,0b01000
	dcbtst	r0,r10,0b01010
	eieio
	dcbt	r0,r8,0b01010	/* GO */
.machine pop

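	/*
	 * Note on the sequence above (enhanced data stream touch): the
	 * 0b01000 dcbt/dcbtst forms describe the start of the load and
	 * store streams, the 0b01010 forms supply the unit count and
	 * depth for each stream, and the final dcbt with GO=1 (ordered
	 * against the descriptions by the eieio) starts the streams.
	 */
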
	beq	cr1,.Lunwind_stack_nonvmx_copy

	/*
	 * If source and destination are not relatively aligned we use a
	 * slower permute loop.
	 */
	xor	r6,r4,r3
	rldicl.	r6,r6,0,(64-4)
	bne	.Lvmx_unaligned_copy

	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6

	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48

	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	lvx	vr6,r4,r9
	lvx	vr5,r4,r10
	lvx	vr4,r4,r11
	lvx	vr3,r4,r12
	lvx	vr2,r4,r14
	lvx	vr1,r4,r15
	lvx	vr0,r4,r16
	addi	r4,r4,128
	stvx	vr7,r0,r3
	stvx	vr6,r3,r9
	stvx	vr5,r3,r10
	stvx	vr4,r3,r11
	stvx	vr3,r3,r12
	stvx	vr2,r3,r14
	stvx	vr1,r3,r15
	stvx	vr0,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */

.Lvmx_unaligned_copy:
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r7,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r7,4(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6

	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48

	lvsl	vr16,0,r4	/* Setup permute control vector */
	lvx	vr0,0,r4
	addi	r4,r4,16

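	/*
	 * vr16 (from lvsl) encodes the source misalignment and vr0 holds
	 * the last aligned quadword loaded.  Each step below loads the
	 * next aligned quadword(s) and uses vperm to stitch neighbouring
	 * pairs together into the realigned data that is stored to the
	 * now 16B-aligned destination.
	 */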
	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16
	vor	vr0,vr1,vr1

5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	vperm	vr8,vr0,vr7,vr16
	lvx	vr6,r4,r9
	vperm	vr9,vr7,vr6,vr16
	lvx	vr5,r4,r10
	vperm	vr10,vr6,vr5,vr16
	lvx	vr4,r4,r11
	vperm	vr11,vr5,vr4,vr16
	lvx	vr3,r4,r12
	vperm	vr12,vr4,vr3,vr16
	lvx	vr2,r4,r14
	vperm	vr13,vr3,vr2,vr16
	lvx	vr1,r4,r15
	vperm	vr14,vr2,vr1,vr16
	lvx	vr0,r4,r16
	vperm	vr15,vr1,vr0,vr16
	addi	r4,r4,128
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	stvx	vr12,r3,r12
	stvx	vr13,r3,r14
	stvx	vr14,r3,r15
	stvx	vr15,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	addi	r4,r4,-16	/* Unwind the +16 load offset */
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */
#endif /* CONFIG_ALTIVEC */