539cb89fbd
Add base code for supporting the MIPS SIMD Architecture (MSA) in MIPS KVM guests. MSA cannot yet be enabled in the guest; we're just laying the groundwork.

As with the FPU, whether the guest's MSA context is loaded is stored in another bit in the fpu_inuse vcpu member. This allows MSA to be disabled when the guest disables it, while keeping the MSA context loaded so it doesn't have to be reloaded if the guest re-enables it.

New assembly code is added for saving and restoring the MSA context, for restoring only the upper half of the MSA context (for when the FPU context is already loaded), and for saving/clearing and restoring MSACSR (which can itself cause an MSA FP exception depending on the value). The MSACSR is restored before returning to the guest if MSA is already enabled, and the existing FP exception die notifier is extended to catch the possible MSA FP exception and step over the ctcmsa instruction.

The helper function kvm_own_msa() is added to enable MSA and restore the MSA context if it isn't already loaded. It will be used in a later patch when the guest attempts to use MSA for the first time and triggers an MSA disabled exception.

The existing FPU helpers are extended to handle MSA. kvm_lose_fpu() saves the full MSA context if it is loaded (which includes the FPU context), and both kvm_lose_fpu() and kvm_drop_fpu() disable MSA. kvm_own_fpu() also needs to lose any MSA context if FR=0, since there would be a risk of getting reserved instruction exceptions if CU1 is enabled and we later try to save the MSA context. We shouldn't usually hit this case since it will be handled when emulating CU1 changes; however, there's nothing to stop the guest modifying the Status register directly via the comm page, which will cause this case to get hit.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
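To make the fpu_inuse bookkeeping concrete, here is a minimal C sketch of what a kvm_own_msa()-style helper looks like when built on the new assembly entry points. The KVM_MIPS_FPU_FPU/KVM_MIPS_FPU_MSA flag names, the enable_msa_hw() helper, and the exact prototypes of the restore routines are assumptions for illustration, not a copy of the patch.

/*
 * Sketch only: the flag names, enable_msa_hw() and the prototypes below
 * are illustrative assumptions, not necessarily what the patch itself uses.
 */
#include <linux/kvm_host.h>
#include <linux/preempt.h>

void __kvm_restore_msa(struct kvm_vcpu_arch *arch);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *arch);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *arch);
void enable_msa_hw(struct kvm_vcpu *vcpu);	/* hypothetical helper */

#define KVM_MIPS_FPU_FPU	0x1	/* guest FPU context is live in hardware */
#define KVM_MIPS_FPU_MSA	0x2	/* guest MSA context is live in hardware */

void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/* Set the hardware enable bits for the FPU and MSA. */
	enable_msa_hw(vcpu);

	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)) {
		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
			/*
			 * The scalar FP half is already live, so only the
			 * upper 64 bits of each vector register need loading.
			 */
			__kvm_restore_msa_upper(&vcpu->arch);
		} else {
			/* Nothing is live yet: load the full 128-bit context. */
			__kvm_restore_msa(&vcpu->arch);
			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
		}
		__kvm_restore_msacsr(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
	}

	preempt_enable();
}

The asymmetry the commit message describes falls out of this: merely disabling MSA in the guest leaves KVM_MIPS_FPU_MSA set, so a later re-enable skips the reload, while kvm_lose_fpu()/kvm_drop_fpu() are the paths that save (or discard) the context and clear the bit. The new assembly entry points themselves follow.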
		
			
				
	
	
		
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

	.set	noreorder
	.set	noat

LEAF(__kvm_save_msa)
	st_d	0,  VCPU_FPR0,  a0
	st_d	1,  VCPU_FPR1,  a0
	st_d	2,  VCPU_FPR2,  a0
	st_d	3,  VCPU_FPR3,  a0
	st_d	4,  VCPU_FPR4,  a0
	st_d	5,  VCPU_FPR5,  a0
	st_d	6,  VCPU_FPR6,  a0
	st_d	7,  VCPU_FPR7,  a0
	st_d	8,  VCPU_FPR8,  a0
	st_d	9,  VCPU_FPR9,  a0
	st_d	10, VCPU_FPR10, a0
	st_d	11, VCPU_FPR11, a0
	st_d	12, VCPU_FPR12, a0
	st_d	13, VCPU_FPR13, a0
	st_d	14, VCPU_FPR14, a0
	st_d	15, VCPU_FPR15, a0
	st_d	16, VCPU_FPR16, a0
	st_d	17, VCPU_FPR17, a0
	st_d	18, VCPU_FPR18, a0
	st_d	19, VCPU_FPR19, a0
	st_d	20, VCPU_FPR20, a0
	st_d	21, VCPU_FPR21, a0
	st_d	22, VCPU_FPR22, a0
	st_d	23, VCPU_FPR23, a0
	st_d	24, VCPU_FPR24, a0
	st_d	25, VCPU_FPR25, a0
	st_d	26, VCPU_FPR26, a0
	st_d	27, VCPU_FPR27, a0
	st_d	28, VCPU_FPR28, a0
	st_d	29, VCPU_FPR29, a0
	st_d	30, VCPU_FPR30, a0
	st_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_save_msa)

LEAF(__kvm_restore_msa)
	ld_d	0,  VCPU_FPR0,  a0
	ld_d	1,  VCPU_FPR1,  a0
	ld_d	2,  VCPU_FPR2,  a0
	ld_d	3,  VCPU_FPR3,  a0
	ld_d	4,  VCPU_FPR4,  a0
	ld_d	5,  VCPU_FPR5,  a0
	ld_d	6,  VCPU_FPR6,  a0
	ld_d	7,  VCPU_FPR7,  a0
	ld_d	8,  VCPU_FPR8,  a0
	ld_d	9,  VCPU_FPR9,  a0
	ld_d	10, VCPU_FPR10, a0
	ld_d	11, VCPU_FPR11, a0
	ld_d	12, VCPU_FPR12, a0
	ld_d	13, VCPU_FPR13, a0
	ld_d	14, VCPU_FPR14, a0
	ld_d	15, VCPU_FPR15, a0
	ld_d	16, VCPU_FPR16, a0
	ld_d	17, VCPU_FPR17, a0
	ld_d	18, VCPU_FPR18, a0
	ld_d	19, VCPU_FPR19, a0
	ld_d	20, VCPU_FPR20, a0
	ld_d	21, VCPU_FPR21, a0
	ld_d	22, VCPU_FPR22, a0
	ld_d	23, VCPU_FPR23, a0
	ld_d	24, VCPU_FPR24, a0
	ld_d	25, VCPU_FPR25, a0
	ld_d	26, VCPU_FPR26, a0
	ld_d	27, VCPU_FPR27, a0
	ld_d	28, VCPU_FPR28, a0
	ld_d	29, VCPU_FPR29, a0
	ld_d	30, VCPU_FPR30, a0
	ld_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_restore_msa)

	.macro	kvm_restore_msa_upper	wr, off, base
	.set	push
	.set	noat
#ifdef CONFIG_64BIT
	ld	$1, \off(\base)
	insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
	lw	$1, \off(\base)
	insert_w \wr, 2
	lw	$1, (\off+4)(\base)
	insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
	lw	$1, (\off+4)(\base)
	insert_w \wr, 2
	lw	$1, \off(\base)
	insert_w \wr, 3
#endif
	.set	pop
	.endm

LEAF(__kvm_restore_msa_upper)
	kvm_restore_msa_upper	0,  VCPU_FPR0 +8, a0
	kvm_restore_msa_upper	1,  VCPU_FPR1 +8, a0
	kvm_restore_msa_upper	2,  VCPU_FPR2 +8, a0
	kvm_restore_msa_upper	3,  VCPU_FPR3 +8, a0
	kvm_restore_msa_upper	4,  VCPU_FPR4 +8, a0
	kvm_restore_msa_upper	5,  VCPU_FPR5 +8, a0
	kvm_restore_msa_upper	6,  VCPU_FPR6 +8, a0
	kvm_restore_msa_upper	7,  VCPU_FPR7 +8, a0
	kvm_restore_msa_upper	8,  VCPU_FPR8 +8, a0
	kvm_restore_msa_upper	9,  VCPU_FPR9 +8, a0
	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
	jr	ra
	 nop
	END(__kvm_restore_msa_upper)

LEAF(__kvm_restore_msacsr)
	lw	t0, VCPU_MSA_CSR(a0)
	/*
	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
	 * See kvm_mips_csr_die_notify() which handles t0 containing a value
	 * which triggers an MSA FP Exception, which must be stepped over and
	 * ignored since the set cause bits must remain there for the guest.
	 */
	_ctcmsa	MSA_CSR, t0
	jr	ra
	 nop
	END(__kvm_restore_msacsr)
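The comment in __kvm_restore_msacsr above refers to kvm_mips_csr_die_notify(), which steps over the ctcmsa when the restored MSACSR value raises an MSA FP exception. A rough sketch of that mechanism is below; the DIE_MSAFP die value, the 8-byte offset window, and the notifier plumbing are assumptions for illustration rather than the patch's exact code.

#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>

struct kvm_vcpu_arch;
void __kvm_restore_msacsr(struct kvm_vcpu_arch *arch);	/* assumed prototype */

/*
 * Sketch of a die notifier that steps over the ctcmsa in
 * __kvm_restore_msacsr. The offset window and DIE_MSAFP are assumptions.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc = regs->cp0_epc;

	/* Only an MSA FP exception taken in kernel mode is of interest. */
	if (cmd != DIE_MSAFP || user_mode(regs))
		return NOTIFY_DONE;

	/*
	 * The fault must come from the ctcmsa inside __kvm_restore_msacsr,
	 * which sits at a fixed offset (see the assembly comment above).
	 */
	if (pc < (unsigned long)&__kvm_restore_msacsr ||
	    pc >= (unsigned long)&__kvm_restore_msacsr + 8)
		return NOTIFY_DONE;

	/*
	 * Step over the ctcmsa and ignore the exception; the cause bits it
	 * wrote must stay in MSACSR for the guest to see.
	 */
	regs->cp0_epc += 4;
	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

/* Registered once at init: register_die_notifier(&kvm_mips_csr_die_notifier); */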