Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:
 "Please pull to get these sparc AES/DES/CAMELLIA crypto bug fixes as
  well as an addition of a pte_accessible() define for sparc64 and a
  hugetlb fix from Dave Kleikamp."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in CAMELLIA code.
  sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in DES code.
  sparc64: Fix ECB looping constructs in AES code.
  sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in AES code.
  sparc64: Fix AES ctr mode block size.
  sparc64: Fix unrolled AES 256-bit key loops.
  sparc64: Define pte_accessible()
  sparc: huge_ptep_set_* functions need to call set_huge_pte_at()

commit f01af9f858

7 changed files with 67 additions and 12 deletions
@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
 	 add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-10:	ldx		[%o1 + 0x00], %g3
+10:	ldd		[%o0 + 0xd0], %f56
+	ldd		[%o0 + 0xd8], %f58
+	ldd		[%o0 + 0xe0], %f60
+	ldd		[%o0 + 0xe8], %f62
+	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	xor		%g1, %g3, %g3
 	xor		%g2, %g7, %g7
@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
 	ldx		[%o0 - 0x10], %g1
 	subcc		%o3, 0x10, %o3
+	ldx		[%o0 - 0x08], %g2
 	be		10f
-	 ldx		[%o0 - 0x08], %g2
-	sub		%o0, 0xf0, %o0
+	 sub		%o0, 0xf0, %o0
 1:	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	ldx		[%o1 + 0x10], %o4
@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	 add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-10:	ldx		[%o1 + 0x00], %g3
+10:	ldd		[%o0 + 0x18], %f56
+	ldd		[%o0 + 0x10], %f58
+	ldd		[%o0 + 0x08], %f60
+	ldd		[%o0 + 0x00], %f62
+	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	xor		%g1, %g3, %g3
 	xor		%g2, %g7, %g7
@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
 	 add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-	ldd		[%o0 + 0xd0], %f56
+10:	ldd		[%o0 + 0xd0], %f56
 	ldd		[%o0 + 0xd8], %f58
 	ldd		[%o0 + 0xe0], %f60
 	ldd		[%o0 + 0xe8], %f62
-10:	xor		%g1, %g3, %o5
+	xor		%g1, %g3, %o5
 	MOVXTOD_O5_F0
 	xor		%g2, %g7, %o5
 	MOVXTOD_O5_F2
@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
 	while ((nbytes = walk.nbytes)) {
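
The same one-line fix recurs in every AES, DES and CAMELLIA glue hunk that follows: clear CRYPTO_TFM_REQ_MAY_SLEEP on the descriptor as soon as the blkcipher walk has been started, so that the walk helpers will not sleep while the sparc64 FPU still holds the cipher state. A minimal sketch of the pattern, using the helper names from the sparc64 AES glue shown above (the other files differ only in their key-loading calls):

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	/* The cipher loop runs with the FPU loaded and must not sleep,
	 * so strip the flag that would otherwise let blkcipher_walk_done()
	 * reschedule or use sleeping allocations. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);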
@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ctx->ops->load_decrypt_keys(&ctx->key[0]);
 	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ctx->ops->load_decrypt_keys(&ctx->key[0]);
 	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -329,6 +333,22 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 	return err;
 }
 
+static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
+			    struct blkcipher_walk *walk)
+{
+	u8 *ctrblk = walk->iv;
+	u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
+			      keystream, AES_BLOCK_SIZE);
+	crypto_xor((u8 *) keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+	crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
 static int ctr_crypt(struct blkcipher_desc *desc,
 		     struct scatterlist *dst, struct scatterlist *src,
 		     unsigned int nbytes)
@@ -338,10 +358,11 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 	int err;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		unsigned int block_len = nbytes & AES_BLOCK_MASK;
 
 		if (likely(block_len)) {
@@ -353,6 +374,10 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	if (walk.nbytes) {
+		ctr_crypt_final(ctx, &walk);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
 	fprs_write(0);
 	return err;
 }
@@ -418,7 +443,7 @@ static struct crypto_alg algs[] = { {
 	.cra_driver_name	= "ctr-aes-sparc64",
 	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_blocksize		= 1,
 	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
 	.cra_alignmask		= 7,
 	.cra_type		= &crypto_blkcipher_type,
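
Taken together, the CTR hunks above make the sparc64 CTR mode behave as a stream cipher: cra_blocksize drops to 1, blkcipher_walk_virt_block() keeps the walk delivering at least one full AES block until the very end, and ctr_crypt_final() finishes any trailing partial block by encrypting a single counter block and XOR-ing only the leftover bytes. Roughly, the resulting ctr_crypt() flow reads as below; the ops->ctr_crypt bulk call sits in unchanged context that is not part of this diff, so its exact form here is an assumption:

	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		/* Bulk path: whole 16-byte blocks through the FPU routines. */
		unsigned int block_len = nbytes & AES_BLOCK_MASK;

		if (likely(block_len))
			ctx->ops->ctr_crypt(&ctx->key[0],
					    (const u64 *)walk.src.virt.addr,
					    (u64 *) walk.dst.virt.addr,
					    block_len, (u64 *) walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		/* Tail: 1..15 leftover bytes, finished with one keystream block. */
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	fprs_write(0);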
@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (encrypt)
 		key = &ctx->encrypt_key[0];
@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	key = &ctx->encrypt_key[0];
 	camellia_sparc64_load_keys(key, ctx->key_len);
@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	key = &ctx->decrypt_key[0];
 	camellia_sparc64_load_keys(key, ctx->key_len);
@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt)
 1:	ldd	[%o1 + 0x00], %f60
 	DES3_LOOP_BODY(60)
 	std	%f60, [%o2 + 0x00]
+	add	%o1, 0x08, %o1
 	subcc	%o3, 0x08, %o3
 	bne,pt	%icc, 1b
 	 add	%o2, 0x08, %o2
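
For reference, the fixed 3DES ECB loop with comments added (the comments are mine, not part of the diff): without the new add, every iteration re-read the same 8 bytes of input because %o1 was never advanced.

1:	ldd	[%o1 + 0x00], %f60	! load the next 8-byte block
	DES3_LOOP_BODY(60)		! run it through all three DES stages
	std	%f60, [%o2 + 0x00]	! store the result
	add	%o1, 0x08, %o1		! advance the input pointer (the fix)
	subcc	%o3, 0x08, %o3		! account for the 8 bytes consumed
	bne,pt	%icc, 1b		! loop while length remains
	 add	%o2, 0x08, %o2		! delay slot: advance the output pointer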
@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (encrypt)
 		des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (encrypt)
 		K = &ctx->encrypt_expkey[0];
@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	K = &ctx->encrypt_expkey[0];
 	des3_ede_sparc64_load_keys(K);
@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	K = &ctx->decrypt_expkey[0];
 	des3_ede_sparc64_load_keys(K);
@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	ptep_set_wrprotect(mm, addr, ptep);
+	pte_t old_pte = *ptep;
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 }
 
 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+	return changed;
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
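
The hugetlb change works because, on sparc, set_huge_pte_at() is the helper that propagates an update to every base PTE backing the huge mapping, whereas ptep_set_wrprotect() and ptep_set_access_flags() would only touch the single PTE they are handed. A rough, hypothetical illustration of that idea (not the actual arch/sparc implementation):

/* Hypothetical sketch: a huge mapping spans many base PTEs, so an
 * update has to be written to all of them, not just to *ptep. */
static void sketch_set_huge_pte(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t entry)
{
	unsigned long i, nptes = HPAGE_SIZE / PAGE_SIZE;

	for (i = 0; i < nptes; i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		/* point the next entry at the next base page */
		entry = __pte(pte_val(entry) + PAGE_SIZE);
	}
}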
@@ -617,6 +617,12 @@ static inline unsigned long pte_present(pte_t pte)
 	return val;
 }
 
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(pte_t a)
+{
+	return pte_val(a) & _PAGE_VALID;
+}
+
 static inline unsigned long pte_special(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_SPECIAL;
@@ -802,7 +808,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 *             and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
+	if (likely(mm != &init_mm) && pte_accessible(orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
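
The "#define pte_accessible pte_accessible" line above uses the usual arch-override idiom: generic headers supply a fallback only when the macro name is not already defined, so defining the macro alongside the inline lets sparc64's version take precedence. Roughly, the asm-generic side looks like this (a sketch, not part of this diff):

/* include/asm-generic/pgtable.h style fallback (sketch) */
#ifndef pte_accessible
# define pte_accessible(pte)	((void)(pte), 1)
#endif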