x86/paravirt: remove lazy mode in interrupts

Impact: simplification, robustness

Make paravirt_lazy_mode() always return PARAVIRT_LAZY_NONE when in an
interrupt. This prevents interrupt code from accidentally inheriting an
outer lazy state, and instead does everything synchronously. Outer
batched operations are left deferred.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
		
					parent
					
						
							
								a8a93f3f03
							
						
					
				
			
			
				commit
				
					
						b8bcfe997e
					
				
			
		
					 5 changed files with 5 additions and 21 deletions
				
			
		|  | @ -282,6 +282,9 @@ void paravirt_leave_lazy_cpu(void) | |||
| 
 | ||||
| enum paravirt_lazy_mode paravirt_get_lazy_mode(void) | ||||
| { | ||||
| 	if (in_interrupt()) | ||||
| 		return PARAVIRT_LAZY_NONE; | ||||
| 
 | ||||
| 	return __get_cpu_var(paravirt_lazy_mode); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -225,12 +225,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | |||
| 	if (!pmd_present(*pmd_k)) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	if (!pmd_present(*pmd)) { | ||||
| 	if (!pmd_present(*pmd)) | ||||
| 		set_pmd(pmd, *pmd_k); | ||||
| 		arch_flush_lazy_mmu_mode(); | ||||
| 	} else { | ||||
| 	else | ||||
| 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||||
| 	} | ||||
| 
 | ||||
| 	return pmd_k; | ||||
| } | ||||
|  |  | |||
|  | @ -87,7 +87,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | |||
| 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||||
| 	BUG_ON(!pte_none(*(kmap_pte-idx))); | ||||
| 	set_pte(kmap_pte-idx, mk_pte(page, prot)); | ||||
| 	arch_flush_lazy_mmu_mode(); | ||||
| 
 | ||||
| 	return (void *)vaddr; | ||||
| } | ||||
|  | @ -117,7 +116,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
| #endif | ||||
| 	} | ||||
| 
 | ||||
| 	arch_flush_lazy_mmu_mode(); | ||||
| 	pagefault_enable(); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -74,7 +74,6 @@ iounmap_atomic(void *kvaddr, enum km_type type) | |||
| 	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) | ||||
| 		kpte_clear_flush(kmap_pte-idx, vaddr); | ||||
| 
 | ||||
| 	arch_flush_lazy_mmu_mode(); | ||||
| 	pagefault_enable(); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(iounmap_atomic); | ||||
|  |  | |||
|  | @ -824,13 +824,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 
 | ||||
| 	vm_unmap_aliases(); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If we're called with lazy mmu updates enabled, the | ||||
| 	 * in-memory pte state may be stale.  Flush pending updates to | ||||
| 	 * bring them up to date. | ||||
| 	 */ | ||||
| 	arch_flush_lazy_mmu_mode(); | ||||
| 
 | ||||
| 	cpa.vaddr = addr; | ||||
| 	cpa.numpages = numpages; | ||||
| 	cpa.mask_set = mask_set; | ||||
|  | @ -873,13 +866,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 	} else | ||||
| 		cpa_flush_all(cache); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If we've been called with lazy mmu updates enabled, then | ||||
| 	 * make sure that everything gets flushed out before we | ||||
| 	 * return. | ||||
| 	 */ | ||||
| 	arch_flush_lazy_mmu_mode(); | ||||
| 
 | ||||
| out: | ||||
| 	return ret; | ||||
| } | ||||
|  |  | |||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue
	
	 Jeremy Fitzhardinge
				Jeremy Fitzhardinge