xen: enter/exit lazy_mmu_mode around m2p_override calls
This patch is a significant performance improvement for the m2p_override: about 6% using the gntdev device. Each m2p_add/remove_override call issues a MULTI_grant_table_op and a __flush_tlb_single if kmap_op != NULL. Batching all the calls together is a great performance benefit because it means issuing one hypercall total rather than two hypercalls per page. If paravirt_lazy_mode is set to PARAVIRT_LAZY_MMU, all these calls are going to be batched together; otherwise they are issued one at a time. Adding arch_enter_lazy_mmu_mode/arch_leave_lazy_mmu_mode around the m2p_add/remove_override calls forces paravirt_lazy_mode to PARAVIRT_LAZY_MMU, and therefore makes sure that they are always batched. However, it is not safe to call arch_enter_lazy_mmu_mode if we are in interrupt context or if we are already in PARAVIRT_LAZY_MMU mode, so check for both conditions before doing so. Changes in v4: - rebased on 3.4-rc4: all the m2p_override users call gnttab_unmap_refs and gnttab_map_refs; - check whether we are in interrupt context and which lazy mode we are in before calling arch_enter/leave_lazy_mmu_mode. Changes in v3: - do not call arch_enter/leave_lazy_mmu_mode in xen_blkbk_unmap, which can be called in interrupt context. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> [v5: s/int lazy/bool lazy/] Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
		
					parent
					
						
							
								211063dc15
							
						
					
				
			
			
				commit
				
					
						f62805f1f3
					
				
			
		
					 1 changed file with 19 additions and 0 deletions
				
			
		|  | @ -38,6 +38,7 @@ | |||
| #include <linux/vmalloc.h> | ||||
| #include <linux/uaccess.h> | ||||
| #include <linux/io.h> | ||||
| #include <linux/hardirq.h> | ||||
| 
 | ||||
| #include <xen/xen.h> | ||||
| #include <xen/interface/xen.h> | ||||
|  | @ -827,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
| 		    struct page **pages, unsigned int count) | ||||
| { | ||||
| 	int i, ret; | ||||
| 	bool lazy = false; | ||||
| 	pte_t *pte; | ||||
| 	unsigned long mfn; | ||||
| 
 | ||||
|  | @ -837,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
| 	if (xen_feature(XENFEAT_auto_translated_physmap)) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { | ||||
| 		arch_enter_lazy_mmu_mode(); | ||||
| 		lazy = true; | ||||
| 	} | ||||
| 
 | ||||
| 	for (i = 0; i < count; i++) { | ||||
| 		/* Do not add to override if the map failed. */ | ||||
| 		if (map_ops[i].status) | ||||
|  | @ -855,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	if (lazy) | ||||
| 		arch_leave_lazy_mmu_mode(); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(gnttab_map_refs); | ||||
|  | @ -863,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
| 		      struct page **pages, unsigned int count, bool clear_pte) | ||||
| { | ||||
| 	int i, ret; | ||||
| 	bool lazy = false; | ||||
| 
 | ||||
| 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); | ||||
| 	if (ret) | ||||
|  | @ -871,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
| 	if (xen_feature(XENFEAT_auto_translated_physmap)) | ||||
| 		return ret; | ||||
| 
 | ||||
| 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { | ||||
| 		arch_enter_lazy_mmu_mode(); | ||||
| 		lazy = true; | ||||
| 	} | ||||
| 
 | ||||
| 	for (i = 0; i < count; i++) { | ||||
| 		ret = m2p_remove_override(pages[i], clear_pte); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	if (lazy) | ||||
| 		arch_leave_lazy_mmu_mode(); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(gnttab_unmap_refs); | ||||
|  |  | |||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue
	
	 Stefano Stabellini
				Stefano Stabellini