thp: add tlb_remove_pmd_tlb_entry
We have tlb_remove_tlb_entry to indicate that a pte tlb entry should be flushed, but no corresponding API for a pmd entry. This isn't a problem so far because THP is currently x86-only and tlb_flush() on x86 flushes the entire TLB. But it is confusing and could be missed if THP is ported to another architecture. Also convert tlb->need_flush = 1 to a VM_BUG_ON(!tlb->need_flush) in __tlb_remove_page(), as suggested by Andrea Arcangeli: __tlb_remove_page() is supposed to be called after tlb_remove_xxx_tlb_entry(), so this catches any misuse. Signed-off-by: Shaohua Li <shaohua.li@intel.com> Reviewed-by: Andrea Arcangeli <aarcange@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Johannes Weiner <jweiner@redhat.com> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
					parent
					
						
							
								e5591307f0
							
						
					
				
			
			
				commit
				
					
						f21760b15d
					
				
			
		
					 4 changed files with 19 additions and 4 deletions
				
			
		| 
						 | 
					@ -139,6 +139,20 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 | 
				
			||||||
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 | 
							__tlb_remove_tlb_entry(tlb, ptep, address);	\
 | 
				
			||||||
	} while (0)
 | 
						} while (0)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/**
 | 
				
			||||||
 | 
					 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 | 
				
			||||||
 | 
					 * This is a nop so far, because only x86 needs it.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					#ifndef __tlb_remove_pmd_tlb_entry
 | 
				
			||||||
 | 
					#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
 | 
				
			||||||
 | 
					#endif
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 | 
				
			||||||
 | 
						do {							\
 | 
				
			||||||
 | 
							tlb->need_flush = 1;				\
 | 
				
			||||||
 | 
							__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 | 
				
			||||||
 | 
						} while (0)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define pte_free_tlb(tlb, ptep, address)			\
 | 
					#define pte_free_tlb(tlb, ptep, address)			\
 | 
				
			||||||
	do {							\
 | 
						do {							\
 | 
				
			||||||
		tlb->need_flush = 1;				\
 | 
							tlb->need_flush = 1;				\
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 | 
				
			||||||
					  unsigned int flags);
 | 
										  unsigned int flags);
 | 
				
			||||||
extern int zap_huge_pmd(struct mmu_gather *tlb,
 | 
					extern int zap_huge_pmd(struct mmu_gather *tlb,
 | 
				
			||||||
			struct vm_area_struct *vma,
 | 
								struct vm_area_struct *vma,
 | 
				
			||||||
			pmd_t *pmd);
 | 
								pmd_t *pmd, unsigned long addr);
 | 
				
			||||||
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 | 
					extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 | 
				
			||||||
			unsigned long addr, unsigned long end,
 | 
								unsigned long addr, unsigned long end,
 | 
				
			||||||
			unsigned char *vec);
 | 
								unsigned char *vec);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1026,7 +1026,7 @@ out:
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 | 
					int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 | 
				
			||||||
		 pmd_t *pmd)
 | 
							 pmd_t *pmd, unsigned long addr)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	int ret = 0;
 | 
						int ret = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1042,6 +1042,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 | 
				
			||||||
			pgtable = get_pmd_huge_pte(tlb->mm);
 | 
								pgtable = get_pmd_huge_pte(tlb->mm);
 | 
				
			||||||
			page = pmd_page(*pmd);
 | 
								page = pmd_page(*pmd);
 | 
				
			||||||
			pmd_clear(pmd);
 | 
								pmd_clear(pmd);
 | 
				
			||||||
 | 
								tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 | 
				
			||||||
			page_remove_rmap(page);
 | 
								page_remove_rmap(page);
 | 
				
			||||||
			VM_BUG_ON(page_mapcount(page) < 0);
 | 
								VM_BUG_ON(page_mapcount(page) < 0);
 | 
				
			||||||
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 | 
								add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -293,7 +293,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct mmu_gather_batch *batch;
 | 
						struct mmu_gather_batch *batch;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	tlb->need_flush = 1;
 | 
						VM_BUG_ON(!tlb->need_flush);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (tlb_fast_mode(tlb)) {
 | 
						if (tlb_fast_mode(tlb)) {
 | 
				
			||||||
		free_page_and_swap_cache(page);
 | 
							free_page_and_swap_cache(page);
 | 
				
			||||||
| 
						 | 
					@ -1231,7 +1231,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 | 
				
			||||||
			if (next-addr != HPAGE_PMD_SIZE) {
 | 
								if (next-addr != HPAGE_PMD_SIZE) {
 | 
				
			||||||
				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 | 
									VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 | 
				
			||||||
				split_huge_page_pmd(vma->vm_mm, pmd);
 | 
									split_huge_page_pmd(vma->vm_mm, pmd);
 | 
				
			||||||
			} else if (zap_huge_pmd(tlb, vma, pmd))
 | 
								} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 | 
				
			||||||
				continue;
 | 
									continue;
 | 
				
			||||||
			/* fall through */
 | 
								/* fall through */
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue