mm: softdirty: respect VM_SOFTDIRTY in PTE holes
After a VMA is created with the VM_SOFTDIRTY flag set, /proc/pid/pagemap should report that the VMA's virtual pages are soft-dirty until VM_SOFTDIRTY is cleared (i.e., by the next write of "4" to /proc/pid/clear_refs). However, pagemap ignores the VM_SOFTDIRTY flag for virtual addresses that fall in PTE holes (i.e., virtual addresses that don't have a PMD, PUD, or PGD allocated yet). To observe this bug, use mmap to create a VMA large enough such that there's a good chance that the VMA will occupy an unused PMD, then test the soft-dirty bit on its pages. In practice, I found that a VMA that covered a PMD's worth of address space was big enough. This patch adds the necessary VMA lookup to the PTE hole callback in /proc/pid/pagemap's page walk and sets soft-dirty according to the VMAs' VM_SOFTDIRTY flag. Signed-off-by: Peter Feiner <pfeiner@google.com> Acked-by: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Pavel Emelyanov <xemul@parallels.com> Cc: Hugh Dickins <hughd@google.com> Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
					parent
					
						
							
								3a91053aeb
							
						
					
				
			
			
				commit
				
					
						68b5a65248
					
				
			
		
					 1 changed file with 21 additions and 6 deletions
				
			
		|  | @ -925,15 +925,30 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
| 				struct mm_walk *walk) | ||||
| { | ||||
| 	struct pagemapread *pm = walk->private; | ||||
| 	unsigned long addr; | ||||
| 	unsigned long addr = start; | ||||
| 	int err = 0; | ||||
| 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); | ||||
| 
 | ||||
| 	for (addr = start; addr < end; addr += PAGE_SIZE) { | ||||
| 		err = add_to_pagemap(addr, &pme, pm); | ||||
| 		if (err) | ||||
| 			break; | ||||
| 	while (addr < end) { | ||||
| 		struct vm_area_struct *vma = find_vma(walk->mm, addr); | ||||
| 		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); | ||||
| 		unsigned long vm_end; | ||||
| 
 | ||||
| 		if (!vma) { | ||||
| 			vm_end = end; | ||||
| 		} else { | ||||
| 			vm_end = min(end, vma->vm_end); | ||||
| 			if (vma->vm_flags & VM_SOFTDIRTY) | ||||
| 				pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); | ||||
| 		} | ||||
| 
 | ||||
| 		for (; addr < vm_end; addr += PAGE_SIZE) { | ||||
| 			err = add_to_pagemap(addr, &pme, pm); | ||||
| 			if (err) | ||||
| 				goto out; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue
	
	 Peter Feiner
				Peter Feiner