s390/gmap: don't unconditionally call pte_unmap_unlock() in __gmap_zap()
[ Upstream commit b159f94c86 ]

... otherwise we will try unlocking a spinlock that was never locked via a garbage pointer.

At the time we reach this code path, we usually successfully looked up a PGSTE already; however, evil user space could have manipulated the VMA layout in the meantime and triggered removal of the page table.

Fixes: 1e133ab296 ("s390/mm: split arch/s390/mm/pgtable.c")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Link: https://lore.kernel.org/r/20210909162248.14969-3-david@redhat.com
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
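For context, a minimal annotated sketch of the pre-patch control flow, reconstructed from the removed lines of the hunk below; the declarations and the preceding guest-to-host lookup in __gmap_zap() are omitted, and the comments only restate the failure mode described above.

	/* Sketch of the pre-patch flow in __gmap_zap(), reconstructed from the
	 * hunk below; surrounding code is omitted.
	 */
	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
	if (likely(ptep))
		ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
	/*
	 * If get_locked_pte() failed -- e.g. because user space changed the
	 * VMA layout and the page table was removed after the PGSTE lookup --
	 * ptep is NULL and ptl was never written, so the call below unlocks a
	 * spinlock that was never taken, through a garbage pointer. The patch
	 * moves the unlock into the if-branch above.
	 */
	pte_unmap_unlock(ptep, ptl);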
This commit is contained in:
parent 50fcaa7155
commit a729eb55b3

1 changed file with 3 additions and 2 deletions
@@ -684,9 +684,10 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
 		vmaddr |= gaddr & ~PMD_MASK;
 		/* Get pointer to the page table entry */
 		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
-		if (likely(ptep))
+		if (likely(ptep)) {
 			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
-		pte_unmap_unlock(ptep, ptl);
+			pte_unmap_unlock(ptep, ptl);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(__gmap_zap);