commit 2cb7c9cb42
sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic code

The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic(). Let's make this explicit, to prepare for
pagefault_disable() not touching preemption anymore.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
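The change itself is the ordering pattern visible in the file below:
preemption is disabled explicitly before page faults are, and both are
re-enabled in reverse order on unmap. A minimal sketch of that pattern
(illustration only, not the diff):

	preempt_disable();	/* explicit: no schedule until the matching unmap */
	pagefault_disable();	/* no longer relied upon to disable preemption */
	/* ... access the atomic mapping; sleeping is not allowed here ... */
	pagefault_enable();
	preempt_enable();	/* reverse order when the mapping is torn down */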
arch/sparc/mm/highmem.c (132 lines, 3 KiB, C)
/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed as kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>

pgprot_t kmap_prot;

static pte_t *kmap_pte;

void __init kmap_init(void)
{
	unsigned long address;
	pmd_t *dir;

	address = __fix_to_virt(FIX_KMAP_BEGIN);
	dir = pmd_offset(pgd_offset_k(address), address);

	/* cache the first kmap pte */
	kmap_pte = pte_offset_kernel(dir, address);
	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
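For context, a sketch of how a caller typically uses this interface;
copy_highpage_chunk() is a hypothetical helper, not part of this file or
commit. The old comment at the top of the file states the rule: treat an
atomic kmap like a held spinlock and do not sleep while it is mapped.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy len bytes out of a (possibly highmem) page.
 * kmap_atomic() returns with preemption and page faults disabled, so the
 * section between map and unmap must not sleep; kunmap_atomic() releases
 * the slot and re-enables both in reverse order.
 */
static void copy_highpage_chunk(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page);

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr);
}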