 862c588f06
			
		
	
	
	862c588f06
	
	
	
		
			
			The ARM architecture requires explicit branch predictor maintenance when updating an instruction stream for a given virtual address. In reality, this isn't so much of a burden because the branch predictor is flushed during the cache maintenance required to make the new instructions visible to the I-side of the processor. However, there are still some cases where explicit flushing is required, so add a local_bp_flush_all operation to deal with this. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
		
			
				
	
	
		
			141 lines
		
	
	
	
		
			3 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			141 lines
		
	
	
	
		
			3 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
| /*
 | |
|  *  linux/arch/arm/kernel/smp_tlb.c
 | |
|  *
 | |
|  *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 | |
|  *
 | |
|  * This program is free software; you can redistribute it and/or modify
 | |
|  * it under the terms of the GNU General Public License version 2 as
 | |
|  * published by the Free Software Foundation.
 | |
|  */
 | |
| #include <linux/preempt.h>
 | |
| #include <linux/smp.h>
 | |
| 
 | |
| #include <asm/smp_plat.h>
 | |
| #include <asm/tlbflush.h>
 | |
| 
 | |
| /**********************************************************************/
 | |
| 
 | |
| /*
 | |
|  * TLB operations
 | |
|  */
 | |
/*
 * Argument block handed to the ipi_flush_tlb_* IPI handlers below via
 * on_each_cpu{,_mask}().  Callers fill in only the fields the target
 * handler reads.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;	/* VMA for user-address flushes */
	unsigned long ta_start;		/* start address, or the single page */
	unsigned long ta_end;		/* exclusive end address for range flushes */
};
 | |
| 
 | |
/* IPI handler: flush the entire TLB on the receiving CPU. */
static inline void ipi_flush_tlb_all(void *unused)
{
	local_flush_tlb_all();
}
 | |
| 
 | |
/* IPI handler: flush the TLB entries of one mm on the receiving CPU. */
static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = arg;	/* implicit conversion from void * */

	local_flush_tlb_mm(mm);
}
 | |
| 
 | |
| static inline void ipi_flush_tlb_page(void *arg)
 | |
| {
 | |
| 	struct tlb_args *ta = (struct tlb_args *)arg;
 | |
| 
 | |
| 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
 | |
| }
 | |
| 
 | |
| static inline void ipi_flush_tlb_kernel_page(void *arg)
 | |
| {
 | |
| 	struct tlb_args *ta = (struct tlb_args *)arg;
 | |
| 
 | |
| 	local_flush_tlb_kernel_page(ta->ta_start);
 | |
| }
 | |
| 
 | |
| static inline void ipi_flush_tlb_range(void *arg)
 | |
| {
 | |
| 	struct tlb_args *ta = (struct tlb_args *)arg;
 | |
| 
 | |
| 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
 | |
| }
 | |
| 
 | |
| static inline void ipi_flush_tlb_kernel_range(void *arg)
 | |
| {
 | |
| 	struct tlb_args *ta = (struct tlb_args *)arg;
 | |
| 
 | |
| 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 | |
| }
 | |
| 
 | |
/* IPI handler: invalidate the branch predictor on the receiving CPU. */
static inline void ipi_flush_bp_all(void *unused)
{
	local_flush_bp_all();
}
 | |
| 
 | |
| void flush_tlb_all(void)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast())
 | |
| 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 | |
| 	else
 | |
| 		local_flush_tlb_all();
 | |
| }
 | |
| 
 | |
| void flush_tlb_mm(struct mm_struct *mm)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast())
 | |
| 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 | |
| 	else
 | |
| 		local_flush_tlb_mm(mm);
 | |
| }
 | |
| 
 | |
| void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast()) {
 | |
| 		struct tlb_args ta;
 | |
| 		ta.ta_vma = vma;
 | |
| 		ta.ta_start = uaddr;
 | |
| 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
 | |
| 					&ta, 1);
 | |
| 	} else
 | |
| 		local_flush_tlb_page(vma, uaddr);
 | |
| }
 | |
| 
 | |
| void flush_tlb_kernel_page(unsigned long kaddr)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast()) {
 | |
| 		struct tlb_args ta;
 | |
| 		ta.ta_start = kaddr;
 | |
| 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 | |
| 	} else
 | |
| 		local_flush_tlb_kernel_page(kaddr);
 | |
| }
 | |
| 
 | |
| void flush_tlb_range(struct vm_area_struct *vma,
 | |
|                      unsigned long start, unsigned long end)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast()) {
 | |
| 		struct tlb_args ta;
 | |
| 		ta.ta_vma = vma;
 | |
| 		ta.ta_start = start;
 | |
| 		ta.ta_end = end;
 | |
| 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
 | |
| 					&ta, 1);
 | |
| 	} else
 | |
| 		local_flush_tlb_range(vma, start, end);
 | |
| }
 | |
| 
 | |
| void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast()) {
 | |
| 		struct tlb_args ta;
 | |
| 		ta.ta_start = start;
 | |
| 		ta.ta_end = end;
 | |
| 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 | |
| 	} else
 | |
| 		local_flush_tlb_kernel_range(start, end);
 | |
| }
 | |
| 
 | |
| void flush_bp_all(void)
 | |
| {
 | |
| 	if (tlb_ops_need_broadcast())
 | |
| 		on_each_cpu(ipi_flush_bp_all, NULL, 1);
 | |
| 	else
 | |
| 		local_flush_bp_all();
 | |
| }
 |