arch, mm: Remove tlb_fast_mode()
Since the introduction of preemptible mmu_gather, TLB fast mode has been broken. TLB fast mode relies on there being absolutely no concurrency: it frees pages first and invalidates TLBs later. However, now we can get concurrency and stuff goes *bang*.

This patch removes all tlb_fast_mode() code; that was found to be the better option versus trying to patch the hole by entangling TLB invalidation with the scheduler.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 29eb77825c
parent 2c95523c0f
4 changed files with 13 additions and 81 deletions
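The breakage is easiest to see from the fast-mode path itself. The sketch below is illustrative only: the function name is made up and the body is a simplification of the generic gather code, not one of the mm/memory.c hunks from this commit (which are not shown on this page).

/*
 * Illustrative sketch, roughly what the fast-mode branch of the generic
 * __tlb_remove_page() did before this patch.
 */
static int fast_mode_sketch(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		/*
		 * Free the page now, invalidate the TLB later.  That is only
		 * safe when nothing can run concurrently and hit a stale TLB
		 * entry; with preemptible mmu_gather another task can be
		 * scheduled in exactly that window, so the page may already
		 * be reused while an old translation still maps it.
		 */
		free_page_and_swap_cache(page);
		return 1;	/* no batching, nothing left to free later */
	}

	/* normal path: queue the page and free it only after tlb_flush_mmu() */
	return 0;
}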
@@ -97,11 +97,9 @@ struct mmu_gather {
 	unsigned long		start;
 	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
-				fast_mode  : 1; /* No batching */
-
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
+				fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-	return tlb->fast_mode;
-#else
-	/*
-	 * For UP we don't need to worry about TLB flush
-	 * and page free order so much..
-	 */
-	return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
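With tlb_fast_mode() gone, the generic gather path always batches: tlb_flush_mmu() invalidates the TLB before any gathered page goes back to the allocator. Below is a rough sketch of that ordering, simplified from the generic code of this era; field and helper names may not match the file exactly.

/*
 * Simplified sketch of the ordering that remains after this patch:
 * pages are freed only once the TLB entries pointing at them are gone.
 */
static void flush_mmu_sketch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	/* 1: invalidate stale TLB entries first ... */
	tlb_flush(tlb);

	/* 2: ... and only then hand the gathered pages back to the allocator */
	for (batch = &tlb->local; batch != NULL; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
}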