#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

/* This is for the serialisation of PxTLB broadcasts. At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus. This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all systems, not just the N class.
 *
 * It is also used to ensure PTE updates are atomic and consistent
 * with the TLB.
 */
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)

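/*
 * Illustrative sketch (not part of the original header) of how a caller
 * is expected to pair these macros: pa_tlb_lock is held, with interrupts
 * off, around both the page table update and the purge, so no other CPU
 * can observe the PTE and the TLB disagreeing.  The helper name
 * set_pte_and_purge() is hypothetical:
 *
 *	static inline void set_pte_and_purge(pte_t *ptep, pte_t pteval,
 *					     unsigned long sid,
 *					     unsigned long addr)
 *	{
 *		unsigned long flags;
 *
 *		purge_tlb_start(flags);	// take pa_tlb_lock, IRQs off
 *		set_pte(ptep, pteval);	// update the PTE...
 *		mtsp(sid, 1);		// select the address space in %sr1
 *		pdtlb(addr);		// ...then purge the stale entry
 *		purge_tlb_end(flags);	// unlock, restore IRQs
 *	}
 */
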
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

#define smp_flush_tlb_all()	flush_tlb_all()

int __flush_tlb_range(unsigned long sid,
		      unsigned long start, unsigned long end);

#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, start, end)

#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, start, end)

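/*
 * Example (illustrative only, not from the original header): after
 * modifying kernel page tables for some range, the whole range is
 * purged from space id 0, the kernel's address space; 'start' and
 * 'size' here are hypothetical:
 *
 *	flush_tlb_kernel_range(start, start + size);
 *
 * User mappings go through flush_tlb_range() instead, which pulls the
 * space id out of the vma's mm context.
 */
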
/*
 * flush_tlb_mm()
 *
 * The code to switch to a new context is NOT valid for processes
 * which play with the space ids.  Thus, we have to preserve the
 * space id and just flush the entire tlb.  However, the compilers,
 * dynamic linker, etc., do not manipulate space ids, so there
 * could be a significant performance benefit in switching contexts
 * and not flushing the whole tlb.
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#if 1 || defined(CONFIG_SMP)
	/* Except for very small threads, flushing the whole TLB is
	 * faster than using __flush_tlb_range.  The pdtlb and pitlb
	 * instructions are very slow because of the TLB broadcast.
	 * It might be faster to do local range flushes on all CPUs
	 * on PA 2.0 systems.
	 */
	flush_tlb_all();
#else
	/* FIXME: currently broken, causing space id and protection ids
	 * to go out of sync, resulting in faults on userspace accesses.
	 * This approach needs further investigation since running many
	 * small applications (e.g., GCC testsuite) is faster on HP-UX.
	 */
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned long flags, sid;

	sid = vma->vm_mm->context;
	purge_tlb_start(flags);
	mtsp(sid, 1);		/* put the space id in %sr1 for the purge */
	pdtlb(addr);		/* purge the data TLB entry */
	if (unlikely(split_tlb))
		pitlb(addr);	/* split I/D TLBs: purge the I-TLB entry too */
	purge_tlb_end(flags);
}
#endif /* _PARISC_TLBFLUSH_H */