/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

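/**
 * local_flush_tlb_page - flush one page's mapping from this CPU's TLB
 * @vma: VMA the page belongs to
 * @page: virtual address of the page to flush
 *
 * A no-op if the owning mm has no MMU context (and hence no ASID) on
 * this CPU, since it can have no live TLB entries here.
 */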
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned int cpu = smp_processor_id();

        if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
                unsigned long flags;
                unsigned long asid;
                unsigned long saved_asid = MMU_NO_ASID;

                asid = cpu_asid(cpu, vma->vm_mm);
                page &= PAGE_MASK;

                local_irq_save(flags);
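                /*
                 * TLB entries are tagged with an ASID, so if we are
                 * flushing on behalf of an mm other than our own,
                 * temporarily load its ASID and restore ours afterwards.
                 */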
                if (vma->vm_mm != current->mm) {
                        saved_asid = get_asid();
                        set_asid(asid);
                }
                local_flush_tlb_one(asid, page);
                if (saved_asid != MMU_NO_ASID)
                        set_asid(saved_asid);
                local_irq_restore(flags);
        }
}

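/**
 * local_flush_tlb_range - flush a user address range from this CPU's TLB
 * @vma: VMA covering the range
 * @start: start of the virtual address range
 * @end: end of the virtual address range
 *
 * Small ranges are flushed page by page; large ranges are handled by
 * retiring the mm's context instead.
 */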
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != NO_CONTEXT) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
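                /*
                 * Beyond a quarter of the TLB, per-page flushing costs
                 * more than retiring the ASID: stale entries can no
                 * longer match once a new ASID is assigned on the next
                 * context activation.
                 */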
                if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
                        cpu_context(cpu, mm) = NO_CONTEXT;
                        if (mm == current->mm)
                                activate_context(mm, cpu);
                } else {
                        unsigned long asid;
                        unsigned long saved_asid = MMU_NO_ASID;

                        asid = cpu_asid(cpu, mm);
                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;
                        if (mm != current->mm) {
                                saved_asid = get_asid();
                                set_asid(asid);
                        }
                        while (start < end) {
                                local_flush_tlb_one(asid, start);
                                start += PAGE_SIZE;
                        }
                        if (saved_asid != MMU_NO_ASID)
                                set_asid(saved_asid);
                }
                local_irq_restore(flags);
        }
}

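/**
 * local_flush_tlb_kernel_range - flush a kernel address range from this
 *                                CPU's TLB
 * @start: start of the virtual address range
 * @end: end of the virtual address range
 *
 * The kernel variant of local_flush_tlb_range(): large ranges fall back
 * to a full flush, while per-page flushes run under init_mm's ASID.
 */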
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        int size;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
                local_flush_tlb_all();
        } else {
                unsigned long asid;
                unsigned long saved_asid = get_asid();

                asid = cpu_asid(cpu, &init_mm);
                start &= PAGE_MASK;
                end += (PAGE_SIZE - 1);
                end &= PAGE_MASK;
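                /*
                 * Kernel entries are tagged with init_mm's ASID; switch
                 * to it unconditionally, since current is typically a
                 * user task, and restore the saved ASID when done.
                 */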
                set_asid(asid);
                while (start < end) {
                        local_flush_tlb_one(asid, start);
                        start += PAGE_SIZE;
                }
                set_asid(saved_asid);
        }
        local_irq_restore(flags);
}

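/**
 * local_flush_tlb_mm - flush all of an mm's mappings from this CPU's TLB
 * @mm: address space to flush
 */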
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();

        /*
         * Rather than invalidating this process's TLB entries one by
         * one, drop its MMU context so that a fresh ASID is allocated
         * the next time the context is activated.
         */
        if (cpu_context(cpu, mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                cpu_context(cpu, mm) = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm, cpu);
                local_irq_restore(flags);
        }
}

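/**
 * __flush_tlb_global - invalidate every UTLB/ITLB entry on this CPU
 *
 * Unlike the ASID-based flushes above, this also tears down wired
 * entries, so it should only be used when nothing in the TLB needs
 * to be preserved.
 */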
void __flush_tlb_global(void)
{
        unsigned long flags;

        local_irq_save(flags);

        /*
         * This is the most destructive of the TLB flushing options,
         * and will tear down all of the UTLB/ITLB mappings, including
         * wired entries.
         */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

        local_irq_restore(flags);
}