/*
 * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 * Heavily based on proc-arm926.S
 * Maintainer: Assaf Hoffman <hoffman@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions. Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32
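
/*
 * Rough cost note: with 32-byte lines, a CACHE_DLIMIT (16 KiB) range works
 * out to 16384 / 32 = 512 per-line maintenance operations; anything larger
 * is assumed to be cheaper to handle with a whole-cache clean+invalidate.
 */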

	.bss
	.align 3
__cache_params_loc:
	.space 8

	.text
__cache_params:
	.word __cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
ENTRY(cpu_feroceon_proc_init)
	mrc p15, 0, r0, c0, c0, 1	@ read cache type register
	ldr r1, __cache_params
	mov r2, #(16 << 5)
	tst r0, #(1 << 16)		@ get way
	mov r0, r0, lsr #18		@ get cache size order
	movne r3, #((4 - 1) << 30)	@ 4-way
	and r0, r0, #0xf
	moveq r3, #0			@ 1-way
	mov r2, r2, lsl r0		@ actual cache size
	movne r2, r2, lsr #2		@ turned into # of sets
	sub r2, r2, #(1 << 5)
	stmia r1, {r2, r3}
	ret lr
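
/*
 * At this point __cache_params_loc holds two words derived from the cache
 * type register: the highest D-cache set index, already shifted into the
 * set field (bits 5 and up), and the highest way index shifted into bits
 * 30-31.  __flush_whole_cache below ORs and decrements these two values
 * to walk every set/way with the c7, c14, 2 clean+invalidate operation.
 */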

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov r0, #0
	mcr p15, 1, r0, c15, c9, 0	@ clean L2
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
#endif

	mrc p15, 0, r0, c1, c0, 0	@ ctrl register
	bic r0, r0, #0x1000		@ ...i............
	bic r0, r0, #0x000e		@ ............wca.
	mcr p15, 0, r0, c1, c0, 0	@ disable caches
	ret lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system. Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
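/*
 * Note that this code runs from .idmap.text (see the pushsection below):
 * the control register write clears the M bit, so the MMU is switched off
 * while this code is still executing, and it therefore has to live in an
 * identity-mapped section for the following instruction fetches to work.
 */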
	.align 5
	.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov ip, #0
	mcr p15, 0, ip, c7, c7, 0	@ invalidate I,D caches
	mcr p15, 0, ip, c7, c10, 4	@ drain WB
#ifdef CONFIG_MMU
	mcr p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
#endif
	mrc p15, 0, ip, c1, c0, 0	@ ctrl register
	bic ip, ip, #0x000f		@ ............wcam
	bic ip, ip, #0x1100		@ ...i...s........
	mcr p15, 0, ip, c1, c0, 0	@ ctrl register
	ret r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align 5
ENTRY(cpu_feroceon_do_idle)
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4	@ Drain write buffer
	mcr p15, 0, r0, c7, c0, 4	@ Wait for interrupt
	ret lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0	@ invalidate I cache
	ret lr
ENDPROC(feroceon_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align 5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov r2, #VM_EXEC

__flush_whole_cache:
	ldr r1, __cache_params
	ldmia r1, {r1, r3}
1:	orr ip, r1, r3
2:	mcr p15, 0, ip, c7, c14, 2	@ clean + invalidate D set/way
	subs ip, ip, #(1 << 30)		@ next way
	bcs 2b
	subs r1, r1, #(1 << 5)		@ next set
	bcs 1b

	tst r2, #VM_EXEC
	mov ip, #0
	mcrne p15, 0, ip, c7, c5, 0	@ invalidate I cache
	mcrne p15, 0, ip, c7, c10, 4	@ drain WB
	ret lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 */
	.align 5
ENTRY(feroceon_flush_user_cache_range)
	sub r3, r1, r0			@ calculate total size
	cmp r3, #CACHE_DLIMIT
	bgt __flush_whole_cache
1:	tst r2, #VM_EXEC
	mcr p15, 0, r0, c7, c14, 1	@ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	mcr p15, 0, r0, c7, c14, 1	@ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	tst r2, #VM_EXEC
	mov ip, #0
	mcrne p15, 0, ip, c7, c10, 4	@ drain WB
	ret lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align 5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	mcr p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov r0, #0
	ret lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
	.align 5
ENTRY(feroceon_flush_kern_dcache_area)
	add r1, r0, r1
1:	mcr p15, 0, r0, c7, c14, 1	@ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0	@ invalidate I cache
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

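/*
 * Range-operation variant of the above for CPUs with the Marvell CP15 c15
 * range ops (88FR571/88FR131, see the proc_info entries at the end of this
 * file).  It covers one PAGE_SZ region starting at r0 with a single
 * clean+invalidate range command whose top address is inclusive.  IRQs are
 * masked around the two MCRs on the assumption that the start/top register
 * pair is not safe against an interrupt handler issuing its own range
 * operation in between.
 */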
	.align 5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs r2, cpsr
	add r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3			@ disable interrupts
	mcr p15, 5, r0, c15, c15, 0	@ D clean/inv range start
	mcr p15, 5, r1, c15, c15, 1	@ D clean/inv range top
	msr cpsr_c, r2			@ restore interrupts
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0	@ invalidate I cache
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align 5
feroceon_dma_inv_range:
	tst r0, #CACHE_DLINESIZE - 1
	bic r0, r0, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r0, c7, c10, 1	@ clean D entry
	tst r1, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r1, c7, c10, 1	@ clean D entry
1:	mcr p15, 0, r0, c7, c6, 1	@ invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

	.align 5
feroceon_range_dma_inv_range:
	mrs r2, cpsr
	tst r0, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r0, c7, c10, 1	@ clean D entry
	tst r1, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r1, c7, c10, 1	@ clean D entry
	cmp r1, r0
	subne r1, r1, #1		@ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3			@ disable interrupts
	mcr p15, 5, r0, c15, c14, 0	@ D inv range start
	mcr p15, 5, r1, c15, c14, 1	@ D inv range top
	msr cpsr_c, r2			@ restore interrupts
	ret lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align 5
feroceon_dma_clean_range:
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

	.align 5
feroceon_range_dma_clean_range:
	mrs r2, cpsr
	cmp r1, r0
	subne r1, r1, #1		@ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3			@ disable interrupts
	mcr p15, 5, r0, c15, c13, 0	@ D clean range start
	mcr p15, 5, r1, c15, c13, 1	@ D clean range top
	msr cpsr_c, r2			@ restore interrupts
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align 5
ENTRY(feroceon_dma_flush_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c14, 1	@ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

	.align 5
ENTRY(feroceon_range_dma_flush_range)
	mrs r2, cpsr
	cmp r1, r0
	subne r1, r1, #1		@ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3			@ disable interrupts
	mcr p15, 5, r0, c15, c15, 0	@ D clean/inv range start
	mcr p15, 5, r1, c15, c15, 1	@ D clean/inv range top
	msr cpsr_c, r2			@ restore interrupts
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
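/*
 * Dispatch on the DMA direction (assuming the usual enum ordering, where
 * DMA_BIDIRECTIONAL < DMA_TO_DEVICE < DMA_FROM_DEVICE): beq takes
 * DMA_TO_DEVICE to a clean, bcs (unsigned higher) takes DMA_FROM_DEVICE
 * to an invalidate, and the fall-through branch handles DMA_BIDIRECTIONAL
 * with a full clean+invalidate.  The range variant below does the same.
 */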
ENTRY(feroceon_dma_map_area)
	add r1, r1, r0
	cmp r2, #DMA_TO_DEVICE
	beq feroceon_dma_clean_range
	bcs feroceon_dma_inv_range
	b feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add r1, r1, r0
	cmp r2, #DMA_TO_DEVICE
	beq feroceon_range_dma_clean_range
	bcs feroceon_range_dma_inv_range
	b feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret lr
ENDPROC(feroceon_dma_unmap_area)

	.globl feroceon_flush_kern_cache_louis
	.equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range

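/*
 * cpu_feroceon_dcache_clean_area(addr, size)
 *
 * Clean the D cache for the given kernel region, line by line.  When the
 * Feroceon L2 is configured in write-back mode, the original addr/size are
 * kept in r2/r3 so that a second pass can clean the corresponding L2 lines.
 */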
	.align 5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov r2, r0
	mov r3, r1
#endif
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	subs r1, r1, #CACHE_DLINESIZE
	bhi 1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr p15, 1, r2, c15, c9, 1	@ clean L2 entry
	add r2, r2, #CACHE_DLINESIZE
	subs r3, r3, #CACHE_DLINESIZE
	bhi 1b
#endif
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	ret lr

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align 5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so. The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov r2, lr			@ abuse r2 to preserve lr
	bl __flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst r2, #VM_EXEC
	mcreq p15, 0, ip, c7, c5, 0	@ invalidate I cache
	mcreq p15, 0, ip, c7, c10, 4	@ drain WB

	mcr p15, 0, r0, c2, c0, 0	@ load page table pointer
	mcr p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	ret r2
#else
	ret lr
#endif

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align 5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov r0, r0
	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr p15, 1, r0, c15, c9, 1	@ clean L2 entry
#endif
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
#endif
	ret lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl cpu_feroceon_suspend_size
.equ cpu_feroceon_suspend_size, 4 * 3
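/*
 * Three words of suspend state: the PID, Domain ID and Control register
 * values saved by cpu_feroceon_do_suspend below.
 */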
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd sp!, {r4 - r6, lr}
	mrc p15, 0, r4, c13, c0, 0	@ PID
	mrc p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc p15, 0, r6, c1, c0, 0	@ Control register
	stmia r0, {r4 - r6}
	ldmfd sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov ip, #0
	mcr p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia r0, {r4 - r6}
	mcr p15, 0, r4, c13, c0, 0	@ PID
	mcr p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr p15, 0, r1, c2, c0, 0	@ TTB address
	mov r0, r6			@ control register
	b cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

	.type __feroceon_setup, #function
__feroceon_setup:
	mov r0, #0
	mcr p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr r5, feroceon_crval
	ldmia r5, {r5, r6}
	mrc p15, 0, r0, c1, c0		@ get control register v4
	bic r0, r0, r5
	orr r0, r0, r6
	ret lr
	.size __feroceon_setup, . - __feroceon_setup

	/*
	 *  B
	 *  R P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type feroceon_crval, #object
feroceon_crval:
	crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string cpu_arch_name, "armv5te"
	string cpu_elf_name, "v5"
	string cpu_feroceon_name, "Feroceon"
	string cpu_88fr531_name, "Feroceon 88FR531-vd"
	string cpu_88fr571_name, "Feroceon 88FR571-vd"
	string cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", #alloc

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type __\name\()_proc_info,#object
__\name\()_proc_info:
	.long \cpu_val
	.long \cpu_mask
	.long PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn __feroceon_setup, __\name\()_proc_info
	.long cpu_arch_name
	.long cpu_elf_name
	.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long \cpu_name
	.long feroceon_processor_functions
	.long v4wbi_tlb_fns
	.long feroceon_user_fns
	.long \cache
	.size __\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns