Kyle Swenson | 8d8f654 | 2021-03-15 11:02:55 -0600 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright 2012 Calxeda, Inc. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public License along with |
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 15 | */ |
| 16 | #ifndef _ASM_ARM_PERCPU_H_ |
| 17 | #define _ASM_ARM_PERCPU_H_ |
| 18 | |
| 19 | /* |
| 20 | * Same as asm-generic/percpu.h, except that we store the per cpu offset |
| 21 | * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 |
| 22 | */ |
| 23 | #if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) |
/*
 * Store this CPU's per-cpu area offset in TPIDRPRW (CP15 c13 software
 * thread ID register, privileged-only), so that subsequent
 * __my_cpu_offset() reads return it.
 *
 * @off: per-cpu offset for the current CPU.
 *
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses (in particular, cached __my_cpu_offset() reads) across
 * this register write.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
| 29 | |
/*
 * Return this CPU's per-cpu offset by reading it back from TPIDRPRW.
 *
 * The asm is deliberately NOT volatile: the register only changes via
 * set_my_cpu_offset(), so the compiler is allowed to CSE/cache the
 * result between reads. To make sure a barrier() (which clobbers
 * "memory") still forces a fresh read — e.g. around preemption
 * points where the task may migrate CPUs — the asm takes a fake "Q"
 * memory input on the word at the current stack pointer, creating an
 * artificial dependency on memory without actually using the value.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
		: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
| 44 | #define __my_cpu_offset __my_cpu_offset() |
| 45 | #else |
| 46 | #define set_my_cpu_offset(x) do {} while(0) |
| 47 | |
#endif /* CONFIG_SMP && !CONFIG_CPU_V6 */
| 49 | |
| 50 | #include <asm-generic/percpu.h> |
| 51 | |
| 52 | #endif /* _ASM_ARM_PERCPU_H_ */ |