blob: 6504c7b4e3aa6aa97d05ac847d589a012e7f24bc [file] [log] [blame]
Damjan Marion522e4862016-03-04 12:44:14 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef included_clib_cpu_h
17#define included_clib_cpu_h
18
Damjan Marion1c80e832016-05-11 23:07:18 +020019#include <vppinfra/format.h>
20
21/*
22 * multiarchitecture support. Adding new entry will produce
23 * new graph node function variant optimized for specific cpu
24 * microarchitecture.
25 * Order is important for runtime selection, as 1st match wins...
26 */
27
/* Each entry expands macro(march_name, x, target_attribute) once per
   microarchitecture variant.  Variants are only generated for release
   (CLIB_DEBUG == 0) x86_64 builds; everywhere else the list is empty and
   only the generic implementation exists. */
#if __x86_64__ && CLIB_DEBUG == 0
#define foreach_march_variant(macro, x) \
  macro(avx2, x, "arch=core-avx2")
#else
#define foreach_march_variant(macro, x)
#endif
34
35
/* Function attribute enabling auto-vectorization on the per-arch variants.
   GCC >= 5 only: clang does not support the optimize attribute, and it is
   unreliable on older GCC, so it expands to nothing there. */
#if __GNUC__ > 4 && !__clang__
#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("tree-vectorize")))
#else
#define CLIB_CPU_OPTIMIZED
#endif
41
42
/* Runtime selection: for each march variant, return a pointer to
   fn_<arch> if the running CPU supports that arch.  Expanded via
   foreach_march_variant, so the first listed (most specific) match wins;
   the generic fn is the fallback.  The tgt argument is unused here — it
   carries the target attribute string for the code-generation side. */
#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
  if (clib_cpu_supports_ ## arch()) \
    return & fn ## _ ##arch;

/* Emit fn_multiarch_select(), which picks the best variant of fn at
   runtime.  __VA_ARGS__ lets the caller prepend qualifiers such as
   'static'. */
#define CLIB_MULTIARCH_SELECT_FN(fn,...) \
  __VA_ARGS__ void * fn ## _multiarch_select(void) \
{ \
  foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \
  return & fn; \
}
53
Damjan Marion04f3db32017-11-10 21:55:45 +010054#ifdef CLIB_MULTIARCH_VARIANT
55#define __CLIB_MULTIARCH_FN(a,b) a##_##b
56#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
57#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MULTIARCH_VARIANT)
58#else
59#define CLIB_MULTIARCH_FN(fn) fn
60#endif
Damjan Marion1c80e832016-05-11 23:07:18 +020061
/* x86_64 feature table: _(flag_name, cpuid_leaf, result_register, bit).
   Each entry generates a clib_cpu_supports_<flag>() predicate below.
   "aes" is prefixed with the arch name because the aarch64 table also has
   an aes flag; see clib_cpu_supports_aes() at the bottom of this file. */
#define foreach_x86_64_flags \
_ (sse3, 1, ecx, 0) \
_ (ssse3, 1, ecx, 9) \
_ (sse41, 1, ecx, 19) \
_ (sse42, 1, ecx, 20) \
_ (avx, 1, ecx, 28) \
_ (avx2, 7, ebx, 5) \
_ (avx512f, 7, ebx, 16) \
_ (x86_aes, 1, ecx, 25) \
_ (sha, 7, ebx, 29) \
_ (invariant_tsc, 0x80000007, edx, 8)
Damjan Marion1c80e832016-05-11 23:07:18 +020073
Gabriel Ganne73cb0062017-12-05 14:26:33 +010074
/* aarch64 feature table: _(flag_name, AT_HWCAP_bit).  Each entry generates
   a clib_cpu_supports_<flag>() predicate that tests the corresponding bit
   of getauxval(AT_HWCAP).  "aes" is prefixed with the arch name to avoid
   clashing with the x86_64 flag of the same name. */
#define foreach_aarch64_flags \
_ (fp,          0) \
_ (asimd,       1) \
_ (evtstrm,     2) \
_ (aarch64_aes, 3) \
_ (pmull,       4) \
_ (sha1,        5) \
_ (sha2,        6) \
_ (crc32,       7) \
_ (atomics,     8) \
_ (fphp,        9) \
_ (asimdhp,    10) \
_ (cpuid,      11) \
_ (asimdrdm,   12) \
_ (jscvt,      13) \
_ (fcma,       14) \
_ (lrcpc,      15) \
_ (dcpop,      16) \
_ (sha3,       17) \
_ (sm3,        18) \
_ (sm4,        19) \
_ (asimddp,    20) \
_ (sha512,     21) \
_ (sve,        22)
99
Christophe Fontaine33e81952016-12-19 14:41:52 +0100100#if defined(__x86_64__)
101#include "cpuid.h"
102
Damjan Marion1c80e832016-05-11 23:07:18 +0200103static inline int
Dave Barachc3799992016-08-15 11:12:27 -0400104clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
Damjan Marion1c80e832016-05-11 23:07:18 +0200105{
106 if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
107 return 0;
108 if (lev == 7)
Dave Barachc3799992016-08-15 11:12:27 -0400109 __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
Damjan Marion1c80e832016-05-11 23:07:18 +0200110 else
Dave Barachc3799992016-08-15 11:12:27 -0400111 __cpuid (lev, *eax, *ebx, *ecx, *edx);
Damjan Marion1c80e832016-05-11 23:07:18 +0200112 return 1;
113}
114
115
/* Generate one clib_cpu_supports_<flag>() predicate per x86_64 table
   entry: query CPUID leaf 'func' and test 'bit' of result register 'reg'.
   eax is marked unused because no table entry reads it; the remaining
   registers are zero-initialized so a failed clib_get_cpuid reads as
   "feature absent". */
#define _(flag, func, reg, bit) \
static inline int \
clib_cpu_supports_ ## flag() \
{ \
  u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
  clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
  \
  return ((reg & (1 << bit)) != 0); \
}
foreach_x86_64_flags
#undef _
Gabriel Ganne73cb0062017-12-05 14:26:33 +0100127#else /* __x86_64__ */
Christophe Fontaine33e81952016-12-19 14:41:52 +0100128
/* Non-x86 build: stub every x86_64 feature predicate to "unsupported" so
   callers can test flags unconditionally on any architecture. */
#define _(flag, func, reg, bit) \
static inline int clib_cpu_supports_ ## flag() { return 0; }
foreach_x86_64_flags
#undef _
Gabriel Ganne73cb0062017-12-05 14:26:33 +0100133#endif /* __x86_64__ */
134#if defined(__aarch64__)
135#include <sys/auxv.h>
/* Generate one clib_cpu_supports_<flag>() predicate per aarch64 table
   entry by testing the corresponding AT_HWCAP bit from the kernel's
   auxiliary vector. */
#define _(flag, bit) \
static inline int \
clib_cpu_supports_ ## flag() \
{ \
  unsigned long hwcap = getauxval(AT_HWCAP); \
  return (hwcap & (1 << bit)); \
}
  foreach_aarch64_flags
#undef _
145#else /* ! __x86_64__ && !__aarch64__ */
/* Non-aarch64 build: stub every aarch64 feature predicate to
   "unsupported" so callers can test flags unconditionally. */
#define _(flag, bit) \
static inline int clib_cpu_supports_ ## flag() { return 0; }
  foreach_aarch64_flags
#undef _
150#endif /* __x86_64__, __aarch64__ */
151/*
152 * aes is the only feature with the same name in both flag lists
153 * handle this by prefixing it with the arch name, and handling it
154 * with the custom function below
155 */
/* Arch-neutral AES predicate: dispatch to the arch-prefixed accessor
   generated from the matching flag table (see the comment above). */
static inline int
clib_cpu_supports_aes ()
{
#if defined (__x86_64__)
  /* Fix: this branch previously tested __aarch64__ (duplicating the
     #elif below), so on x86_64 the function always fell through to the
     "return 0" default and AES-NI was never reported as available. */
  return clib_cpu_supports_x86_aes ();
#elif defined (__aarch64__)
  return clib_cpu_supports_aarch64_aes ();
#else
  return 0;
#endif
}
167
168#endif /* included_clib_cpu_h */
169
170format_function_t format_cpu_uarch;
Damjan Marion522e4862016-03-04 12:44:14 +0100171format_function_t format_cpu_model_name;
Damjan Marion1c80e832016-05-11 23:07:18 +0200172format_function_t format_cpu_flags;
Damjan Marion522e4862016-03-04 12:44:14 +0100173
Dave Barachc3799992016-08-15 11:12:27 -0400174/*
175 * fd.io coding-style-patch-verification: ON
176 *
177 * Local Variables:
178 * eval: (c-set-style "gnu")
179 * End:
180 */