blob: 0ca9edb97fcc1ba3d82d70443a708106058e398c [file] [log] [blame]
Damjan Marion522e4862016-03-04 12:44:14 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#ifndef included_clib_cpu_h
17#define included_clib_cpu_h
18
Damjan Marion1c80e832016-05-11 23:07:18 +020019#include <vppinfra/format.h>
20
21/*
22 * multiarchitecture support. Adding new entry will produce
23 * new graph node function variant optimized for specific cpu
24 * microarchitecture.
25 * Order is important for runtime selection, as 1st match wins...
26 */
27
/* List of (variant, x, target-attribute) tuples expanded per graph node.
   Only built on x86_64 release builds; debug builds get no extra variants. */
#if __x86_64__ && CLIB_DEBUG == 0
#define foreach_march_variant(macro, x) \
  macro(avx2, x, "arch=core-avx2")
#else
#define foreach_march_variant(macro, x)
#endif
34
35
/* Force -O3 on multiarch function variants under GCC >= 5; clang and older
   GCC do not support (or reliably honor) the per-function optimize attribute. */
#if __GNUC__ > 4 && !__clang__
#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("O3")))
#else
#define CLIB_CPU_OPTIMIZED
#endif
41
42
/* One runtime check per march variant: return the arch-suffixed symbol
   if the CPU supports it.  Expanded (in order) by foreach_march_variant,
   so the first supported variant wins. */
#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
  if (clib_cpu_supports_ ## arch()) \
    return & fn ## _ ##arch;

/* Emit fn_multiarch_select(): probes each variant and falls back to the
   generic fn when none match.  __VA_ARGS__ lets callers prepend storage
   class / attributes (e.g. static). */
#define CLIB_MULTIARCH_SELECT_FN(fn,...) \
  __VA_ARGS__ void * fn ## _multiarch_select(void) \
{ \
  foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \
  return & fn; \
}
53
/* When a translation unit is compiled as a march variant
   (CLIB_MARCH_VARIANT defined on the command line), suffix function names
   with the variant; the two-level expansion lets CLIB_MARCH_VARIANT itself
   be macro-expanded before pasting.  Otherwise names are left untouched. */
#ifdef CLIB_MARCH_VARIANT
#define __CLIB_MULTIARCH_FN(a,b) a##_##b
#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MARCH_VARIANT)
#else
#define CLIB_MULTIARCH_FN(fn) fn
#endif

/* Alias used by the CLIB_MARCH_FN machinery below. */
#define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
63
/* x86_64 feature flags: (name, cpuid leaf, result register, bit).
   "aes" is prefixed with the arch name to avoid colliding with the
   aarch64 flag of the same name — see clib_cpu_supports_aes() below. */
#define foreach_x86_64_flags \
_ (sse3, 1, ecx, 0) \
_ (ssse3, 1, ecx, 9) \
_ (sse41, 1, ecx, 19) \
_ (sse42, 1, ecx, 20) \
_ (avx, 1, ecx, 28) \
_ (avx2, 7, ebx, 5) \
_ (avx512f, 7, ebx, 16) \
_ (x86_aes, 1, ecx, 25) \
_ (sha, 7, ebx, 29) \
_ (invariant_tsc, 0x80000007, edx, 8)
Damjan Marion1c80e832016-05-11 23:07:18 +020075
Gabriel Ganne73cb0062017-12-05 14:26:33 +010076
/* aarch64 feature flags: (name, AT_HWCAP bit) as queried via getauxval.
   "aes" is prefixed with the arch name — see clib_cpu_supports_aes(). */
#define foreach_aarch64_flags \
_ (fp, 0) \
_ (asimd, 1) \
_ (evtstrm, 2) \
_ (aarch64_aes, 3) \
_ (pmull, 4) \
_ (sha1, 5) \
_ (sha2, 6) \
_ (crc32, 7) \
_ (atomics, 8) \
_ (fphp, 9) \
_ (asimdhp, 10) \
_ (cpuid, 11) \
_ (asimdrdm, 12) \
_ (jscvt, 13) \
_ (fcma, 14) \
_ (lrcpc, 15) \
_ (dcpop, 16) \
_ (sha3, 17) \
_ (sm3, 18) \
_ (sm4, 19) \
_ (asimddp, 20) \
_ (sha512, 21) \
_ (sve, 22)
101
Christophe Fontaine33e81952016-12-19 14:41:52 +0100102#if defined(__x86_64__)
103#include "cpuid.h"
104
/* Execute CPUID leaf `lev`, storing the four result registers through the
   out-pointers.  Returns 1 on success, 0 if the CPU does not support that
   leaf (outputs are left untouched in that case). */
static inline int
clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
{
  /* `0x80000000 & lev` selects the right range probe: 0 for basic
     leaves, 0x80000000 for extended ones, so __get_cpuid_max reports
     the highest supported leaf in the same range as `lev`. */
  if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
    return 0;
  /* leaf 7 is sub-leaf indexed; always query sub-leaf 0 */
  if (lev == 7)
    __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
  else
    __cpuid (lev, *eax, *ebx, *ecx, *edx);
  return 1;
}
116
117
/* Generate one clib_cpu_supports_<flag>() predicate per x86_64 flag:
   run the flag's CPUID leaf and test the documented register bit.
   `eax` is only an output slot, hence the unused attribute. */
#define _(flag, func, reg, bit) \
static inline int \
clib_cpu_supports_ ## flag() \
{ \
  u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
  clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
  \
  return ((reg & (1 << bit)) != 0); \
}
foreach_x86_64_flags
#undef _
#else /* __x86_64__ */

/* Non-x86 builds: every x86 feature predicate is a constant 0. */
#define _(flag, func, reg, bit) \
static inline int clib_cpu_supports_ ## flag() { return 0; }
foreach_x86_64_flags
#undef _
#endif /* __x86_64__ */
/* Generate one clib_cpu_supports_<flag>() predicate per aarch64 flag by
   testing the corresponding AT_HWCAP bit from the ELF auxiliary vector. */
#if defined(__aarch64__)
#include <sys/auxv.h>
#define _(flag, bit) \
static inline int \
clib_cpu_supports_ ## flag() \
{ \
  unsigned long hwcap = getauxval(AT_HWCAP); \
  return (hwcap & (1 << bit)); \
}
  foreach_aarch64_flags
#undef _
#else /* ! __x86_64__ && !__aarch64__ */
/* Non-aarch64 builds: every aarch64 feature predicate is a constant 0. */
#define _(flag, bit) \
static inline int clib_cpu_supports_ ## flag() { return 0; }
  foreach_aarch64_flags
#undef _
#endif /* __x86_64__, __aarch64__ */
153/*
154 * aes is the only feature with the same name in both flag lists
155 * handle this by prefixing it with the arch name, and handling it
156 * with the custom function below
157 */
/* Arch-dispatching AES predicate: the flag lists above prefix "aes" with
 * the architecture name to avoid a name collision, and this wrapper picks
 * the right one for the build target.  Returns non-zero when the CPU
 * advertises AES instructions, 0 on unknown architectures.
 *
 * Fixed: the first branch previously tested __aarch64__ while calling the
 * x86 helper, making the #elif unreachable — x86 builds always returned 0
 * and aarch64 builds called the wrong (always-0) predicate. */
static inline int
clib_cpu_supports_aes ()
{
#if defined (__x86_64__)
  return clib_cpu_supports_x86_aes ();
#elif defined (__aarch64__)
  return clib_cpu_supports_aarch64_aes ();
#else
  return 0;
#endif
}
169
/* Selection priority for the avx512 march variant: 20 (preferred over
 * avx2's 10) when the CPU reports AVX-512 Foundation, otherwise -1,
 * meaning "never select this variant". */
static inline int
clib_cpu_march_priority_avx512 ()
{
  return clib_cpu_supports_avx512f () ? 20 : -1;
}
177
/* Selection priority for the avx2 march variant: 10 when the CPU
 * reports AVX2, otherwise -1 ("never select this variant"). */
static inline int
clib_cpu_march_priority_avx2 ()
{
  return clib_cpu_supports_avx2 () ? 10 : -1;
}
185
Lijian Zhang2e237212018-09-10 17:13:56 +0800186static inline u32
187clib_cpu_implementer ()
188{
189 char buf[128];
190 static u32 implementer = -1;
191
192 if (-1 != implementer)
193 return implementer;
194
195 FILE *fp = fopen ("/proc/cpuinfo", "r");
196 if (!fp)
197 return implementer;
198
199 while (!feof (fp))
200 {
201 if (!fgets (buf, sizeof (buf), fp))
202 break;
203 buf[127] = '\0';
204 if (strstr (buf, "CPU implementer"))
205 implementer = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
206 if (-1 != implementer)
207 break;
208 }
209 fclose (fp);
210
211 return implementer;
212}
213
214static inline u32
215clib_cpu_part ()
216{
217 char buf[128];
218 static u32 part = -1;
219
220 if (-1 != part)
221 return part;
222
223 FILE *fp = fopen ("/proc/cpuinfo", "r");
224 if (!fp)
225 return part;
226
227 while (!feof (fp))
228 {
229 if (!fgets (buf, sizeof (buf), fp))
230 break;
231 buf[127] = '\0';
232 if (strstr (buf, "CPU part"))
233 part = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
234 if (-1 != part)
235 break;
236 }
237 fclose (fp);
238
239 return part;
240}
241
/* aarch64 implementer/part identity values as reported in /proc/cpuinfo.
   NOTE(review): "THUNERDERX2" is a typo for "THUNDERX2"; the names are
   kept as-is because they are part of this public header's interface. */
#define AARCH64_CPU_IMPLEMENTER_THUNERDERX2 0x43
#define AARCH64_CPU_PART_THUNERDERX2 0x0af
#define AARCH64_CPU_IMPLEMENTER_QDF24XX 0x51
#define AARCH64_CPU_PART_QDF24XX 0xc00
#define AARCH64_CPU_IMPLEMENTER_CORTEXA72 0x41
#define AARCH64_CPU_PART_CORTEXA72 0xd08
248
249static inline int
250clib_cpu_march_priority_thunderx2t99 ()
251{
252 if ((AARCH64_CPU_IMPLEMENTER_THUNERDERX2 == clib_cpu_implementer ()) &&
253 (AARCH64_CPU_PART_THUNERDERX2 == clib_cpu_part ()))
254 return 20;
255 return -1;
256}
257
258static inline int
259clib_cpu_march_priority_qdf24xx ()
260{
261 if ((AARCH64_CPU_IMPLEMENTER_QDF24XX == clib_cpu_implementer ()) &&
262 (AARCH64_CPU_PART_QDF24XX == clib_cpu_part ()))
263 return 20;
264 return -1;
265}
266
267static inline int
268clib_cpu_march_priority_cortexa72 ()
269{
270 if ((AARCH64_CPU_IMPLEMENTER_CORTEXA72 == clib_cpu_implementer ()) &&
271 (AARCH64_CPU_PART_CORTEXA72 == clib_cpu_part ()))
272 return 10;
273 return -1;
274}
275
/* In a march-variant TU, priority comes from the variant's
   clib_cpu_march_priority_<variant>() probe; the default TU has
   priority 0 so any supported variant beats it. */
#ifdef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
#else
#define CLIB_MARCH_FN_PRIORITY() 0
#endif
/* NOTE(review): this #endif closes the included_clib_cpu_h guard, yet
   more definitions follow below and are therefore re-processed on every
   inclusion — verify the guard placement is intentional. */
#endif /* included_clib_cpu_h */
282
/* Emit a constructor that registers this TU's fn variant at startup:
   if its march priority beats the currently selected one, repoint
   fn_selected at this variant and record its priority. */
#define CLIB_MARCH_FN_CONSTRUCTOR(fn) \
static void __clib_constructor \
CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
{ \
  if (CLIB_MARCH_FN_PRIORITY() > fn ## _selected_priority) \
    { \
      fn ## _selected = & CLIB_MARCH_SFX (fn ## _ma); \
      fn ## _selected_priority = CLIB_MARCH_FN_PRIORITY(); \
    } \
} \
293
/* CLIB_MARCH_FN(fn, rtype, args...) opens the definition of a multiarch
   function body.  The default TU defines the fn_selected pointer (and
   its priority, initially 0) pointing at the generic implementation;
   each march-variant TU defines its suffixed implementation plus a
   constructor that competes for fn_selected at startup.
   NOTE(review): the variant branch declares fn_selected as
   "extern int (*)(...)" while the default TU defines it with rtype —
   a declared-type mismatch across TUs when rtype != int; confirm. */
#ifndef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
  rtype (*fn ## _selected) (_args) = & CLIB_MARCH_SFX (fn ## _ma); \
  int fn ## _selected_priority = 0; \
  static inline rtype CLIB_CPU_OPTIMIZED \
  CLIB_MARCH_SFX (fn ## _ma)(_args)
#else
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
  extern int (*fn ## _selected) (_args); \
  extern int fn ## _selected_priority; \
  CLIB_MARCH_FN_CONSTRUCTOR (fn) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
#endif

/* Call through the variant selected at startup: CLIB_MARCH_FN_SELECT(fn)(...) */
#define CLIB_MARCH_FN_SELECT(fn) (* fn ## _selected)
311
/* CPU-info formatters (format_function_t from <vppinfra/format.h>);
   definitions live in a separate translation unit. */
format_function_t format_cpu_uarch;
format_function_t format_cpu_model_name;
format_function_t format_cpu_flags;
Damjan Marion522e4862016-03-04 12:44:14 +0100315
Dave Barachc3799992016-08-15 11:12:27 -0400316/*
317 * fd.io coding-style-patch-verification: ON
318 *
319 * Local Variables:
320 * eval: (c-set-style "gnu")
321 * End:
322 */