blob: 761dbed3fe870f6e88cca756b830beff2b81405f [file] [log] [blame]
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
37
38#ifndef included_time_h
39#define included_time_h
40
41#include <vppinfra/clib.h>
Dave Barachc25048b2020-01-29 18:05:24 -050042#include <vppinfra/format.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070043
/* Time-keeping state: converts free-running CPU cycle counts
   (clib_cpu_time_now) into seconds, periodically re-checking the
   estimated clock frequency (clib_time_verify_frequency). */
typedef struct
{
  /* Total run time in clock cycles
     since clib_time_init call. */
  u64 total_cpu_time;

  /* Last recorded time stamp. */
  u64 last_cpu_time;

  /* CPU clock frequency. */
  f64 clocks_per_second;

  /* 1 / cpu clock frequency: conversion factor
     from clock cycles into seconds. */
  f64 seconds_per_clock;

  /* Time stamp of call to clib_time_init call. */
  u64 init_cpu_time;
  /* Reference-clock time recorded at init — presumably seconds since
     epoch from unix_time_now; confirm against clib_time_init in time.c. */
  f64 init_reference_time;

  /* CPU time stamp at the most recent frequency verification. */
  u64 last_verify_cpu_time;

  /* Same but for reference time (if present). */
  f64 last_verify_reference_time;

  /* log2 of the clock rate, and log2 of the verification interval in
     clocks: clib_time_now_internal uses the latter as a cheap
     shift-based "is it time to re-verify?" test. */
  u32 log2_clocks_per_second, log2_clocks_per_frequency_verify;

  /* Damping constant */
  f64 damping_constant;

} clib_time_t;
75
Dave Barachc25048b2020-01-29 18:05:24 -050076format_function_t format_clib_time;
77
/* Return CPU time stamp as 64bit number.
   One implementation per architecture; all read a free-running
   hardware counter.  NOTE(review): the counters differ in rate and
   invariance guarantees across platforms — callers must calibrate via
   clib_time_t rather than assume a fixed frequency. */
#if defined(__x86_64__) || defined(i386)
/* x86: RDTSC returns the time-stamp counter in EDX:EAX. */
always_inline u64
clib_cpu_time_now (void)
{
  u32 a, d;
  asm volatile ("rdtsc":"=a" (a), "=d" (d));
  return (u64) a + ((u64) d << (u64) 32);
}

#elif defined (__powerpc64__)

/* PowerPC64: read the 64-bit time base register in one instruction. */
always_inline u64
clib_cpu_time_now (void)
{
  u64 t;
  asm volatile ("mftb %0":"=r" (t));
  return t;
}

#elif defined (__SPU__)

/* Cell SPU: read decrementer channel 0x8 (XLC builds only). */
always_inline u64
clib_cpu_time_now (void)
{
#ifdef _XLC
  return spu_rdch (0x8);
#else
  return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}

#elif defined (__powerpc__)

/* 32-bit PowerPC: the 64-bit time base is read in two halves;
   re-read the upper half until it is stable across the lower read
   to guard against a carry in between. */
always_inline u64
clib_cpu_time_now (void)
{
  u32 hi1, hi2, lo;
  asm volatile ("1:\n"
		"mftbu %[hi1]\n"
		"mftb %[lo]\n"
		"mftbu %[hi2]\n"
		"cmpw %[hi1],%[hi2]\n"
		"bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
  return (u64) lo + ((u64) hi2 << (u64) 32);
}

#elif defined (__aarch64__)
/* AArch64: virtual counter-timer register. */
always_inline u64
clib_cpu_time_now (void)
{
  u64 vct;
  /* User access to cntvct_el0 is enabled in Linux kernel since 3.12. */
  asm volatile ("mrs %0, cntvct_el0":"=r" (vct));
  return vct;
}

#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
always_inline u64
clib_cpu_time_now (void)	/* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
  u64 tsc;
  asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
  return tsc;
}
#elif defined(__ARM_ARCH_7A__)
/* ARMv7-A: 32-bit PMCCNTR cycle counter; wraps quickly at CPU speed. */
always_inline u64
clib_cpu_time_now (void)
{
  u32 tsc;
  asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
  return (u64) tsc;
}
#else
/* Older ARM: 32-bit cycle counter via cp15 c15 — NOTE(review):
   encoding is core-specific; confirm for the targeted core. */
always_inline u64
clib_cpu_time_now (void)
{
  u32 lo;
  asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
  return (u64) lo;
}
#endif

#elif defined (__xtensa__)

/* Stub for now. */
always_inline u64
clib_cpu_time_now (void)
{
  return 0;
}

#elif defined (__TMS320C6X__)

/* TI C6x: read TSCL/TSCH with interrupts disabled (dint/rint) so the
   two halves are coherent. */
always_inline u64
clib_cpu_time_now (void)
{
  u32 l, h;

  asm volatile (" dint\n"
		" mvc .s2 TSCL,%0\n"
		" mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));

  return ((u64) h << 32) | l;
}

#elif defined(_mips) && __mips == 64

/* MIPS64: rdhwr hardware register read — NOTE(review): confirm $31
   selects the intended counter on the deployed cores. */
always_inline u64
clib_cpu_time_now (void)
{
  u64 result;
  asm volatile ("rdhwr %0,$31\n":"=r" (result));
  return result;
}

#elif defined(__riscv)

/* RISC-V: cycle CSR via the rdcycle pseudo-instruction. */
always_inline u64
clib_cpu_time_now (void)
{
  u64 result;
  asm volatile ("rdcycle %0\n" : "=r"(result));
  return result;
}
#else
#error "don't know how to read CPU time stamp"

#endif
208
209void clib_time_verify_frequency (clib_time_t * c);
210
Florin Corasa8e71c82019-10-22 19:01:39 -0700211/* Define it as the type returned by clib_time_now */
212typedef f64 clib_time_type_t;
213typedef u64 clib_us_time_t;
214
215#define CLIB_US_TIME_PERIOD (1e-6)
216#define CLIB_US_TIME_FREQ (1.0/CLIB_US_TIME_PERIOD)
217
/* Convert CPU time stamp n into seconds since clib_time_init,
   updating c's running totals.  Not thread-safe: mutates *c. */
always_inline f64
clib_time_now_internal (clib_time_t * c, u64 n)
{
  u64 l = c->last_cpu_time;
  u64 t = c->total_cpu_time;
  f64 rv;
  /* Accumulate cycles elapsed since the previous sample. */
  t += n - l;
  c->total_cpu_time = t;
  c->last_cpu_time = n;
  /* Compute the result BEFORE any frequency re-verification below,
     so this sample is converted with the current calibration. */
  rv = t * c->seconds_per_clock;
  /* Shift instead of compare: nonzero iff at least
     2^log2_clocks_per_frequency_verify cycles passed since the last
     verification. */
  if (PREDICT_FALSE
      ((c->last_cpu_time -
	c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
    clib_time_verify_frequency (c);
  return rv;
}
234
John Lo7f358b32018-04-28 01:19:24 -0400235/* Maximum f64 value as max clib_time */
236#define CLIB_TIME_MAX (1.7976931348623157e+308)
237
Ed Warnickecb9cada2015-12-08 15:45:58 -0700238always_inline f64
239clib_time_now (clib_time_t * c)
240{
Dave Barachc3799992016-08-15 11:12:27 -0400241 return clib_time_now_internal (c, clib_cpu_time_now ());
Ed Warnickecb9cada2015-12-08 15:45:58 -0700242}
243
Dave Barachc3799992016-08-15 11:12:27 -0400244always_inline void
245clib_cpu_time_wait (u64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700246{
247 u64 t_end = clib_cpu_time_now () + dt;
248 while (clib_cpu_time_now () < t_end)
249 ;
250}
251
252void clib_time_init (clib_time_t * c);
253
254#ifdef CLIB_UNIX
255
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
261
262/* Use 64bit floating point to represent time offset from epoch. */
Dave Barachc3799992016-08-15 11:12:27 -0400263always_inline f64
264unix_time_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700265{
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200266 struct timespec ts;
267#ifdef __MACH__
268 clock_gettime (CLOCK_REALTIME, &ts);
269#else
Ed Warnickecb9cada2015-12-08 15:45:58 -0700270 /* clock_gettime without indirect syscall uses GLIBC wrappers which
271 we don't want. Just the bare metal, please. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700272 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200273#endif
Dave Barachc3799992016-08-15 11:12:27 -0400274 return ts.tv_sec + 1e-9 * ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700275}
276
277/* As above but integer number of nano-seconds. */
Dave Barachc3799992016-08-15 11:12:27 -0400278always_inline u64
279unix_time_now_nsec (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700280{
281 struct timespec ts;
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200282#ifdef __MACH__
283 clock_gettime (CLOCK_REALTIME, &ts);
284#else
Ed Warnickecb9cada2015-12-08 15:45:58 -0700285 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200286#endif
Dave Barachc3799992016-08-15 11:12:27 -0400287 return 1e9 * ts.tv_sec + ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700288}
289
Ole Troaned929252017-06-13 21:15:40 +0200290always_inline void
291unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
292{
293 struct timespec ts;
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200294#ifdef __MACH__
295 clock_gettime (CLOCK_REALTIME, &ts);
296#else
Ole Troaned929252017-06-13 21:15:40 +0200297 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200298#endif
Ole Troaned929252017-06-13 21:15:40 +0200299 *sec = ts.tv_sec;
300 *nsec = ts.tv_nsec;
301}
302
Dave Barachc3799992016-08-15 11:12:27 -0400303always_inline f64
304unix_usage_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700305{
306 struct rusage u;
307 getrusage (RUSAGE_SELF, &u);
Dave Barachc3799992016-08-15 11:12:27 -0400308 return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
309 + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700310}
311
Dave Barachc3799992016-08-15 11:12:27 -0400312always_inline void
313unix_sleep (f64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700314{
Dave Barachb9f2cf02017-10-17 13:13:42 -0400315 struct timespec ts, tsrem;
316 ts.tv_sec = dt;
317 ts.tv_nsec = 1e9 * (dt - (f64) ts.tv_sec);
318
319 while (nanosleep (&ts, &tsrem) < 0)
320 ts = tsrem;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700321}
322
Dave Barachc3799992016-08-15 11:12:27 -0400323#else /* ! CLIB_UNIX */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700324
/* Non-unix stub: no reference clock available; always reports 0. */
always_inline f64
unix_time_now (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700330
/* Non-unix stub: no reference clock available; always reports 0. */
always_inline u64
unix_time_now_nsec (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700336
/* Non-unix stub: no-op; *sec and *nsec are left unmodified. */
always_inline void
unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
{
}
341
/* Non-unix stub: no rusage source; always reports 0. */
always_inline f64
unix_usage_now (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700347
/* Non-unix stub: no sleep facility; returns immediately. */
always_inline void
unix_sleep (f64 dt)
{
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700352
353#endif
354
355#endif /* included_time_h */
Dave Barachc3799992016-08-15 11:12:27 -0400356
357/*
358 * fd.io coding-style-patch-verification: ON
359 *
360 * Local Variables:
361 * eval: (c-set-style "gnu")
362 * End:
363 */