blob: 4d8997f0a9e30b2b33fcb083ea4eaa93cb526b8f [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
17
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
25
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
28
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36*/
37
38#ifndef included_time_h
39#define included_time_h
40
41#include <vppinfra/clib.h>
Dave Barachc25048b2020-01-29 18:05:24 -050042#include <vppinfra/format.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070043
/* Per-thread time-keeping state: converts raw CPU cycle counts
   (clib_cpu_time_now) into seconds since clib_time_init. */
typedef struct
{
  /* Total run time in clock cycles
     since clib_time_init call. */
  u64 total_cpu_time;

  /* Last recorded time stamp. */
  u64 last_cpu_time;

  /* CPU clock frequency. */
  f64 clocks_per_second;

  /* 1 / cpu clock frequency: conversion factor
     from clock cycles into seconds. */
  f64 seconds_per_clock;

  /* Time stamp of call to clib_time_init call. */
  u64 init_cpu_time;
  /* Reference-clock time captured at clib_time_init
     (presumably wall-clock seconds; set in time.c — not visible here). */
  f64 init_reference_time;

  /* CPU time stamp at the most recent frequency verification. */
  u64 last_verify_cpu_time;

  /* Same but for reference time (if present). */
  f64 last_verify_reference_time;

  /* log2 of clocks_per_second, and log2 of the cycle interval between
     frequency re-verifications (used by clib_time_now_internal). */
  u32 log2_clocks_per_second, log2_clocks_per_frequency_verify;

  /* Damping constant (used by the frequency-adjustment logic in
     clib_time_verify_frequency — implementation not visible here). */
  f64 damping_constant;

} clib_time_t;
75
Dave Barachc25048b2020-01-29 18:05:24 -050076format_function_t format_clib_time;
77
/* Return CPU time stamp as 64bit number.
   One implementation per architecture, selected at preprocessing time. */
#if defined(__x86_64__) || defined(i386)
always_inline u64
clib_cpu_time_now (void)
{
  /* RDTSC returns the time-stamp counter split across edx:eax. */
  u32 a, d;
  asm volatile ("rdtsc":"=a" (a), "=d" (d));
  return (u64) a + ((u64) d << (u64) 32);
}

#elif defined (__powerpc64__)

always_inline u64
clib_cpu_time_now (void)
{
  /* 64-bit read of the PowerPC time base register. */
  u64 t;
  asm volatile ("mftb %0":"=r" (t));
  return t;
}

#elif defined (__SPU__)

always_inline u64
clib_cpu_time_now (void)
{
#ifdef _XLC
  /* Read channel 0x8 (presumably the SPU decrementer — see FIXME below). */
  return spu_rdch (0x8);
#else
  return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}

#elif defined (__powerpc__)

always_inline u64
clib_cpu_time_now (void)
{
  /* 32-bit PowerPC: the 64-bit time base must be read as two halves.
     Read hi, lo, then hi again and retry if the high word changed,
     so the combined value is consistent across a low-word wrap. */
  u32 hi1, hi2, lo;
  asm volatile ("1:\n"
		"mftbu %[hi1]\n"
		"mftb %[lo]\n"
		"mftbu %[hi2]\n"
		"cmpw %[hi1],%[hi2]\n"
		"bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
  return (u64) lo + ((u64) hi2 << (u64) 32);
}

#elif defined (__aarch64__)
always_inline u64
clib_cpu_time_now (void)
{
  u64 vct;
  /* User access to cntvct_el0 is enabled in Linux kernel since 3.12. */
  asm volatile ("mrs %0, cntvct_el0":"=r" (vct));
  return vct;
}

#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
always_inline u64
clib_cpu_time_now (void)	/* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
  /* 64-bit coprocessor read (presumably the 64-bit cycle counter
     accessible via MRRC p15 — verify against the ARMv8 AArch32 spec). */
  u64 tsc;
  asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
  return tsc;
}
#elif defined(__ARM_ARCH_7A__)
always_inline u64
clib_cpu_time_now (void)
{
  /* 32-bit cycle-counter read (c9/c13/0 — PMCCNTR on ARMv7, per the
     architecture manual); wraps frequently, widened to u64 by the caller. */
  u32 tsc;
  asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
  return (u64) tsc;
}
#else
always_inline u64
clib_cpu_time_now (void)
{
  /* Legacy ARM: 32-bit counter via c15/c12/1 (older cycle-counter
     encoding — confirm per target CPU). */
  u32 lo;
  asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
  return (u64) lo;
}
#endif

#elif defined (__xtensa__)

/* Stub for now. */
always_inline u64
clib_cpu_time_now (void)
{
  return 0;
}

#elif defined (__TMS320C6X__)

always_inline u64
clib_cpu_time_now (void)
{
  /* Disable interrupts around the two-register TSCL/TSCH read so the
     halves are sampled consistently, then re-enable. */
  u32 l, h;

  asm volatile (" dint\n"
		" mvc .s2 TSCL,%0\n"
		" mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));

  return ((u64) h << 32) | l;
}

#elif defined(_mips) && __mips == 64

always_inline u64
clib_cpu_time_now (void)
{
  /* rdhwr $31 reads hardware register 31 (cycle counter per MIPS64 HWREna
     conventions — user access must be enabled by the kernel). */
  u64 result;
  asm volatile ("rdhwr %0,$31\n":"=r" (result));
  return result;
}

#else
#error "don't know how to read CPU time stamp"

#endif
199
200void clib_time_verify_frequency (clib_time_t * c);
201
Florin Corasa8e71c82019-10-22 19:01:39 -0700202/* Define it as the type returned by clib_time_now */
203typedef f64 clib_time_type_t;
204typedef u64 clib_us_time_t;
205
206#define CLIB_US_TIME_PERIOD (1e-6)
207#define CLIB_US_TIME_FREQ (1.0/CLIB_US_TIME_PERIOD)
208
/* Advance c's cycle accounting to time stamp n and return the elapsed
   time in seconds since clib_time_init.  Periodically triggers a
   clock-frequency re-verification. */
always_inline f64
clib_time_now_internal (clib_time_t * c, u64 n)
{
  u64 l = c->last_cpu_time;
  u64 t = c->total_cpu_time;
  f64 rv;
  /* Unsigned subtraction: n - l stays correct even if the cycle
     counter wrapped between samples. */
  t += n - l;
  c->total_cpu_time = t;
  c->last_cpu_time = n;
  /* Convert with the current frequency estimate BEFORE any
     re-verification below can change seconds_per_clock. */
  rv = t * c->seconds_per_clock;
  /* Re-verify once more than 2^log2_clocks_per_frequency_verify
     cycles have elapsed since the last verification. */
  if (PREDICT_FALSE
      ((c->last_cpu_time -
	c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
    clib_time_verify_frequency (c);
  return rv;
}
225
John Lo7f358b32018-04-28 01:19:24 -0400226/* Maximum f64 value as max clib_time */
227#define CLIB_TIME_MAX (1.7976931348623157e+308)
228
Ed Warnickecb9cada2015-12-08 15:45:58 -0700229always_inline f64
230clib_time_now (clib_time_t * c)
231{
Dave Barachc3799992016-08-15 11:12:27 -0400232 return clib_time_now_internal (c, clib_cpu_time_now ());
Ed Warnickecb9cada2015-12-08 15:45:58 -0700233}
234
Dave Barachc3799992016-08-15 11:12:27 -0400235always_inline void
236clib_cpu_time_wait (u64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700237{
238 u64 t_end = clib_cpu_time_now () + dt;
239 while (clib_cpu_time_now () < t_end)
240 ;
241}
242
243void clib_time_init (clib_time_t * c);
244
245#ifdef CLIB_UNIX
246
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
252
253/* Use 64bit floating point to represent time offset from epoch. */
Dave Barachc3799992016-08-15 11:12:27 -0400254always_inline f64
255unix_time_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700256{
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200257 struct timespec ts;
258#ifdef __MACH__
259 clock_gettime (CLOCK_REALTIME, &ts);
260#else
Ed Warnickecb9cada2015-12-08 15:45:58 -0700261 /* clock_gettime without indirect syscall uses GLIBC wrappers which
262 we don't want. Just the bare metal, please. */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700263 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200264#endif
Dave Barachc3799992016-08-15 11:12:27 -0400265 return ts.tv_sec + 1e-9 * ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700266}
267
268/* As above but integer number of nano-seconds. */
Dave Barachc3799992016-08-15 11:12:27 -0400269always_inline u64
270unix_time_now_nsec (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700271{
272 struct timespec ts;
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200273#ifdef __MACH__
274 clock_gettime (CLOCK_REALTIME, &ts);
275#else
Ed Warnickecb9cada2015-12-08 15:45:58 -0700276 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200277#endif
Dave Barachc3799992016-08-15 11:12:27 -0400278 return 1e9 * ts.tv_sec + ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700279}
280
Ole Troaned929252017-06-13 21:15:40 +0200281always_inline void
282unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
283{
284 struct timespec ts;
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200285#ifdef __MACH__
286 clock_gettime (CLOCK_REALTIME, &ts);
287#else
Ole Troaned929252017-06-13 21:15:40 +0200288 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Damjan Marion4dffd1c2018-09-03 12:30:36 +0200289#endif
Ole Troaned929252017-06-13 21:15:40 +0200290 *sec = ts.tv_sec;
291 *nsec = ts.tv_nsec;
292}
293
Dave Barachc3799992016-08-15 11:12:27 -0400294always_inline f64
295unix_usage_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700296{
297 struct rusage u;
298 getrusage (RUSAGE_SELF, &u);
Dave Barachc3799992016-08-15 11:12:27 -0400299 return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
300 + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700301}
302
Dave Barachc3799992016-08-15 11:12:27 -0400303always_inline void
304unix_sleep (f64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700305{
Dave Barachb9f2cf02017-10-17 13:13:42 -0400306 struct timespec ts, tsrem;
307 ts.tv_sec = dt;
308 ts.tv_nsec = 1e9 * (dt - (f64) ts.tv_sec);
309
310 while (nanosleep (&ts, &tsrem) < 0)
311 ts = tsrem;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700312}
313
Dave Barachc3799992016-08-15 11:12:27 -0400314#else /* ! CLIB_UNIX */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700315
/* Non-unix stub: no wall clock available; always reports 0 seconds. */
always_inline f64
unix_time_now (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700321
/* Non-unix stub: always reports 0 nanoseconds. */
always_inline u64
unix_time_now_nsec (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700327
Ole Troaned929252017-06-13 21:15:40 +0200328always_inline void
329unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
330{
331}
332
/* Non-unix stub: no rusage accounting; always reports 0 seconds. */
always_inline f64
unix_usage_now (void)
{
  return 0;
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700338
/* Non-unix stub: returns immediately without sleeping. */
always_inline void
unix_sleep (f64 dt)
{
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700343
344#endif
345
346#endif /* included_time_h */
Dave Barachc3799992016-08-15 11:12:27 -0400347
348/*
349 * fd.io coding-style-patch-verification: ON
350 *
351 * Local Variables:
352 * eval: (c-set-style "gnu")
353 * End:
354 */