blob: ced9677d1e2c0bc5406ad5c444e2d71fc4da649e [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/*
16 Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
17
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
25
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
28
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36*/
37
38#ifndef included_time_h
39#define included_time_h
40
41#include <vppinfra/clib.h>
42
/* Per-thread CPU time-keeping state.  Converts raw CPU time-stamp
   counter values (see clib_cpu_time_now) into seconds, and
   periodically re-verifies the estimated clock frequency. */
typedef struct
{
  /* Total run time in clock cycles
     since clib_time_init call. */
  u64 total_cpu_time;

  /* Last recorded time stamp. */
  u64 last_cpu_time;

  /* CPU clock frequency. */
  f64 clocks_per_second;

  /* 1 / cpu clock frequency: conversion factor
     from clock cycles into seconds. */
  f64 seconds_per_clock;

  /* Time stamp of call to clib_time_init call. */
  u64 init_cpu_time;

  /* CPU time stamp at the last frequency verification; compared
     against last_cpu_time in clib_time_now_internal to decide when
     to call clib_time_verify_frequency. */
  u64 last_verify_cpu_time;

  /* Same but for reference time (if present). */
  f64 last_verify_reference_time;

  /* log2 of the clock frequency, and log2 of the verification
     interval (in clocks) that triggers a frequency re-check. */
  u32 log2_clocks_per_second, log2_clocks_per_frequency_verify;
} clib_time_t;
69
/* Return CPU time stamp as 64bit number.
   One implementation per architecture; all return a free-running
   cycle/tick counter whose frequency is measured separately by
   clib_time_init / clib_time_verify_frequency. */
#if defined(__x86_64__) || defined(i386)
always_inline u64
clib_cpu_time_now (void)
{
  u32 a, d;
  /* RDTSC returns the time-stamp counter split across EDX:EAX. */
  asm volatile ("rdtsc":"=a" (a), "=d" (d));
  return (u64) a + ((u64) d << (u64) 32);
}

#elif defined (__powerpc64__)

always_inline u64
clib_cpu_time_now (void)
{
  u64 t;
  /* 64-bit time base register, read in one instruction. */
  asm volatile ("mftb %0":"=r" (t));
  return t;
}

#elif defined (__SPU__)

always_inline u64
clib_cpu_time_now (void)
{
#ifdef _XLC
  return spu_rdch (0x8);
#else
  /* NOTE(review): non-XLC SPU build has no counter yet. */
  return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}

#elif defined (__powerpc__)

always_inline u64
clib_cpu_time_now (void)
{
  u32 hi1, hi2, lo;
  /* 32-bit PowerPC: read upper, lower, then upper again and retry
     if the upper half changed between reads (carry propagated). */
  asm volatile ("1:\n"
		"mftbu %[hi1]\n"
		"mftb %[lo]\n"
		"mftbu %[hi2]\n"
		"cmpw %[hi1],%[hi2]\n"
		"bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
  return (u64) lo + ((u64) hi2 << (u64) 32);
}

#elif defined (__aarch64__)
always_inline u64
clib_cpu_time_now (void)
{
  u64 vct;
  /* Virtual counter-timer register.
     User access to cntvct_el0 is enabled in Linux kernel since 3.12. */
  asm volatile ("mrs %0, cntvct_el0":"=r" (vct));
  return vct;
}

#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
always_inline u64
clib_cpu_time_now (void)	/* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
  u64 tsc;
  asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
  return tsc;
}
#elif defined(__ARM_ARCH_7A__)
always_inline u64
clib_cpu_time_now (void)
{
  u32 tsc;
  /* Cycle counter via cp15 (presumably PMCCNTR; requires user-mode
     access to be enabled by the kernel -- TODO confirm). */
  asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
  return (u64) tsc;
}
#else
always_inline u64
clib_cpu_time_now (void)
{
  u32 lo;
  /* Older ARM: 32-bit counter only; wraps quickly at GHz rates. */
  asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
  return (u64) lo;
}
#endif

#elif defined (__xtensa__)

/* Stub for now. */
always_inline u64
clib_cpu_time_now (void)
{
  return 0;
}

#elif defined (__TMS320C6X__)

always_inline u64
clib_cpu_time_now (void)
{
  u32 l, h;

  /* Interrupts disabled (dint/rint) so TSCL/TSCH are read as a
     coherent pair. */
  asm volatile (" dint\n"
		" mvc .s2 TSCL,%0\n"
		" mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));

  return ((u64) h << 32) | l;
}

#elif defined(_mips) && __mips == 64

always_inline u64
clib_cpu_time_now (void)
{
  u64 result;
  /* rdhwr $31: user-mode readable cycle counter. */
  asm volatile ("rdhwr %0,$31\n":"=r" (result));
  return result;
}

#else
#error "don't know how to read CPU time stamp"

#endif
191
192void clib_time_verify_frequency (clib_time_t * c);
193
/* Advance the running clock with a fresh CPU time stamp n and return
   seconds elapsed since clib_time_init.  Unsigned arithmetic makes the
   accumulation robust to counter wrap-around between calls. */
always_inline f64
clib_time_now_internal (clib_time_t * c, u64 n)
{
  u64 l = c->last_cpu_time;
  u64 t = c->total_cpu_time;
  /* Accumulate clocks elapsed since the previous sample. */
  t += n - l;
  c->total_cpu_time = t;
  c->last_cpu_time = n;
  /* Re-verify the clock frequency once more than
     2^log2_clocks_per_frequency_verify clocks have passed since the
     last verification (the shift is non-zero exactly in that case). */
  if (PREDICT_FALSE
      ((c->last_cpu_time -
	c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
    clib_time_verify_frequency (c);
  return t * c->seconds_per_clock;
}
208
John Lo7f358b32018-04-28 01:19:24 -0400209/* Maximum f64 value as max clib_time */
210#define CLIB_TIME_MAX (1.7976931348623157e+308)
211
Ed Warnickecb9cada2015-12-08 15:45:58 -0700212always_inline f64
213clib_time_now (clib_time_t * c)
214{
Dave Barachc3799992016-08-15 11:12:27 -0400215 return clib_time_now_internal (c, clib_cpu_time_now ());
Ed Warnickecb9cada2015-12-08 15:45:58 -0700216}
217
Dave Barachc3799992016-08-15 11:12:27 -0400218always_inline void
219clib_cpu_time_wait (u64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700220{
221 u64 t_end = clib_cpu_time_now () + dt;
222 while (clib_cpu_time_now () < t_end)
223 ;
224}
225
226void clib_time_init (clib_time_t * c);
227
228#ifdef CLIB_UNIX
229
#include <errno.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#include <sys/syscall.h>
235
236/* Use 64bit floating point to represent time offset from epoch. */
Dave Barachc3799992016-08-15 11:12:27 -0400237always_inline f64
238unix_time_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700239{
240 /* clock_gettime without indirect syscall uses GLIBC wrappers which
241 we don't want. Just the bare metal, please. */
242 struct timespec ts;
243 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Dave Barachc3799992016-08-15 11:12:27 -0400244 return ts.tv_sec + 1e-9 * ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700245}
246
247/* As above but integer number of nano-seconds. */
Dave Barachc3799992016-08-15 11:12:27 -0400248always_inline u64
249unix_time_now_nsec (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700250{
251 struct timespec ts;
252 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
Dave Barachc3799992016-08-15 11:12:27 -0400253 return 1e9 * ts.tv_sec + ts.tv_nsec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700254}
255
Ole Troaned929252017-06-13 21:15:40 +0200256always_inline void
257unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
258{
259 struct timespec ts;
260 syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
261 *sec = ts.tv_sec;
262 *nsec = ts.tv_nsec;
263}
264
Dave Barachc3799992016-08-15 11:12:27 -0400265always_inline f64
266unix_usage_now (void)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700267{
268 struct rusage u;
269 getrusage (RUSAGE_SELF, &u);
Dave Barachc3799992016-08-15 11:12:27 -0400270 return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
271 + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700272}
273
Dave Barachc3799992016-08-15 11:12:27 -0400274always_inline void
275unix_sleep (f64 dt)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700276{
Dave Barachb9f2cf02017-10-17 13:13:42 -0400277 struct timespec ts, tsrem;
278 ts.tv_sec = dt;
279 ts.tv_nsec = 1e9 * (dt - (f64) ts.tv_sec);
280
281 while (nanosleep (&ts, &tsrem) < 0)
282 ts = tsrem;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700283}
284
#else /* ! CLIB_UNIX */

/* Non-Unix targets: no OS reference clock is available, so every
   reference-time query returns zero and sleeping is a no-op.
   CPU-counter based timing (clib_cpu_time_now) still works. */

always_inline f64
unix_time_now (void)
{
  return 0;
}

always_inline u64
unix_time_now_nsec (void)
{
  return 0;
}

always_inline void
unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
{
  /* NOTE: leaves *sec and *nsec unmodified. */
}

always_inline f64
unix_usage_now (void)
{
  return 0;
}

always_inline void
unix_sleep (f64 dt)
{
}
Ed Warnickecb9cada2015-12-08 15:45:58 -0700314
315#endif
316
317#endif /* included_time_h */
Dave Barachc3799992016-08-15 11:12:27 -0400318
319/*
320 * fd.io coding-style-patch-verification: ON
321 *
322 * Local Variables:
323 * eval: (c-set-style "gnu")
324 * End:
325 */