/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_time_h
#define included_time_h

#include <vppinfra/clib.h>

typedef struct
{
  /* Total run time in clock cycles
     since clib_time_init call. */
  u64 total_cpu_time;

  /* Last recorded time stamp. */
  u64 last_cpu_time;

  /* CPU clock frequency. */
  f64 clocks_per_second;

  /* 1 / cpu clock frequency: conversion factor
     from clock cycles into seconds. */
  f64 seconds_per_clock;

  /* Time stamp taken when clib_time_init was called. */
  u64 init_cpu_time;

  /* CPU time stamp at the last frequency verification. */
  u64 last_verify_cpu_time;

  /* Same but for reference time (if present). */
  f64 last_verify_reference_time;

  u32 log2_clocks_per_second, log2_clocks_per_frequency_verify;
} clib_time_t;
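
/* A minimal sketch of how these fields relate (illustrative only, not
   part of the API): seconds_per_clock is simply 1.0 / clocks_per_second,
   so an elapsed cycle count converts to seconds by multiplication.
   For example, assuming a hypothetical 2.5 GHz CPU:

     clib_time_t t;
     t.clocks_per_second = 2.5e9;
     t.seconds_per_clock = 1.0 / t.clocks_per_second;
     f64 dt = 1e9 * t.seconds_per_clock;   // 1e9 cycles ~= 0.4 seconds

   In normal use these fields are filled in by clib_time_init and should
   be treated as read-only. */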

/* Return CPU time stamp as 64bit number. */
#if defined(__x86_64__) || defined(i386)
always_inline u64
clib_cpu_time_now (void)
{
  u32 a, d;
  asm volatile ("rdtsc":"=a" (a), "=d" (d));
  return (u64) a + ((u64) d << (u64) 32);
}

#elif defined (__powerpc64__)

always_inline u64
clib_cpu_time_now (void)
{
  u64 t;
  asm volatile ("mftb %0":"=r" (t));
  return t;
}

#elif defined (__SPU__)

always_inline u64
clib_cpu_time_now (void)
{
#ifdef _XLC
  return spu_rdch (0x8);
#else
  return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}

#elif defined (__powerpc__)

always_inline u64
clib_cpu_time_now (void)
{
  u32 hi1, hi2, lo;
  asm volatile ("1:\n"
		"mftbu %[hi1]\n"
		"mftb %[lo]\n"
		"mftbu %[hi2]\n"
		"cmpw %[hi1],%[hi2]\n"
		"bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
  return (u64) lo + ((u64) hi2 << (u64) 32);
}

#elif defined (__aarch64__)
always_inline u64
clib_cpu_time_now (void)
{
  u64 vct;
  /* User access to cntvct_el0 is enabled in Linux kernel since 3.12. */
  asm volatile ("mrs %0, cntvct_el0":"=r" (vct));
  return vct;
}

#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
always_inline u64
clib_cpu_time_now (void)	/* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
  u64 tsc;
  asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
  return tsc;
}
#elif defined(__ARM_ARCH_7A__)
always_inline u64
clib_cpu_time_now (void)
{
  u32 tsc;
  asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
  return (u64) tsc;
}
#else
always_inline u64
clib_cpu_time_now (void)
{
  u32 lo;
  asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
  return (u64) lo;
}
#endif

#elif defined (__xtensa__)

/* Stub for now. */
always_inline u64
clib_cpu_time_now (void)
{
  return 0;
}

#elif defined (__TMS320C6X__)

always_inline u64
clib_cpu_time_now (void)
{
  u32 l, h;

  asm volatile (" dint\n"
		" mvc .s2 TSCL,%0\n"
		" mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));

  return ((u64) h << 32) | l;
}

#else
#error "don't know how to read CPU time stamp"

#endif

void clib_time_verify_frequency (clib_time_t * c);

always_inline f64
clib_time_now_internal (clib_time_t * c, u64 n)
{
  u64 l = c->last_cpu_time;
  u64 t = c->total_cpu_time;
  t += n - l;
  c->total_cpu_time = t;
  c->last_cpu_time = n;
  if (PREDICT_FALSE
      ((c->last_cpu_time -
	c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
    clib_time_verify_frequency (c);
  return t * c->seconds_per_clock;
}
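
/* How the update above behaves, as a rough worked example (numbers are
   assumptions for illustration, not values produced by clib_time_init):
   the elapsed cycles n - last_cpu_time are accumulated into
   total_cpu_time, and the result is scaled by seconds_per_clock.  The
   right shift acts as a cheap comparison: once
   last_cpu_time - last_verify_cpu_time reaches
   2^log2_clocks_per_frequency_verify cycles, the shifted value becomes
   non-zero and clib_time_verify_frequency is re-run.  E.g. with
   log2_clocks_per_frequency_verify == 31 on a ~2 GHz clock, the
   frequency would be re-verified roughly every 2^31 / 2e9 ~= 1 second. */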

/* Maximum f64 value as max clib_time */
#define CLIB_TIME_MAX (1.7976931348623157e+308)

always_inline f64
clib_time_now (clib_time_t * c)
{
  return clib_time_now_internal (c, clib_cpu_time_now ());
}

always_inline void
clib_cpu_time_wait (u64 dt)
{
  u64 t_end = clib_cpu_time_now () + dt;
  while (clib_cpu_time_now () < t_end)
    ;
}
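
/* Note that clib_cpu_time_wait spins for a cycle count, not a time in
   seconds.  A possible conversion, assuming an initialized clib_time_t
   named `ct` (illustrative sketch, not an API provided by this header):

     f64 seconds = 5e-6;               // spin for about 5 microseconds
     clib_cpu_time_wait ((u64) (seconds * ct.clocks_per_second));
*/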

void clib_time_init (clib_time_t * c);
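
/* A minimal usage sketch (assumptions: a stack-allocated clib_time_t and
   an arbitrary workload; variable names are illustrative only):

     clib_time_t ct;
     clib_time_init (&ct);

     f64 t0 = clib_time_now (&ct);
     do_some_work ();                          // hypothetical workload
     f64 elapsed = clib_time_now (&ct) - t0;   // seconds, as f64
*/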

#ifdef CLIB_UNIX

#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Use 64bit floating point to represent time offset from epoch. */
always_inline f64
unix_time_now (void)
{
  /* clock_gettime without indirect syscall uses GLIBC wrappers which
     we don't want. Just the bare metal, please. */
  struct timespec ts;
  syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
  return ts.tv_sec + 1e-9 * ts.tv_nsec;
}
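
/* Resolution note (back-of-the-envelope, not a guarantee): an f64 has a
   53-bit mantissa, so at an epoch offset around 1.7e9 seconds the unit
   in the last place is about 2^-22 s, i.e. roughly a quarter of a
   microsecond.  Callers that need full nanosecond precision should use
   unix_time_now_nsec below. */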

/* As above, but as an integer number of nanoseconds. */
always_inline u64
unix_time_now_nsec (void)
{
  struct timespec ts;
  syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
  /* Use integer arithmetic: going through f64 (1e9 * tv_sec) would round
     away the low-order nanoseconds for present-day epoch values. */
  return (u64) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

always_inline void
unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
{
  struct timespec ts;
  syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
  *sec = ts.tv_sec;
  *nsec = ts.tv_nsec;
}

always_inline f64
unix_usage_now (void)
{
  struct rusage u;
  getrusage (RUSAGE_SELF, &u);
  return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
    + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
}

always_inline void
unix_sleep (f64 dt)
{
  struct timespec ts, tsrem;
  ts.tv_sec = dt;
  ts.tv_nsec = 1e9 * (dt - (f64) ts.tv_sec);

  /* If nanosleep is interrupted by a signal it reports the unslept time
     in tsrem; retry with the remainder until the full interval elapses. */
  while (nanosleep (&ts, &tsrem) < 0)
    ts = tsrem;
}

#else /* ! CLIB_UNIX */

always_inline f64
unix_time_now (void)
{
  return 0;
}

always_inline u64
unix_time_now_nsec (void)
{
  return 0;
}

always_inline void
unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
{
}

always_inline f64
unix_usage_now (void)
{
  return 0;
}

always_inline void
unix_sleep (f64 dt)
{
}

#endif

#endif /* included_time_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */