/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * vlib provides lock-free counters, but they:
 * - have 16-bit per-CPU counters, which may overflow;
 * - can only be incremented.
 *
 * This is very similar to vlib counters, but it may be used to count
 * references. A refcount contains an arbitrary number of counters, each
 * identified by its index. Counters are kept in per-CPU memory and
 * aggregated when they are read.
 *
 * Warning:
 * This reference counter is lock-free but is not race-condition free.
 * The counting result is only approximate, so another mechanism must be
 * used to ensure that an object may safely be freed.
 */
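
/*
 * A minimal usage sketch (an illustrative assumption about intended use,
 * not code from this file; vlib_get_thread_index() is the standard vlib
 * helper returning the calling thread's index):
 *
 *   vlib_refcount_t rc;
 *   vlib_refcount_init (&rc);
 *
 *   // Thread taking a reference on object 'index':
 *   vlib_refcount_add (&rc, vlib_get_thread_index (), index, 1);
 *   // Thread releasing it:
 *   vlib_refcount_add (&rc, vlib_get_thread_index (), index, -1);
 *
 *   // Aggregate value; per the warning above, reading 0 here is not by
 *   // itself sufficient proof that the object can be freed.
 *   u64 refs = vlib_refcount_get (&rc, index);
 */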

#include <vnet/vnet.h>
#include <vppinfra/lock.h>

/*
 * Reference counting
 * A specific reference counter is used. The design is quite
 * similar to vlib counters, but:
 * - It is possible to decrease the value.
 * - Summing will not zero the per-thread counters.
 * - Only the owning thread may reallocate its own counters vector
 *   (to avoid concurrency issues).
 */
typedef struct {
  u32 *counters;
  clib_spinlock_t counter_lock;
  CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;
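/* The trailing CLIB_CACHE_LINE_ALIGN_MARK pads each per-thread element
 * to a cache line, so one thread's counter updates do not false-share
 * with another thread's element. */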

typedef struct {
  vlib_refcount_per_cpu_t *per_cpu;
} vlib_refcount_t;

static_always_inline
void vlib_refcount_lock (clib_spinlock_t counter_lock)
{
  clib_spinlock_lock (&counter_lock);
}

static_always_inline
void vlib_refcount_unlock (clib_spinlock_t counter_lock)
{
  clib_spinlock_unlock (&counter_lock);
}
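
/*
 * Note: the increment path (vlib_refcount_add below) never takes
 * counter_lock. These helpers are presumably used by the out-of-line
 * resize and read paths, to serialize a reader against the owning
 * thread's vector reallocation.
 */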

void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
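
/*
 * A sketch of a consistent out-of-line definition (an assumption, not
 * necessarily the shipped implementation): grow a fresh vector, copy the
 * old values, and swap it in under counter_lock so a concurrent reader
 * never sees a half-initialized vector.
 *
 *   void
 *   __vlib_refcount_resize (vlib_refcount_per_cpu_t *per_cpu, u32 size)
 *   {
 *     u32 *new_counters = 0, *old_counters;
 *     vec_validate (new_counters, size);
 *     // Only the owning thread writes its counters, so copying before
 *     // taking the lock is safe.
 *     clib_memcpy (new_counters, per_cpu->counters,
 *                  vec_len (per_cpu->counters) * sizeof (u32));
 *
 *     vlib_refcount_lock (per_cpu->counter_lock);
 *     old_counters = per_cpu->counters;
 *     per_cpu->counters = new_counters;
 *     vlib_refcount_unlock (per_cpu->counter_lock);
 *
 *     vec_free (old_counters);
 *   }
 */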
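
/*
 * Add 'v' (which may be negative) to the calling thread's counter
 * 'counter_index'. Per the design note above, this must only be called
 * with the caller's own thread_index: the resize path reallocates
 * per_cpu->counters, which is safe only because the owning thread is its
 * sole writer. On a miss the vector grows to
 * clib_max(counter_index + 16, 2 * vec_len), so most calls are a single
 * unlocked increment.
 */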
static_always_inline
void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
{
  vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, vec_len(per_cpu->counters) * 2));

  per_cpu->counters[counter_index] += v;
}

u64 vlib_refcount_get(vlib_refcount_t *r, u32 index);

static_always_inline
void vlib_refcount_init(vlib_refcount_t *r)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  r->per_cpu = 0;
  vec_validate (r->per_cpu, tm->n_vlib_mains - 1);

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      clib_spinlock_init (&r->per_cpu[thread_index].counter_lock);
    }
}
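
/*
 * For reference, a sketch of how vlib_refcount_get could aggregate the
 * per-thread values, consistent with the declarations above (an
 * assumption about the companion .c file, not its authoritative text):
 *
 *   u64
 *   vlib_refcount_get (vlib_refcount_t *r, u32 index)
 *   {
 *     u64 count = 0;
 *     vlib_thread_main_t *tm = vlib_get_thread_main ();
 *     u32 thread_index;
 *
 *     for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
 *       {
 *         // Lock each per-thread element so the owning thread cannot
 *         // swap the vector out from under us while we read it.
 *         vlib_refcount_lock (r->per_cpu[thread_index].counter_lock);
 *         if (index < vec_len (r->per_cpu[thread_index].counters))
 *           count += r->per_cpu[thread_index].counters[index];
 *         vlib_refcount_unlock (r->per_cpu[thread_index].counter_lock);
 *       }
 *     return count;
 *   }
 */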