/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * vlib provides lock-free counters, but those:
 * - have 16-bit per-CPU counters, which may overflow;
 * - can only be incremented.
 *
 * This is very similar to vlib counters, but can be used to count references.
 * Such a counter includes an arbitrary number of counters, each identified
 * by its index. Counters are kept in per-CPU vectors and aggregated when read.
 *
 * Warning:
 * This reference counter is lock-free but is not race-condition free.
 * The counting result is approximate, so another mechanism must be used
 * on top of it to decide when an object can actually be freed.
 *
 */

#include <vnet/vnet.h>

/*
 * Reference counting
 * A specific reference counter is used. The design is quite
 * similar to vlib counters, but:
 * - the value can also be decreased;
 * - summing does not zero the per-thread counters;
 * - only the owning thread may reallocate its own counters vector
 *   (to avoid concurrency issues).
 */
typedef struct {
  u32 *counters;
  volatile u32 *counter_lock;
  CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;

typedef struct {
  vlib_refcount_per_cpu_t *per_cpu;
} vlib_refcount_t;

/* Simple spinlock protecting one thread's counters vector. */
static_always_inline
void vlib_refcount_lock (volatile u32 *counter_lock)
{
  while (__sync_lock_test_and_set (counter_lock, 1))
    ;
}

static_always_inline
void vlib_refcount_unlock (volatile u32 *counter_lock)
{
  *counter_lock = 0;
}

void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);

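/*
 * __vlib_refcount_resize() is implemented out of line. The sketch below is
 * illustrative only: one plausible way to grow a per-thread counters vector
 * while a concurrent reader may be summing it, consistent with the locking
 * scheme above. It is not necessarily the exact implementation.
 */
#if 0 /* illustrative sketch, not compiled */
void
__vlib_refcount_resize (vlib_refcount_per_cpu_t *per_cpu, u32 size)
{
  u32 *new_counter = 0, *old_counter;
  /* Allocate and zero the larger vector outside the lock. */
  vec_validate (new_counter, size);
  vlib_refcount_lock (per_cpu->counter_lock);
  /* Copy existing values and publish the new vector; readers take the same
   * lock, so they never observe a half-swapped vector. */
  memcpy (new_counter, per_cpu->counters,
          vec_len (per_cpu->counters) * sizeof (u32));
  old_counter = per_cpu->counters;
  per_cpu->counters = new_counter;
  vlib_refcount_unlock (per_cpu->counter_lock);
  vec_free (old_counter);
}
#endif
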
/* Add (or, with a negative v, subtract) v to one counter. Call with the
 * calling thread's own thread_index: only the owning thread may resize its
 * counters vector. */
static_always_inline
void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
{
  vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, (vec_len(per_cpu->counters)) * 2));

  per_cpu->counters[counter_index] += v;
}

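/*
 * Usage sketch (illustrative, not part of the original header): a worker
 * thread takes or drops a reference on its own per-thread counter. The
 * example_* names are hypothetical; vlib_get_thread_index() is the usual
 * accessor for the caller's thread index.
 */
#if 0 /* illustrative sketch, not compiled */
static_always_inline void
example_obj_ref (vlib_refcount_t *r, u32 obj_index)
{
  /* Each thread only updates its own counters, so no lock on this path. */
  vlib_refcount_add (r, vlib_get_thread_index (), obj_index, 1);
}

static_always_inline void
example_obj_unref (vlib_refcount_t *r, u32 obj_index)
{
  /* Dropping a reference is a decrement on the calling thread's counter. */
  vlib_refcount_add (r, vlib_get_thread_index (), obj_index, -1);
}
#endif
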
/* Sum of one counter across all threads (implemented out of line). */
u64 vlib_refcount_get(vlib_refcount_t *r, u32 index);

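/*
 * Illustrative sketch of what vlib_refcount_get() is expected to do: sum the
 * per-thread counters for one index, taking each thread's counter_lock so the
 * vector cannot be swapped out by a concurrent __vlib_refcount_resize().
 * Not necessarily the exact out-of-line implementation.
 */
#if 0 /* illustrative sketch, not compiled */
u64
vlib_refcount_get (vlib_refcount_t *r, u32 index)
{
  u64 count = 0;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      vlib_refcount_lock (r->per_cpu[thread_index].counter_lock);
      if (index < vec_len (r->per_cpu[thread_index].counters))
        count += r->per_cpu[thread_index].counters[index];
      vlib_refcount_unlock (r->per_cpu[thread_index].counter_lock);
    }
  return count;
}
#endif
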
/* Allocate the per-thread state; call once, before any add/get. */
static_always_inline
void vlib_refcount_init(vlib_refcount_t *r)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  r->per_cpu = 0;
  vec_validate (r->per_cpu, tm->n_vlib_mains - 1);

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      r->per_cpu[thread_index].counter_lock =
	clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
      r->per_cpu[thread_index].counter_lock[0] = 0;
    }
}
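
/*
 * End-to-end usage sketch (illustrative; all example_* names are
 * hypothetical): initialize once from the main thread, add/remove references
 * from workers, and read the aggregate before freeing an object. As the
 * warning at the top of this file says, the aggregate alone is approximate,
 * so a real user must pair it with another mechanism (e.g. waiting for
 * workers to quiesce) before trusting a zero count.
 */
#if 0 /* illustrative sketch, not compiled */
static vlib_refcount_t example_refcount;

static clib_error_t *
example_init (vlib_main_t *vm)
{
  vlib_refcount_init (&example_refcount);
  return 0;
}

static void
example_try_free (u32 obj_index)
{
  /* A zero sum is necessary but not sufficient: combine with a barrier or
   * similar scheme before actually releasing the object. */
  if (vlib_refcount_get (&example_refcount, obj_index) == 0)
    {
      /* ... safe-release logic goes here ... */
    }
}
#endif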