/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * vlib provides lock-free counters, but those:
 * - have 16-bit per-CPU counters, which may overflow;
 * - can only be incremented.
 *
 * This is very similar to vlib counters, but it may be used to count
 * references. Such a refcount object includes an arbitrary number of
 * counters, each identified by its index, which is used to aggregate
 * the per-CPU values.
 *
 * Warning:
 * This reference counter is lock-free but is not race-condition free.
 * The counting result is approximate, and another mechanism needs to be
 * used in order to ensure that an object may safely be freed.
 *
 */
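
/*
 * Illustrative usage sketch (not part of the original header): how a caller
 * might take, release, and read a reference. The names 'my_refcount',
 * 'obj_index' and 'vm' are hypothetical; 'vm' is assumed to be the current
 * thread's vlib_main_t.
 *
 *   vlib_refcount_t my_refcount;
 *   vlib_refcount_init (&my_refcount);
 *
 *   // Take a reference on object 'obj_index' from the current thread.
 *   vlib_refcount_add (&my_refcount, vm->thread_index, obj_index, 1);
 *
 *   // Release it later, possibly from a different thread.
 *   vlib_refcount_add (&my_refcount, vm->thread_index, obj_index, -1);
 *
 *   // Read the approximate aggregate across all threads.
 *   u64 total = vlib_refcount_get (&my_refcount, obj_index);
 */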

#include <vnet/vnet.h>
#include <vppinfra/lock.h>

/*
 * Reference counting
 * A dedicated reference counter is used here. The design is quite
 * similar to vlib counters, but:
 * - It is possible to decrease the value
 * - Summing will not zero the per-thread counters
 * - Only the owning thread may reallocate its own counters vector
 *   (to avoid concurrency issues)
 */
typedef struct {
  u32 *counters;
  clib_spinlock_t counter_lock;
  CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;

typedef struct {
  vlib_refcount_per_cpu_t *per_cpu;
} vlib_refcount_t;

static_always_inline
void vlib_refcount_lock (clib_spinlock_t counter_lock)
{
  clib_spinlock_lock (&counter_lock);
}

static_always_inline
void vlib_refcount_unlock (clib_spinlock_t counter_lock)
{
  clib_spinlock_unlock (&counter_lock);
}

void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);

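/*
 * Sketch (an assumption, for illustration only) of what
 * __vlib_refcount_resize() could look like in the corresponding .c file:
 * grow the counters vector, copy the old values, and swap the pointers
 * while holding the per-thread lock so a concurrent reader never sees a
 * freed vector.
 *
 *   void __vlib_refcount_resize (vlib_refcount_per_cpu_t *per_cpu, u32 size)
 *   {
 *     u32 *new_counter = 0, *old_counter;
 *     vec_validate (new_counter, size);
 *     vlib_refcount_lock (per_cpu->counter_lock);
 *     clib_memcpy (new_counter, per_cpu->counters,
 *                  vec_len (per_cpu->counters) * sizeof (u32));
 *     old_counter = per_cpu->counters;
 *     per_cpu->counters = new_counter;
 *     vlib_refcount_unlock (per_cpu->counter_lock);
 *     vec_free (old_counter);
 *   }
 */
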
static_always_inline
void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
{
  vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, (vec_len(per_cpu->counters)) * 2));

  per_cpu->counters[counter_index] += v;
}

u64 vlib_refcount_get(vlib_refcount_t *r, u32 index);

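/*
 * Minimal sketch (an assumption about the companion .c file) of how
 * vlib_refcount_get() might be implemented: sum the per-thread counters
 * under each thread's lock, without zeroing them.
 *
 *   u64 vlib_refcount_get (vlib_refcount_t *r, u32 index)
 *   {
 *     u64 count = 0;
 *     vlib_thread_main_t *tm = vlib_get_thread_main ();
 *     u32 thread_index;
 *     for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
 *       {
 *         vlib_refcount_lock (r->per_cpu[thread_index].counter_lock);
 *         if (index < vec_len (r->per_cpu[thread_index].counters))
 *           count += r->per_cpu[thread_index].counters[index];
 *         vlib_refcount_unlock (r->per_cpu[thread_index].counter_lock);
 *       }
 *     return count;
 *   }
 */
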
static_always_inline
void vlib_refcount_init(vlib_refcount_t *r)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  r->per_cpu = 0;
  vec_validate (r->per_cpu, tm->n_vlib_mains - 1);

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      clib_spinlock_init (&r->per_cpu[thread_index].counter_lock);
    }
}
