vppinfra: refactor test_and_set spinlocks to use clib_spinlock_t
Spinlock performance is improved when the lock is implemented with
compare_and_exchange instead of test_and_set. All instances of
test_and_set locks were refactored to use clib_spinlock_t where
possible. Some locks, e.g. the ssvm lock, synchronize between processes
rather than threads, so they cannot directly use clib_spinlock_t.
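
For reference, a minimal sketch of the difference (illustrative GCC
__atomic builtins only, not the vppinfra wrappers; lock_test_and_set,
lock_cmp_and_exchange and unlock are hypothetical names):

  #include <stdint.h>

  /* Old pattern: every spin iteration is an atomic write (exchange),
   * so each waiter keeps stealing the cache line from the other
   * waiters and from the lock owner. */
  static inline void
  lock_test_and_set (volatile uint32_t * l)
  {
    while (__atomic_exchange_n (l, 1, __ATOMIC_ACQUIRE))
      ;   /* the real code inserts a CLIB_PAUSE () hint here */
  }

  /* New pattern: one compare_and_exchange attempt, then spin on
   * plain relaxed loads until the lock looks free.  Waiters share
   * the cache line read-only, so the owner's release stays cheap. */
  static inline void
  lock_cmp_and_exchange (volatile uint32_t * l)
  {
    uint32_t free = 0;
    while (!__atomic_compare_exchange_n (l, &free, 1, 0 /* strong */,
                                         __ATOMIC_ACQUIRE,
                                         __ATOMIC_RELAXED))
      {
        while (__atomic_load_n (l, __ATOMIC_RELAXED))
          ;   /* pause hint here as well */
        free = 0;
      }
  }

  static inline void
  unlock (volatile uint32_t * l)
  {
    __atomic_store_n (l, 0, __ATOMIC_RELEASE);
  }

The key point is that failed waiters fall back to read-only spinning,
so the lock word stays in the shared cache state until it is actually
released.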
Type: refactor
Change-Id: Ia16b5d4cd49209b2b57b8df6c94615c28b11bb60
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
index f5e4949..b807a26 100755
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@ -152,10 +152,7 @@
   vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
   oldheap = clib_mem_set_heap (t->mheap);
 
-  t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                           CLIB_CACHE_LINE_BYTES);
-  t->writer_lock[0] = 0;
-
+  clib_spinlock_init (&t->writer_lock);
   clib_mem_set_heap (oldheap);
   return (t);
 }
@@ -193,7 +190,7 @@
   u32 required_length;
   void *oldheap;
 
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
   required_length =
     (sizeof (vnet_classify_entry_t) + (t->match_n_vectors * sizeof (u32x4)))
     * t->entries_per_page * (1 << log2_pages);
@@ -222,7 +219,7 @@
 vnet_classify_entry_free (vnet_classify_table_t * t,
                           vnet_classify_entry_t * v, u32 log2_pages)
 {
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
 
   ASSERT (vec_len (t->freelists) > log2_pages);
 
@@ -447,8 +444,7 @@
 
   hash >>= t->log2_nbuckets;
 
-  while (clib_atomic_test_and_set (t->writer_lock))
-    CLIB_PAUSE ();
+  clib_spinlock_lock (&t->writer_lock);
 
   /* First elt in the bucket? */
   if (b->offset == 0)
@@ -640,7 +636,7 @@
 
   vnet_classify_entry_free (t, v, old_log2_pages);
 
 unlock:
-  clib_atomic_release (&t->writer_lock[0]);
+  clib_spinlock_unlock (&t->writer_lock);
   return rv;
 }