/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <vppinfra/longjmp.h>
#include <vppinfra/mheap.h>
#include <vppinfra/os.h>

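/* Release the virtual memory arena reserved by clib_smp_init: one
   region per CPU plus the extra region that holds the shared global
   heap. */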
void
clib_smp_free (clib_smp_main_t * m)
{
  clib_mem_vm_free (m->vm_base,
                    (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
}

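/* Set up one CPU's private memory: carve this CPU's VM region into an
   mheap at the bottom and a stack at the top, and make that mheap the
   current allocation heap.  CPU 0 additionally sizes the per_cpu_mains
   vector and creates the shared, thread-safe global heap in the extra
   VM region beyond the last per-cpu region.  Invoked from clib_smp_init
   via clib_calljmp, so it already runs on the per-cpu stack. */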
static uword
allocate_per_cpu_mheap (uword cpu)
{
  clib_smp_main_t *m = &clib_smp_main;
  void *heap;
  uword vm_size, stack_size, mheap_flags;

  ASSERT (os_get_thread_index () == cpu);

  vm_size = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
  stack_size = (uword) 1 << m->log2_n_per_cpu_stack_bytes;

  mheap_flags = MHEAP_FLAG_SMALL_OBJECT_CACHE;

  /* Heap extends up to start of stack. */
  heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
                                 vm_size - stack_size, mheap_flags);
  clib_mem_set_heap (heap);

  if (cpu == 0)
    {
      /* Now that we have a heap, allocate main structure on cpu 0. */
      vec_resize (m->per_cpu_mains, m->n_cpus);

      /* Allocate shared global heap (thread safe). */
      m->global_heap =
        mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu + m->n_cpus),
                                vm_size,
                                mheap_flags | MHEAP_FLAG_THREAD_SAFE);
    }

  m->per_cpu_mains[cpu].heap = heap;
  return 0;
}

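/* Reserve one VM region of 2^log2_n_per_cpu_vm_bytes per CPU plus one
   extra region, then set up each CPU's heap and stack by running
   allocate_per_cpu_mheap on that CPU's stack via clib_calljmp. */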
void
clib_smp_init (void)
{
  clib_smp_main_t *m = &clib_smp_main;
  uword cpu;

  m->vm_base =
    clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
  if (!m->vm_base)
    clib_error ("error allocating virtual memory");

  for (cpu = 0; cpu < m->n_cpus; cpu++)
    clib_calljmp (allocate_per_cpu_mheap, cpu,
                  clib_smp_stack_top_for_cpu (m, cpu));
}

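/* Allocate and initialize a lock together with its waiting FIFO of
   n_cpus - 1 entries.  With fewer than two CPUs the lock pointer is
   left null, meaning no locking is necessary. */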
void
clib_smp_lock_init (clib_smp_lock_t ** pl)
{
  clib_smp_lock_t *l;
  uword i, n_bytes, n_fifo_elts;

  /* No locking necessary if n_cpus <= 1.
     Null means no locking is necessary. */
  if (clib_smp_main.n_cpus < 2)
    {
      *pl = 0;
      return;
    }

  /* Need n_cpus - 1 elts in waiting fifo.  One CPU holds lock
     and others could potentially be waiting. */
  n_fifo_elts = clib_smp_main.n_cpus - 1;

  n_bytes = sizeof (l[0]) + n_fifo_elts * sizeof (l->waiting_fifo[0]);
  ASSERT_AND_PANIC (n_bytes % CLIB_CACHE_LINE_BYTES == 0);

  l = clib_mem_alloc_aligned (n_bytes, CLIB_CACHE_LINE_BYTES);

  memset (l, 0, n_bytes);
  l->n_waiting_fifo_elts = n_fifo_elts;

  for (i = 0; i < l->n_waiting_fifo_elts; i++)
    l->waiting_fifo[i].wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;

  *pl = l;
}

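/* Free a lock allocated by clib_smp_lock_init; a null lock pointer
   (uniprocessor case) is accepted. */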
void
clib_smp_lock_free (clib_smp_lock_t ** pl)
{
  if (*pl)
    clib_mem_free (*pl);
  *pl = 0;
}

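/* Lock slow path: called with header h0 when the lock cannot be taken
   immediately.  Atomically reserves a slot at the tail of the waiting
   FIFO; for spin locks it also handles the race where the holder
   released the lock before the reservation succeeded, in which case the
   lock is taken directly.  Once queued, the CPU publishes its wait type
   in the FIFO slot and spins until the releasing CPU marks the slot
   CLIB_SMP_LOCK_WAIT_DONE. */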
void
clib_smp_lock_slow_path (clib_smp_lock_t * l,
                         uword my_cpu,
                         clib_smp_lock_header_t h0, clib_smp_lock_type_t type)
{
  clib_smp_lock_header_t h1, h2, h3;
  uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
  uword n_fifo_elts = l->n_waiting_fifo_elts;
  uword my_tail;

  /* Atomically advance waiting FIFO tail pointer; my_tail will point
     to entry where we can insert ourselves to wait for lock to be granted. */
  while (1)
    {
      h1 = h0;
      my_tail = h1.waiting_fifo.head_index + h1.waiting_fifo.n_elts;
      my_tail = my_tail >= n_fifo_elts ? my_tail - n_fifo_elts : my_tail;
      h1.waiting_fifo.n_elts += 1;
      h1.request_cpu = my_cpu;

      ASSERT_AND_PANIC (h1.waiting_fifo.n_elts <= n_fifo_elts);
      ASSERT_AND_PANIC (my_tail >= 0 && my_tail < n_fifo_elts);

      h2 = clib_smp_lock_set_header (l, h1, h0);

      /* Tail successfully advanced? */
      if (clib_smp_lock_header_is_equal (h0, h2))
        break;

      /* If head and tail are both zero, the CPU holding the lock may have
         released it while our header update was failing. */
      else if (type == CLIB_SMP_LOCK_TYPE_SPIN)
        {
          while (!h2.writer_has_lock)
            {
              ASSERT_AND_PANIC (h2.waiting_fifo.n_elts == 0);
              h1 = h2;
              h1.request_cpu = my_cpu;
              h1.writer_has_lock = 1;

              h3 = clib_smp_lock_set_header (l, h1, h2);

              /* Got it? */
              if (clib_smp_lock_header_is_equal (h2, h3))
                return;

              h2 = h3;
            }
        }

      /* Try to advance tail again. */
      h0 = h2;
    }

  {
    clib_smp_lock_waiting_fifo_elt_t *w;

    w = l->waiting_fifo + my_tail;

    while (w->wait_type != CLIB_SMP_LOCK_WAIT_EMPTY)
      clib_smp_pause ();

    w->wait_type = (is_reader
                    ? CLIB_SMP_LOCK_WAIT_READER : CLIB_SMP_LOCK_WAIT_WRITER);

    /* Wait until CPU holding the lock grants us the lock. */
    while (w->wait_type != CLIB_SMP_LOCK_WAIT_DONE)
      clib_smp_pause ();

    w->wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
  }
}

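/* Unlock slow path: called with header h0 when other CPUs are queued
   in the waiting FIFO.  Pops the head entry, transfers the lock state
   (writer_has_lock / n_readers_with_lock) with an atomic header update,
   then marks the entry CLIB_SMP_LOCK_WAIT_DONE to release that waiter.
   Consecutive waiting readers are woken in one pass; a waiting writer
   is only granted the lock once the last reader has unlocked. */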
void
clib_smp_unlock_slow_path (clib_smp_lock_t * l,
                           uword my_cpu,
                           clib_smp_lock_header_t h0,
                           clib_smp_lock_type_t type)
{
  clib_smp_lock_header_t h1, h2;
  clib_smp_lock_waiting_fifo_elt_t *head;
  clib_smp_lock_wait_type_t head_wait_type;
  uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
  uword n_fifo_elts = l->n_waiting_fifo_elts;
  uword head_index, must_wait_for_readers;

  while (1)
    {
      /* Advance waiting fifo giving lock to first waiter. */
      while (1)
        {
          ASSERT_AND_PANIC (h0.waiting_fifo.n_elts != 0);

          h1 = h0;

          head_index = h1.waiting_fifo.head_index;
          head = l->waiting_fifo + head_index;
          if (is_reader)
            {
              ASSERT_AND_PANIC (h1.n_readers_with_lock > 0);
              h1.n_readers_with_lock -= 1;
            }
          else
            {
              /* Writer will already have lock. */
              ASSERT_AND_PANIC (h1.writer_has_lock);
            }

          while ((head_wait_type =
                  head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
            clib_smp_pause ();

          /* Don't advance FIFO to writer unless all readers have unlocked. */
          must_wait_for_readers =
            (type != CLIB_SMP_LOCK_TYPE_SPIN
             && head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER
             && h1.n_readers_with_lock != 0);

          if (!must_wait_for_readers)
            {
              head_index += 1;
              h1.waiting_fifo.n_elts -= 1;
              if (type != CLIB_SMP_LOCK_TYPE_SPIN)
                {
                  if (head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER)
                    h1.writer_has_lock = h1.n_readers_with_lock == 0;
                  else
                    {
                      h1.writer_has_lock = 0;
                      h1.n_readers_with_lock += 1;
                    }
                }
            }

          h1.waiting_fifo.head_index =
            head_index == n_fifo_elts ? 0 : head_index;
          h1.request_cpu = my_cpu;

          ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0
                            && h1.waiting_fifo.head_index < n_fifo_elts);
          ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0
                            && h1.waiting_fifo.n_elts <= n_fifo_elts);

          h2 = clib_smp_lock_set_header (l, h1, h0);

          if (clib_smp_lock_header_is_equal (h2, h0))
            break;

          h0 = h2;

          if (h0.waiting_fifo.n_elts == 0)
            return clib_smp_unlock_inline (l, type);
        }

      if (must_wait_for_readers)
        return;

      /* Wake up head of waiting fifo. */
      {
        uword done_waking;

        /* Shift lock to first thread waiting in fifo. */
        head->wait_type = CLIB_SMP_LOCK_WAIT_DONE;

        /* For read locks we may be able to wake multiple readers. */
        done_waking = 1;
        if (head_wait_type == CLIB_SMP_LOCK_WAIT_READER)
          {
            uword hi = h0.waiting_fifo.head_index;
            if (h0.waiting_fifo.n_elts != 0
                && l->waiting_fifo[hi].wait_type == CLIB_SMP_LOCK_WAIT_READER)
              done_waking = 0;
          }

        if (done_waking)
          break;
      }
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */