blob: c57b51acc7a0d6b487d2566325cd5b5288a5b66d [file] [log] [blame]
Florin Coras65784c12018-07-04 04:17:41 -07001/*
2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15/**
16 * @file
17 * @brief Unidirectional shared-memory multi-ring message queue
18 */
19
20#ifndef SRC_SVM_MESSAGE_QUEUE_H_
21#define SRC_SVM_MESSAGE_QUEUE_H_
22
23#include <vppinfra/clib.h>
Florin Coras3c2fed52018-07-04 04:15:05 -070024#include <vppinfra/error.h>
Florin Coras65784c12018-07-04 04:17:41 -070025#include <svm/queue.h>
26
27typedef struct svm_msg_q_ring_
28{
29 volatile u32 cursize; /**< current size of the ring */
30 u32 nitems; /**< max size of the ring */
Florin Coras3c2fed52018-07-04 04:15:05 -070031 volatile u32 head; /**< current head (for dequeue) */
32 volatile u32 tail; /**< current tail (for enqueue) */
Florin Coras65784c12018-07-04 04:17:41 -070033 u32 elsize; /**< size of an element */
34 u8 *data; /**< chunk of memory for msg data */
Florin Corase91bdb32018-07-11 16:35:38 -070035} __clib_packed svm_msg_q_ring_t;
Florin Coras65784c12018-07-04 04:17:41 -070036
37typedef struct svm_msg_q_
38{
39 svm_queue_t *q; /**< queue for exchanging messages */
40 svm_msg_q_ring_t *rings; /**< rings with message data*/
Florin Corase91bdb32018-07-11 16:35:38 -070041} __clib_packed svm_msg_q_t;
Florin Coras65784c12018-07-04 04:17:41 -070042
43typedef struct svm_msg_q_ring_cfg_
44{
45 u32 nitems;
46 u32 elsize;
47 void *data;
48} svm_msg_q_ring_cfg_t;
49
50typedef struct svm_msg_q_cfg_
51{
52 int consumer_pid; /**< pid of msg consumer */
53 u32 q_nitems; /**< msg queue size (not rings) */
54 u32 n_rings; /**< number of msg rings */
55 svm_msg_q_ring_cfg_t *ring_cfgs; /**< array of ring cfgs */
56} svm_msg_q_cfg_t;
57
58typedef union
59{
60 struct
61 {
62 u32 ring_index; /**< ring index, could be u8 */
63 u32 elt_index; /**< index in ring */
64 };
65 u64 as_u64;
66} svm_msg_q_msg_t;
67
Florin Coras3c2fed52018-07-04 04:15:05 -070068#define SVM_MQ_INVALID_MSG { .as_u64 = ~0 }
Florin Coras65784c12018-07-04 04:17:41 -070069/**
70 * Allocate message queue
71 *
72 * Allocates a message queue on the heap. Based on the configuration options,
73 * apart from the message queue this also allocates (one or multiple)
74 * shared-memory rings for the messages.
75 *
76 * @param cfg configuration options: queue len, consumer pid,
77 * ring configs
78 * @return message queue
79 */
80svm_msg_q_t *svm_msg_q_alloc (svm_msg_q_cfg_t * cfg);
81
82/**
83 * Free message queue
84 *
85 * @param mq message queue to be freed
86 */
87void svm_msg_q_free (svm_msg_q_t * mq);
88
89/**
90 * Allocate message buffer
91 *
92 * Message is allocated on the first available ring capable of holding
93 * the requested number of bytes.
94 *
95 * @param mq message queue
96 * @param nbytes number of bytes needed for message
97 * @return message structure pointing to the ring and position
98 * allocated
99 */
100svm_msg_q_msg_t svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes);
101
102/**
Florin Coras3c2fed52018-07-04 04:15:05 -0700103 * Allocate message buffer on ring
104 *
 * Message is allocated on the requested ring. The caller MUST check that
106 * the ring is not full.
107 *
108 * @param mq message queue
109 * @param ring_index ring on which the allocation should occur
110 * @return message structure pointing to the ring and position
111 * allocated
112 */
113svm_msg_q_msg_t svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index);
114
115/**
116 * Lock message queue and allocate message buffer on ring
117 *
118 * This should be used when multiple writers/readers are expected to
119 * compete for the rings/queue. Message should be enqueued by calling
 * @ref svm_msg_q_add_w_lock and the caller MUST unlock the queue once
 * the message is enqueued.
122 *
123 * @param mq message queue
124 * @param ring_index ring on which the allocation should occur
125 * @param noblock flag that indicates if request should block
126 * @param msg pointer to message to be filled in
127 * @return 0 on success, negative number otherwise
128 */
129int svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
130 u8 noblock, svm_msg_q_msg_t * msg);
131
132/**
Florin Coras65784c12018-07-04 04:17:41 -0700133 * Free message buffer
134 *
135 * Marks message buffer on ring as free.
136 *
137 * @param mq message queue
138 * @param msg message to be freed
139 */
140void svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
Florin Coras3c2fed52018-07-04 04:15:05 -0700141
Florin Coras65784c12018-07-04 04:17:41 -0700142/**
143 * Producer enqueue one message to queue
144 *
145 * Prior to calling this, the producer should've obtained a message buffer
146 * from one of the rings by calling @ref svm_msg_q_alloc_msg.
147 *
148 * @param mq message queue
149 * @param msg message (pointer to ring position) to be enqueued
150 * @param nowait flag to indicate if request is blocking or not
151 * @return success status
152 */
Florin Coras3c2fed52018-07-04 04:15:05 -0700153int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait);
154
155/**
156 * Producer enqueue one message to queue with mutex held
157 *
158 * Prior to calling this, the producer should've obtained a message buffer
159 * from one of the rings by calling @ref svm_msg_q_alloc_msg. It assumes
160 * the queue mutex is held.
161 *
162 * @param mq message queue
 * @param msg message (pointer to ring position) to be enqueued
165 */
Florin Coras52207f12018-07-12 14:48:06 -0700166void svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
Florin Coras65784c12018-07-04 04:17:41 -0700167
168/**
169 * Consumer dequeue one message from queue
170 *
171 * This returns the message pointing to the data in the message rings.
172 * The consumer is expected to call @ref svm_msg_q_free_msg once it
173 * finishes processing/copies the message data.
174 *
175 * @param mq message queue
176 * @param msg pointer to structure where message is to be received
177 * @param cond flag that indicates if request should block or not
 * @param time time to wait if condition is SVM_Q_TIMEDWAIT
Florin Coras65784c12018-07-04 04:17:41 -0700179 * @return success status
180 */
181int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
182 svm_q_conditional_wait_t cond, u32 time);
183
184/**
Florin Coras3c2fed52018-07-04 04:15:05 -0700185 * Consumer dequeue one message from queue with mutex held
186 *
187 * Returns the message pointing to the data in the message rings under the
188 * assumption that the message queue lock is already held. The consumer is
189 * expected to call @ref svm_msg_q_free_msg once it finishes
190 * processing/copies the message data.
191 *
192 * @param mq message queue
 * @param msg pointer to structure where message is to be received
195 */
196void svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
197
198/**
199 * Get data for message in queue
Florin Coras65784c12018-07-04 04:17:41 -0700200 *
201 * @param mq message queue
202 * @param msg message for which the data is requested
203 * @return pointer to data
204 */
205void *svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
206
Florin Coras3c2fed52018-07-04 04:15:05 -0700207/**
208 * Get message queue ring
209 *
210 * @param mq message queue
211 * @param ring_index index of ring
212 * @return pointer to ring
213 */
214svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
215
216/**
Florin Coras99368312018-08-02 10:45:44 -0700217 * Set event fd for queue consumer
218 *
219 * If set, queue will exclusively use eventfds for signaling. Moreover,
220 * afterwards, the queue should only be used in non-blocking mode. Waiting
221 * for events should be done externally using something like epoll.
222 *
223 * @param mq message queue
224 * @param fd consumer eventfd
225 */
226void svm_msg_q_set_consumer_eventfd (svm_msg_q_t * mq, int fd);
227
228/**
229 * Set event fd for queue producer
230 *
231 * If set, queue will exclusively use eventfds for signaling. Moreover,
232 * afterwards, the queue should only be used in non-blocking mode. Waiting
233 * for events should be done externally using something like epoll.
234 *
235 * @param mq message queue
236 * @param fd producer eventfd
237 */
238void svm_msg_q_set_producer_eventfd (svm_msg_q_t * mq, int fd);
239
240/**
241 * Allocate event fd for queue consumer
242 */
243int svm_msg_q_alloc_consumer_eventfd (svm_msg_q_t * mq);
244
245/**
 * Allocate event fd for queue producer
247 */
248int svm_msg_q_alloc_producer_eventfd (svm_msg_q_t * mq);
249
250/**
Florin Coras3c2fed52018-07-04 04:15:05 -0700251 * Check if message queue is full
252 */
253static inline u8
254svm_msg_q_is_full (svm_msg_q_t * mq)
255{
256 return (mq->q->cursize == mq->q->maxsize);
257}
258
259static inline u8
260svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
261{
262 ASSERT (ring_index < vec_len (mq->rings));
263 return (mq->rings[ring_index].cursize == mq->rings[ring_index].nitems);
264}
265
266/**
267 * Check if message queue is empty
268 */
269static inline u8
270svm_msg_q_is_empty (svm_msg_q_t * mq)
271{
272 return (mq->q->cursize == 0);
273}
274
275/**
276 * Check length of message queue
277 */
278static inline u32
279svm_msg_q_size (svm_msg_q_t * mq)
280{
281 return mq->q->cursize;
282}
283
284/**
285 * Check if message is invalid
286 */
287static inline u8
288svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
289{
290 return (msg->as_u64 == (u64) ~ 0);
291}
292
293/**
294 * Try locking message queue
295 */
296static inline int
297svm_msg_q_try_lock (svm_msg_q_t * mq)
298{
299 return pthread_mutex_trylock (&mq->q->mutex);
300}
301
302/**
303 * Lock, or block trying, the message queue
304 */
305static inline int
306svm_msg_q_lock (svm_msg_q_t * mq)
307{
308 return pthread_mutex_lock (&mq->q->mutex);
309}
310
Florin Coras3c2fed52018-07-04 04:15:05 -0700311/**
312 * Unlock message queue
313 */
314static inline void
315svm_msg_q_unlock (svm_msg_q_t * mq)
316{
Florin Coras3c2fed52018-07-04 04:15:05 -0700317 pthread_mutex_unlock (&mq->q->mutex);
318}
319
Florin Coras54693d22018-07-17 10:46:29 -0700320/**
321 * Wait for message queue event
322 *
Florin Coras99368312018-08-02 10:45:44 -0700323 * Must be called with mutex held. The queue only works non-blocking
324 * with eventfds, so handle blocking calls as an exception here.
Florin Coras54693d22018-07-17 10:46:29 -0700325 */
326static inline void
327svm_msg_q_wait (svm_msg_q_t * mq)
328{
Florin Coras99368312018-08-02 10:45:44 -0700329 svm_queue_wait (mq->q);
Florin Coras54693d22018-07-17 10:46:29 -0700330}
331
332/**
333 * Timed wait for message queue event
334 *
335 * Must be called with mutex held.
336 *
337 * @param mq message queue
338 * @param timeout time in seconds
339 */
340static inline int
341svm_msg_q_timedwait (svm_msg_q_t * mq, double timeout)
342{
Florin Coras99368312018-08-02 10:45:44 -0700343 return svm_queue_timedwait (mq->q, timeout);
344}
Florin Coras54693d22018-07-17 10:46:29 -0700345
Florin Coras99368312018-08-02 10:45:44 -0700346static inline int
347svm_msg_q_get_consumer_eventfd (svm_msg_q_t * mq)
348{
349 return mq->q->consumer_evtfd;
350}
351
352static inline int
353svm_msg_q_get_producer_eventfd (svm_msg_q_t * mq)
354{
355 return mq->q->producer_evtfd;
Florin Coras54693d22018-07-17 10:46:29 -0700356}
357
Florin Coras65784c12018-07-04 04:17:41 -0700358#endif /* SRC_SVM_MESSAGE_QUEUE_H_ */
359
360/*
361 * fd.io coding-style-patch-verification: ON
362 *
363 * Local Variables:
364 * eval: (c-set-style "gnu")
365 * End:
366 */