/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <svm/message_queue.h>
#include <vppinfra/mem.h>
#include <sys/eventfd.h>

static inline svm_msg_q_ring_t *
svm_msg_q_ring_inline (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}

svm_msg_q_ring_t *
svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return svm_msg_q_ring_inline (mq, ring_index);
}

static inline void *
svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index)
{
  ASSERT (elt_index < ring->nitems);
  return (ring->data + elt_index * ring->elsize);
}

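/*
 * Allocate a message queue in one cache-line aligned block: the
 * svm_msg_q_t header, the underlying svm_queue_t of q_nitems message
 * handles, the rings vector (with its vec header), and the data for any
 * ring that does not supply its own buffer via ring_cfgs[i].data.
 *
 * Minimal usage sketch (illustrative only; the exact layout of
 * svm_msg_q_cfg_t / svm_msg_q_ring_cfg_t is defined in
 * svm/message_queue.h, and ring_cfgs is assumed here to be a pointer
 * member):
 *
 *   svm_msg_q_ring_cfg_t rc[1] = { { .nitems = 32, .elsize = 64, .data = 0 } };
 *   svm_msg_q_cfg_t cfg = { .consumer_pid = getpid (),
 *                           .q_nitems = 32, .n_rings = 1, .ring_cfgs = rc };
 *   svm_msg_q_t *mq = svm_msg_q_alloc (&cfg);
 */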
svm_msg_q_t *
svm_msg_q_alloc (svm_msg_q_cfg_t * cfg)
{
  svm_msg_q_ring_cfg_t *ring_cfg;
  uword rings_sz = 0, mq_sz;
  svm_msg_q_ring_t *ring;
  u8 *base, *rings_ptr;
  vec_header_t *vh;
  u32 vec_sz, q_sz;
  svm_msg_q_t *mq;
  int i;

  ASSERT (cfg);

  vec_sz = vec_header_bytes (0) + sizeof (svm_msg_q_ring_t) * cfg->n_rings;
  for (i = 0; i < cfg->n_rings; i++)
    {
      if (cfg->ring_cfgs[i].data)
        continue;
      ring_cfg = &cfg->ring_cfgs[i];
      rings_sz += (uword) ring_cfg->nitems * ring_cfg->elsize;
    }

  q_sz = sizeof (svm_queue_t) + cfg->q_nitems * sizeof (svm_msg_q_msg_t);
  mq_sz = sizeof (svm_msg_q_t) + vec_sz + rings_sz + q_sz;
  base = clib_mem_alloc_aligned (mq_sz, CLIB_CACHE_LINE_BYTES);
  if (!base)
    return 0;

  mq = (svm_msg_q_t *) base;
  mq->q = svm_queue_init (base + sizeof (svm_msg_q_t), cfg->q_nitems,
                          sizeof (svm_msg_q_msg_t));
  mq->q->consumer_pid = cfg->consumer_pid;
  vh = (vec_header_t *) ((u8 *) mq->q + q_sz);
  vh->len = cfg->n_rings;
  mq->rings = (svm_msg_q_ring_t *) (vh + 1);
  rings_ptr = (u8 *) mq->rings + vec_sz;
  for (i = 0; i < cfg->n_rings; i++)
    {
      ring = &mq->rings[i];
      ring->elsize = cfg->ring_cfgs[i].elsize;
      ring->nitems = cfg->ring_cfgs[i].nitems;
      ring->cursize = ring->head = ring->tail = 0;
      if (cfg->ring_cfgs[i].data)
        ring->data = cfg->ring_cfgs[i].data;
      else
        {
          ring->data = rings_ptr;
          rings_ptr += (uword) ring->nitems * ring->elsize;
        }
    }

  return mq;
}

void
svm_msg_q_free (svm_msg_q_t * mq)
{
  svm_queue_free (mq->q);
  clib_mem_free (mq);
}

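/*
 * Reserve the next free element in the given ring by bumping the ring
 * tail and atomically incrementing cursize. The ring must not be full
 * (enforced by the ASSERT below); this is typically called with the
 * queue lock held, see svm_msg_q_lock_and_alloc_msg_w_ring.
 */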
svm_msg_q_msg_t
svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
{
  svm_msg_q_msg_t msg;
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, ring_index);

  ASSERT (ring->cursize < ring->nitems);
  msg.ring_index = ring - mq->rings;
  msg.elt_index = ring->tail;
  ring->tail = (ring->tail + 1) % ring->nitems;
  __sync_fetch_and_add (&ring->cursize, 1);
  return msg;
}

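/*
 * Lock the queue and allocate a message from the given ring. With
 * noblock set, returns -1 if the lock cannot be taken and -2 if the ring
 * is full; otherwise it blocks until space is available. On success (0)
 * the queue remains locked, so the caller typically fills in the message
 * and finishes with svm_msg_q_add_and_unlock.
 */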
int
svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
                                     u8 noblock, svm_msg_q_msg_t * msg)
{
  if (noblock)
    {
      if (svm_msg_q_try_lock (mq))
        return -1;
      if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, ring_index)))
        {
          svm_msg_q_unlock (mq);
          return -2;
        }
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
      if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (msg)))
        {
          svm_msg_q_unlock (mq);
          return -2;
        }
    }
  else
    {
      svm_msg_q_lock (mq);
      while (svm_msg_q_ring_is_full (mq, ring_index))
        svm_msg_q_wait (mq);
      *msg = svm_msg_q_alloc_msg_w_ring (mq, ring_index);
    }
  return 0;
}

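/*
 * Allocate a message by scanning the rings in order for the first one
 * whose element size can hold nbytes and which still has a free slot.
 * Returns a message with all bits set (invalid) if no ring qualifies.
 */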
svm_msg_q_msg_t
svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
{
  svm_msg_q_msg_t msg = {.as_u64 = ~0 };
  svm_msg_q_ring_t *ring;

  vec_foreach (ring, mq->rings)
  {
    if (ring->elsize < nbytes || ring->cursize == ring->nitems)
      continue;
    msg.ring_index = ring - mq->rings;
    msg.elt_index = ring->tail;
    ring->tail = (ring->tail + 1) % ring->nitems;
    __sync_fetch_and_add (&ring->cursize, 1);
    break;
  }
  return msg;
}

void *
svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring = svm_msg_q_ring_inline (mq, msg->ring_index);
  return svm_msg_q_ring_data (ring, msg->elt_index);
}

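/*
 * Return a message's element to its ring. Messages are currently
 * expected to be freed in allocation order, i.e. the element must be at
 * the ring head (anything else trips the ASSERT); cursize is then
 * decremented atomically.
 */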
void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return;
  ring = &mq->rings[msg->ring_index];
  if (msg->elt_index == ring->head)
    {
      ring->head = (ring->head + 1) % ring->nitems;
    }
  else
    {
      /* for now, expect messages to be processed in order */
      ASSERT (0);
    }
  __sync_fetch_and_sub (&ring->cursize, 1);
}

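/*
 * Sanity check used before enqueueing: a message is valid if its ring
 * index exists and its element lies within the ring's occupied region,
 * i.e. its distance from head is strictly smaller than the head-to-tail
 * distance (a full ring counting as nitems).
 */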
static int
svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  u32 dist1, dist2, tail, head;
  svm_msg_q_ring_t *ring;

  if (vec_len (mq->rings) <= msg->ring_index)
    return 0;
  ring = &mq->rings[msg->ring_index];
  tail = ring->tail;
  head = ring->head;

  dist1 = ((ring->nitems + msg->elt_index) - head) % ring->nitems;
  if (tail == head)
    dist2 = (ring->cursize == 0) ? 0 : ring->nitems;
  else
    dist2 = ((ring->nitems + tail) - head) % ring->nitems;
  return (dist1 < dist2);
}

int
svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  return svm_queue_add (mq->q, (u8 *) msg, nowait);
}

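/*
 * Enqueue a previously allocated message without re-taking the queue
 * mutex (the caller already holds it, e.g. via
 * svm_msg_q_lock_and_alloc_msg_w_ring) and release the lock afterwards.
 */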
void
svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  ASSERT (svm_msq_q_msg_is_valid (mq, msg));
  svm_queue_add_raw (mq->q, (u8 *) msg);
  svm_msg_q_unlock (mq);
}

int
svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
               svm_q_conditional_wait_t cond, u32 time)
{
  return svm_queue_sub (mq->q, (u8 *) msg, cond, time);
}

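/*
 * Dequeue a message using the raw variant, i.e. without taking the queue
 * mutex; the caller is expected to already hold the queue lock.
 */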
void
svm_msg_q_sub_w_lock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_queue_sub_raw (mq->q, (u8 *) msg);
}

void
svm_msg_q_set_consumer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->consumer_evtfd = fd;
}

void
svm_msg_q_set_producer_eventfd (svm_msg_q_t * mq, int fd)
{
  mq->q->producer_evtfd = fd;
}

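/*
 * Allocate non-blocking eventfds that can be used to signal the consumer
 * or producer side of the queue. Both helpers return 0 on success and -1
 * if eventfd() fails.
 */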
int
svm_msg_q_alloc_consumer_eventfd (svm_msg_q_t * mq)
{
  int fd;
  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_consumer_eventfd (mq, fd);
  return 0;
}

int
svm_msg_q_alloc_producer_eventfd (svm_msg_q_t * mq)
{
  int fd;
  if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)
    return -1;
  svm_msg_q_set_producer_eventfd (mq, fd);
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */