Florin Coras | 65784c1 | 2018-07-04 04:17:41 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2018 Cisco and/or its affiliates. |
| 3 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | * you may not use this file except in compliance with the License. |
| 5 | * You may obtain a copy of the License at: |
| 6 | * |
| 7 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | * |
| 9 | * Unless required by applicable law or agreed to in writing, software |
| 10 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | * See the License for the specific language governing permissions and |
| 13 | * limitations under the License. |
| 14 | */ |
| 15 | |
| 16 | #include <svm/message_queue.h> |
| 17 | #include <vppinfra/mem.h> |
| 18 | |
| 19 | svm_msg_q_t * |
| 20 | svm_msg_q_alloc (svm_msg_q_cfg_t * cfg) |
| 21 | { |
| 22 | svm_msg_q_ring_t *ring; |
| 23 | svm_msg_q_t *mq; |
Florin Coras | 95e0ce0 | 2018-07-05 23:44:23 -0700 | [diff] [blame] | 24 | uword size; |
Florin Coras | 65784c1 | 2018-07-04 04:17:41 -0700 | [diff] [blame] | 25 | int i; |
| 26 | |
| 27 | if (!cfg) |
| 28 | return 0; |
| 29 | |
| 30 | mq = clib_mem_alloc_aligned (sizeof (svm_msg_q_t), CLIB_CACHE_LINE_BYTES); |
| 31 | memset (mq, 0, sizeof (*mq)); |
| 32 | mq->q = svm_queue_init (cfg->q_nitems, sizeof (svm_msg_q_msg_t), |
| 33 | cfg->consumer_pid, 0); |
| 34 | vec_validate (mq->rings, cfg->n_rings - 1); |
| 35 | for (i = 0; i < cfg->n_rings; i++) |
| 36 | { |
| 37 | ring = &mq->rings[i]; |
| 38 | ring->elsize = cfg->ring_cfgs[i].elsize; |
| 39 | ring->nitems = cfg->ring_cfgs[i].nitems; |
| 40 | if (cfg->ring_cfgs[i].data) |
| 41 | ring->data = cfg->ring_cfgs[i].data; |
| 42 | else |
Florin Coras | 95e0ce0 | 2018-07-05 23:44:23 -0700 | [diff] [blame] | 43 | { |
| 44 | size = (uword) ring->nitems * ring->elsize; |
| 45 | ring->data = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES); |
| 46 | } |
Florin Coras | 65784c1 | 2018-07-04 04:17:41 -0700 | [diff] [blame] | 47 | } |
| 48 | |
| 49 | return mq; |
| 50 | } |
| 51 | |
| 52 | void |
| 53 | svm_msg_q_free (svm_msg_q_t * mq) |
| 54 | { |
| 55 | svm_msg_q_ring_t *ring; |
| 56 | |
| 57 | vec_foreach (ring, mq->rings) |
| 58 | { |
| 59 | clib_mem_free (ring->data); |
| 60 | } |
| 61 | vec_free (mq->rings); |
| 62 | clib_mem_free (mq); |
| 63 | } |
| 64 | |
| 65 | svm_msg_q_msg_t |
| 66 | svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes) |
| 67 | { |
| 68 | svm_msg_q_msg_t msg = {.as_u64 = ~0 }; |
| 69 | svm_msg_q_ring_t *ring; |
| 70 | |
| 71 | vec_foreach (ring, mq->rings) |
| 72 | { |
| 73 | if (ring->elsize < nbytes || ring->cursize == ring->nitems) |
| 74 | continue; |
| 75 | msg.ring_index = ring - mq->rings; |
| 76 | msg.elt_index = ring->tail; |
| 77 | ring->tail = (ring->tail + 1) % ring->nitems; |
| 78 | __sync_fetch_and_add (&ring->cursize, 1); |
| 79 | break; |
| 80 | } |
| 81 | return msg; |
| 82 | } |
| 83 | |
/**
 * Look up a ring by index.
 *
 * Uses vec_elt_at_index, which bounds-checks the index in debug builds.
 */
static inline svm_msg_q_ring_t *
svm_msg_q_get_ring (svm_msg_q_t * mq, u32 ring_index)
{
  return vec_elt_at_index (mq->rings, ring_index);
}
| 89 | |
| 90 | static inline void * |
| 91 | svm_msg_q_ring_data (svm_msg_q_ring_t * ring, u32 elt_index) |
| 92 | { |
| 93 | ASSERT (elt_index < ring->nitems); |
| 94 | return (ring->data + elt_index * ring->elsize); |
| 95 | } |
| 96 | |
| 97 | void * |
| 98 | svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg) |
| 99 | { |
| 100 | svm_msg_q_ring_t *ring = svm_msg_q_get_ring (mq, msg->ring_index); |
| 101 | return svm_msg_q_ring_data (ring, msg->elt_index); |
| 102 | } |
| 103 | |
/**
 * Return a message slot to its ring.
 *
 * Only in-order frees are supported: the slot being freed must be the
 * current ring head (the oldest allocated slot). Out-of-order frees hit
 * the ASSERT below; in release builds (ASSERT compiled out) such a free
 * would NOT advance head yet would still decrement cursize, corrupting
 * the ring accounting — callers must free in allocation order.
 *
 * @param mq	message queue
 * @param msg	handle previously returned by svm_msg_q_alloc_msg
 */
void
svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
{
  svm_msg_q_ring_t *ring;

  /* silently ignore handles with an out-of-range ring index
   * (e.g. the ~0 "allocation failed" handle) */
  if (vec_len (mq->rings) <= msg->ring_index)
    return;
  ring = &mq->rings[msg->ring_index];
  if (msg->elt_index == ring->head)
    {
      ring->head = (ring->head + 1) % ring->nitems;
    }
  else
    {
      /* for now, expect messages to be processed in order */
      ASSERT (0);
    }
  /* atomic: producer bumps cursize in svm_msg_q_alloc_msg */
  __sync_fetch_and_sub (&ring->cursize, 1);
}
| 123 | |
| 124 | static int |
| 125 | svm_msq_q_msg_is_valid (svm_msg_q_t * mq, svm_msg_q_msg_t * msg) |
| 126 | { |
| 127 | svm_msg_q_ring_t *ring; |
| 128 | u32 dist1, dist2; |
| 129 | |
| 130 | if (vec_len (mq->rings) <= msg->ring_index) |
| 131 | return 0; |
| 132 | ring = &mq->rings[msg->ring_index]; |
| 133 | |
| 134 | dist1 = ((ring->nitems + msg->ring_index) - ring->head) % ring->nitems; |
| 135 | if (ring->tail == ring->head) |
| 136 | dist2 = (ring->cursize == 0) ? 0 : ring->nitems; |
| 137 | else |
| 138 | dist2 = ((ring->nitems + ring->tail) - ring->head) % ring->nitems; |
| 139 | return (dist1 < dist2); |
| 140 | } |
| 141 | |
| 142 | int |
| 143 | svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t msg, int nowait) |
| 144 | { |
| 145 | ASSERT (svm_msq_q_msg_is_valid (mq, &msg)); |
| 146 | return svm_queue_add (mq->q, (u8 *) & msg, nowait); |
| 147 | } |
| 148 | |
| 149 | int |
| 150 | svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, |
| 151 | svm_q_conditional_wait_t cond, u32 time) |
| 152 | { |
| 153 | return svm_queue_sub (mq->q, (u8 *) msg, cond, time); |
| 154 | } |
| 155 | |
| 156 | /* |
| 157 | * fd.io coding-style-patch-verification: ON |
| 158 | * |
| 159 | * Local Variables: |
| 160 | * eval: (c-set-style "gnu") |
| 161 | * End: |
| 162 | */ |