/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__
/* vhost-user inline functions */
#include <vppinfra/elog.h>

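/*
 * Translate a guest physical address into a pointer inside the locally
 * mmap()ed guest memory regions. *hint caches the index of the region that
 * matched last time, so the common case is a single bounds check. On a hint
 * miss the regions are searched (with SIMD where available); on failure an
 * event-log entry is emitted and NULL is returned.
 */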
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
		    ((vui->regions[i].guest_phys_addr +
		      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
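  /*
   * Hint missed: compare addr against every region's [lo, hi) bounds in
   * parallel. Note that addr + 1 > lo is equivalent to addr >= lo for the
   * strict greater-than compares used below.
   */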
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
			    (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#elif __aarch64__ && __ARM_NEON
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /* First iteration */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Second iteration */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Third iteration: regions 4 and 5, so set bits 4 and 5 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 4);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 5);

  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#else
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
	  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  *hint = i;
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].guest_phys_addr);
	}
    }
#endif
  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (el) =
  {
    .format = "failed to map guest mem addr %lx",
    .format_args = "i8",
  };
  /* *INDENT-ON* */
  struct
  {
    uword addr;
  } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
  ed->addr = addr;
  *hint = 0;
  return 0;
}

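/*
 * Translate an address in the vhost-user master's (e.g. QEMU's) virtual
 * address space into a pointer inside the locally mmap()ed regions, using
 * each region's userspace_addr. Returns NULL if no region matches.
 */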
static_always_inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
	  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].userspace_addr);
	}
    }
  return 0;
}

#define VHOST_LOG_PAGE 0x1000

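/*
 * Mark the pages covering [addr, addr + len) as dirty in the shared log
 * bitmap so the master can track writes during live migration. Only active
 * when VHOST_F_LOG_ALL has been negotiated and a log base was supplied.
 * When is_host_address is set, addr is a master virtual address and is
 * first translated with map_user_mem().
 */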
static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
		    || !(vui->features & VIRTIO_FEATURE (VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}

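/*
 * Log the dirty page backing a given member of the used ring, using the
 * ring's guest physical address (log_guest_addr) supplied by the master.
 */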
#define vhost_user_log_dirty_ring(vui, vq, member)                           \
  if (PREDICT_FALSE (vq->log_used))                                          \
    {                                                                        \
      vhost_user_log_dirty_pages_2 (                                         \
	vui,                                                                 \
	vq->log_guest_addr +                                                 \
	  STRUCT_OFFSET_OF (vnet_virtio_vring_used_t, member),               \
	sizeof (vq->used->member), 0);                                       \
    }

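/*
 * vlib format callback used to render a vhost_trace_t entry in packet
 * traces ("show trace").
 */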
static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
  vnet_sw_interface_t *sw;
  u32 indent;

  if (pool_is_free (vum->vhost_user_interfaces, vui))
    {
      s = format (s, "vhost-user interface is deleted");
      return s;
    }
  sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
  indent = format_get_indent (s);
  s = format (s, "%U %U queue %d\n", format_white_space, indent,
	      format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
	      format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
	      format_white_space, indent,
	      t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U num_buff %u",
		format_white_space, indent, t->hdr.num_buffers);

  return s;
}

static_always_inline u64
vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
}

static_always_inline u64
vhost_user_is_event_idx_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
}

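/*
 * Interrupt the driver by writing to the callfd eventfd and reset the
 * interrupt-coalescing state for the ring.
 */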
static_always_inline void
vhost_user_kick (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
  if (PREDICT_FALSE (rv <= 0))
    {
      clib_unix_warning
	("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}

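/*
 * With VIRTIO_RING_F_EVENT_IDX the event indexes live in the spare slot at
 * the end of the opposite ring: avail_event in used->ring[qsz] and
 * used_event in avail->ring[qsz].
 */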
static_always_inline u16
vhost_user_avail_event_idx (vhost_user_vring_t * vq)
{
  volatile u16 *event_idx = (u16 *) & (vq->used->ring[vq->qsz_mask + 1]);

  return *event_idx;
}

static_always_inline u16
vhost_user_used_event_idx (vhost_user_vring_t * vq)
{
  volatile u16 *event_idx = (u16 *) & (vq->avail->ring[vq->qsz_mask + 1]);

  return *event_idx;
}

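/*
 * Returns non-zero when event_idx lies in the half-open window
 * [old_idx, new_idx), i.e. the index the peer asked to be notified at has
 * just been crossed. Equivalent to the virtio spec's vring_need_event().
 */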
static_always_inline u16
vhost_user_need_event (u16 event_idx, u16 new_idx, u16 old_idx)
{
  return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
}

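/*
 * Split-ring call path with EVENT_IDX: kick only if the driver's used_event
 * index indicates it wants a notification for the newly completed used
 * entries (and always on the very first call); otherwise just re-arm
 * interrupt coalescing.
 */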
static_always_inline void
vhost_user_send_call_event_idx (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 event_idx = vhost_user_used_event_idx (vq);

  vq->first_kick = 1;
  if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
      PREDICT_FALSE (!first_kick))
    {
      vhost_user_kick (vm, vq);
      vq->last_kick = event_idx;
    }
  else
    {
      vq->n_since_last_int = 0;
      vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
    }
}

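/*
 * Packed-ring call path with EVENT_IDX: read the driver event suppression
 * area; when it requests descriptor-specific notification, decode off_wrap
 * (15-bit offset plus wrap bit) and apply the same need-event window test,
 * otherwise kick unconditionally.
 */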
static_always_inline void
vhost_user_send_call_event_idx_packed (vlib_main_t * vm,
				       vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 off_wrap;
  u16 event_idx;
  u16 new_idx = vq->last_used_idx;
  u16 old_idx = vq->last_kick;

  if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
    {
      CLIB_COMPILER_BARRIER ();
      off_wrap = vq->avail_event->off_wrap;
      event_idx = off_wrap & 0x7fff;
      if (vq->used_wrap_counter != (off_wrap >> 15))
	event_idx -= (vq->qsz_mask + 1);

      if (new_idx <= old_idx)
	old_idx -= (vq->qsz_mask + 1);

      vq->first_kick = 1;
      vq->last_kick = event_idx;
      if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
	  PREDICT_FALSE (!first_kick))
	vhost_user_kick (vm, vq);
      else
	{
	  vq->n_since_last_int = 0;
	  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
	}
    }
  else
    vhost_user_kick (vm, vq);
}

static_always_inline void
vhost_user_send_call (vlib_main_t * vm, vhost_user_intf_t * vui,
		      vhost_user_vring_t * vq)
{
  if (vhost_user_is_event_idx_supported (vui))
    {
      if (vhost_user_is_packed_ring_supported (vui))
	vhost_user_send_call_event_idx_packed (vm, vq);
      else
	vhost_user_send_call_event_idx (vm, vq);
    }
  else
    vhost_user_kick (vm, vq);
}

static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
  return vui->admin_up && vui->is_ready;
}

static_always_inline void
vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
{
  vhost_user_main_t *vum = &vhost_user_main;

  if (vui->enable_gso)
    {
      if (add)
	{
	  vum->gso_count++;
	}
      else
	{
	  ASSERT (vum->gso_count > 0);
	  vum->gso_count--;
	}
    }
}

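/*
 * Packed-ring helpers: a descriptor is available when its AVAIL flag matches
 * the ring's current avail wrap counter (stored here as the flag bit value).
 * The advance/undo helpers below keep last_avail_idx, last_used_idx and the
 * wrap counters consistent as descriptors are consumed or rolled back.
 */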
static_always_inline u8
vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
{
  return (((vring->packed_desc[idx].flags & VRING_DESC_F_AVAIL) ==
	   vring->avail_wrap_counter));
}

static_always_inline void
vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
{
  vring->last_avail_idx++;
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    {
      vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
      vring->last_avail_idx = 0;
    }
}

static_always_inline void
vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
					 vhost_user_vring_t * vring,
					 u8 chained)
{
  if (chained)
    {
      vnet_virtio_vring_packed_desc_t *desc_table = vring->packed_desc;

      /* pick up the slot of the next avail idx */
      while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
	     VRING_DESC_F_NEXT)
	vhost_user_advance_last_avail_idx (vring);
    }

  vhost_user_advance_last_avail_idx (vring);
}

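/*
 * Step last_avail_idx back by one, undoing a previous advance and restoring
 * the avail wrap counter when the index crosses back over the ring boundary.
 */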
static_always_inline void
vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
{
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;

  if (PREDICT_FALSE (vring->last_avail_idx == 0))
    vring->last_avail_idx = vring->qsz_mask;
  else
    vring->last_avail_idx--;
}

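/*
 * Roll back partially consumed descriptors when a multi-buffer or chained
 * packet cannot be completed, rewinding last_avail_idx so the descriptors
 * are picked up again on the next pass.
 */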
static_always_inline void
vhost_user_dequeue_descs (vhost_user_vring_t *rxvq,
			  vnet_virtio_net_hdr_mrg_rxbuf_t *hdr,
			  u16 *n_descs_processed)
{
  u16 i;

  *n_descs_processed -= (hdr->num_buffers - 1);
  for (i = 0; i < hdr->num_buffers - 1; i++)
    vhost_user_undo_advanced_last_avail_idx (rxvq);
}

static_always_inline void
vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
				  u16 * n_descs_processed)
{
  while (*n_descs_processed)
    {
      vhost_user_undo_advanced_last_avail_idx (rxvq);
      (*n_descs_processed)--;
    }
}

static_always_inline void
vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
{
  vring->last_used_idx++;
  if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
    {
      vring->used_wrap_counter ^= 1;
      vring->last_used_idx = 0;
    }
}

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */