/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__
/* vhost-user inline functions */

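/*
 * Map a guest physical address to the VPP virtual address at which that
 * guest memory region is mmap'ed. The caller keeps a per-queue *hint
 * holding the last matching region index; consecutive descriptors almost
 * always fall in the same region, so the hinted region is tried first and
 * the SIMD/linear search below only runs on a miss. Returns 0 (and resets
 * the hint) when no region covers addr. A minimal usage sketch (variable
 * names illustrative, not taken from this file):
 *
 *   u32 map_hint = 0;
 *   void *p = map_guest_mem (vui, desc_addr, &map_hint);
 *   if (PREDICT_FALSE (p == 0))
 *     ;  // unmappable descriptor: signal an error, do not touch p
 */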
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;

  /* Fast path: the hinted region already covers addr. */
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
		    ((vui->regions[i].guest_phys_addr +
		      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
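/*
 * Slow path: test all VHOST_MEMORY_MAX_NREGIONS regions with SIMD. The
 * region_guest_addr_lo/hi arrays hold each region's start and end guest
 * physical address, so region i matches when lo[i] <= addr < hi[i],
 * i.e. (addr + 1) > lo[i] && hi[i] > addr, which is exactly the pair of
 * vector compares done per step below. The byte shuffle then gathers one
 * result byte per region so that count_trailing_zeros () of the movemask
 * yields the lowest matching region index; the extra bit at position
 * VHOST_MEMORY_MAX_NREGIONS guarantees a well-defined "no match" result.
 */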
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
			    (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#elif __aarch64__ && __ARM_NEON
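/*
 * NEON variant of the same search: each iteration tests one pair of
 * regions and records matches as bits of u32 at the region's index, so
 * count_trailing_zeros () returns the lowest matching region. The early
 * exits return as soon as a lower-numbered pair has matched.
 */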
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /* First iteration: regions 0 and 1 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Second iteration: regions 2 and 3 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Third iteration: regions 4 and 5. The bits must sit at positions 4
   * and 5 so that count_trailing_zeros () maps straight to the region
   * index. */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 4);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 5);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* Fourth iteration: regions 6 and 7, mirroring the SSE4.2 path above,
   * which also covers all VHOST_MEMORY_MAX_NREGIONS regions. */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[6]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[6]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);

  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#else
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
	  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  *hint = i;
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].guest_phys_addr);
	}
    }
#endif
  vu_log_err (vui, "failed to map guest mem addr %llx", addr);
  *hint = 0;
  return 0;
}

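/*
 * Map an address in the vhost-user master's virtual address space (e.g.
 * QEMU's) to a VPP virtual address. Plain linear scan of the regions;
 * returns 0 when no region covers addr.
 */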
static_always_inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
	  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].userspace_addr);
	}
    }
  return 0;
}

#define VHOST_LOG_PAGE 0x1000

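/*
 * Mark the dirty-log bitmap for the byte range [addr, addr + len). Each
 * bit covers one VHOST_LOG_PAGE (4 KiB) page; the master (e.g. QEMU
 * during live migration) scans this shared bitmap to find pages it must
 * re-copy. No-op unless the master negotiated VHOST_F_LOG_ALL and
 * supplied a log base. When is_host_address is set, addr is first
 * translated from the master's address space via map_user_mem ().
 */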
static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
		    || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}


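/*
 * Log a write to one member of the used ring. log_guest_addr is the guest
 * physical address of the used ring, so the member offset can be added to
 * it directly (is_host_address == 0). Typical use right after updating
 * the ring, e.g. (sketch; the exact call sites live in the .c files):
 *
 *   vq->used->idx = vq->last_used_idx;
 *   vhost_user_log_dirty_ring (vui, vq, idx);
 */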
#define vhost_user_log_dirty_ring(vui, vq, member)                       \
  do {                                                                   \
    if (PREDICT_FALSE (vq->log_used))                                    \
      vhost_user_log_dirty_pages_2 (vui,                                 \
				    vq->log_guest_addr +                 \
				    STRUCT_OFFSET_OF (vring_used_t,      \
						      member),           \
				    sizeof (vq->used->member), 0);       \
  } while (0)

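/*
 * Format a vhost_trace_t captured by the RX/TX nodes for "show trace":
 * interface name and queue id, the virtio ring flags seen on the
 * descriptor chain, and the parsed virtio_net_hdr.
 */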
static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      t->device_index);

  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);

  u32 indent = format_get_indent (s);

  s = format (s, "%U %U queue %d\n", format_white_space, indent,
	      format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
	      format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
	      format_white_space, indent,
	      t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U num_buff %u",
		format_white_space, indent, t->hdr.num_buffers);

  return s;
}

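/*
 * Interrupt the driver by writing to the queue's callfd eventfd, then
 * reset the interrupt-coalescing state (n_since_last_int and the deadline
 * used by the coalescing timer).
 */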
static_always_inline void
vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
  if (rv <= 0)
    {
      clib_unix_warning
	("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}

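/* Link is up when the interface is both admin-up and vhost-user ready. */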
static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
  return vui->admin_up && vui->is_ready;
}

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */