/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Session and session manager
 */

#include <vnet/session/session.h>
#include <vlibmemory/api.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/session/application.h>

/**
 * Per-type vector of transport protocol virtual function tables
 */
static transport_proto_vft_t *tp_vfts;

session_manager_main_t session_manager_main;

/*
 * Session lookup key: (src-ip, dst-ip, src-port, dst-port, session-type)
 * Value: (owner thread index << 32 | session index)
 */
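/*
 * A minimal sketch of how a lookup consumer is assumed to unpack the value
 * (variable names here are illustrative only, not part of this file):
 *
 *   u32 owner_thread_index = kv.value >> 32;
 *   u32 session_index = kv.value & 0xFFFFFFFF;
 */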
static void
stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc,
                                 u64 value)
{
  session_manager_main_t *smm = &session_manager_main;
  session_kv4_t kv4;
  session_kv6_t kv6;

  switch (sst)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      make_v4_ss_kv_from_tc (&kv4, tc);
      kv4.value = value;
      clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, 1 /* is_add */ );
      break;
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      make_v6_ss_kv_from_tc (&kv6, tc);
      kv6.value = value;
      clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, 1 /* is_add */ );
      break;
    default:
      clib_warning ("Session type not supported");
      ASSERT (0);
    }
}

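/**
 * Add an established session to the main lookup table, keyed by its
 * transport connection.
 */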
void
stream_session_table_add (session_manager_main_t * smm, stream_session_t * s,
                          u64 value)
{
  transport_connection_t *tc;

  tc = tp_vfts[s->session_type].get_connection (s->connection_index,
                                                s->thread_index);
  stream_session_table_add_for_tc (s->session_type, tc, value);
}

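/**
 * Add a half-open connection to the half-open lookup table.
 *
 * The value stores (app index << 32 | transport connection index), as
 * packed by stream_session_open() below.
 */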
static void
stream_session_half_open_table_add (u8 sst, transport_connection_t * tc,
                                    u64 value)
{
  session_manager_main_t *smm = &session_manager_main;
  session_kv4_t kv4;
  session_kv6_t kv6;

  switch (sst)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      make_v4_ss_kv_from_tc (&kv4, tc);
      kv4.value = value;
      clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
                                1 /* is_add */ );
      break;
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      make_v6_ss_kv_from_tc (&kv6, tc);
      kv6.value = value;
      clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
                                1 /* is_add */ );
      break;
    default:
      clib_warning ("Session type not supported");
      ASSERT (0);
    }
}

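/**
 * Remove a session from the main lookup table, keyed by its transport
 * connection. Returns the bihash delete result, 0 on success.
 */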
static int
stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst,
                                 transport_connection_t * tc)
{
  session_kv4_t kv4;
  session_kv6_t kv6;

  switch (sst)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      make_v4_ss_kv_from_tc (&kv4, tc);
      return clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4,
                                       0 /* is_add */ );
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      make_v6_ss_kv_from_tc (&kv6, tc);
      return clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6,
                                       0 /* is_add */ );
    default:
      clib_warning ("Session type not supported");
      ASSERT (0);
    }

  return 0;
}

static int
stream_session_table_del (session_manager_main_t * smm, stream_session_t * s)
{
  transport_connection_t *tc;

  tc = tp_vfts[s->session_type].get_connection (s->connection_index,
                                                s->thread_index);
  return stream_session_table_del_for_tc (smm, s->session_type, tc);
}

static void
stream_session_half_open_table_del (session_manager_main_t * smm, u8 sst,
                                    transport_connection_t * tc)
{
  session_kv4_t kv4;
  session_kv6_t kv6;

  switch (sst)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      make_v4_ss_kv_from_tc (&kv4, tc);
      clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
                                0 /* is_add */ );
      break;
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      make_v6_ss_kv_from_tc (&kv6, tc);
      clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
                                0 /* is_add */ );
      break;
    default:
      clib_warning ("Session type not supported");
      ASSERT (0);
    }
}

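/**
 * Look up a listener session by local ip/port/proto.
 *
 * Tries an exact match on the local address first, then retries with the
 * local address zeroed out to match listeners bound to all interfaces.
 */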
stream_session_t *
stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto)
{
  session_manager_main_t *smm = &session_manager_main;
  session_kv4_t kv4;
  int rv;

  make_v4_listener_kv (&kv4, lcl, lcl_port, proto);
  rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
  if (rv == 0)
    return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value);

  /* Zero out the lcl ip */
  kv4.key[0] = 0;
  rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
  if (rv == 0)
    return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value);

  return 0;
}

/**
 * Look up a session based on the 5-tuple passed as argument.
 *
 * First tries to find an established session. If that fails, it looks for
 * a listener session and, failing an exact match, retries the listener
 * lookup with a wildcarded local address (listener bound to all interfaces).
 */
stream_session_t *
stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt,
                        u16 lcl_port, u16 rmt_port, u8 proto,
                        u32 my_thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  session_kv4_t kv4;
  int rv;

  /* Lookup session amongst established ones */
  make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
  rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
  if (rv == 0)
    return stream_session_get_tsi (kv4.value, my_thread_index);

  /* If nothing is found, check if any listener is available */
  return stream_session_lookup_listener4 (lcl, lcl_port, proto);
}

stream_session_t *
stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto)
{
  session_manager_main_t *smm = &session_manager_main;
  session_kv6_t kv6;
  int rv;

  make_v6_listener_kv (&kv6, lcl, lcl_port, proto);
  rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
  if (rv == 0)
    return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);

  /* Zero out the lcl ip */
  kv6.key[0] = kv6.key[1] = 0;
  rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
  if (rv == 0)
    return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);

  return 0;
}

/**
 * Look up a session based on the 5-tuple passed as argument.
 *
 * First tries to find an established session. If that fails, it looks for
 * a listener session and, failing an exact match, retries the listener
 * lookup with a wildcarded local address (listener bound to all interfaces).
 */
stream_session_t *
stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt,
                        u16 lcl_port, u16 rmt_port, u8 proto,
                        u32 my_thread_index)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();
  session_kv6_t kv6;
  int rv;

  make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
  rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
  if (rv == 0)
    return stream_session_get_tsi (kv6.value, my_thread_index);

  /* If nothing is found, check if any listener is available */
  return stream_session_lookup_listener6 (lcl, lcl_port, proto);
}

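/**
 * Dispatch a listener lookup to the v4 or v6 variant based on session type.
 */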
stream_session_t *
stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto)
{
  switch (proto)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto);
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto);
    }
  return 0;
}

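/**
 * Look up a half-open connection by 5-tuple.
 *
 * Returns the stored (app index << 32 | transport index) value, or
 * HALF_OPEN_LOOKUP_INVALID_VALUE if no entry is found.
 */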
static u64
stream_session_half_open_lookup (session_manager_main_t * smm,
                                 ip46_address_t * lcl, ip46_address_t * rmt,
                                 u16 lcl_port, u16 rmt_port, u8 proto)
{
  session_kv4_t kv4;
  session_kv6_t kv6;
  int rv;

  switch (proto)
    {
    case SESSION_TYPE_IP4_UDP:
    case SESSION_TYPE_IP4_TCP:
      make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto);
      rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);

      if (rv == 0)
        return kv4.value;

      return HALF_OPEN_LOOKUP_INVALID_VALUE;
    case SESSION_TYPE_IP6_UDP:
    case SESSION_TYPE_IP6_TCP:
      make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto);
      rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);

      if (rv == 0)
        return kv6.value;

      return HALF_OPEN_LOOKUP_INVALID_VALUE;
    }

  /* Unknown session type: treat as a miss rather than returning 0, which
   * callers could misread as a valid value */
  return HALF_OPEN_LOOKUP_INVALID_VALUE;
}

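/**
 * Look up the transport connection for a v4 5-tuple.
 *
 * Checks established sessions first, then listeners, and finally half-open
 * connections.
 */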
transport_connection_t *
stream_session_lookup_transport4 (session_manager_main_t * smm,
                                  ip4_address_t * lcl, ip4_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
{
  session_kv4_t kv4;
  stream_session_t *s;
  int rv;

  /* Lookup session amongst established ones */
  make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
  rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
  if (rv == 0)
    {
      s = stream_session_get_tsi (kv4.value, my_thread_index);

      return tp_vfts[s->session_type].get_connection (s->connection_index,
                                                      my_thread_index);
    }

  /* If nothing is found, check if any listener is available */
  s = stream_session_lookup_listener4 (lcl, lcl_port, proto);
  if (s)
    return tp_vfts[s->session_type].get_listener (s->connection_index);

  /* Finally, try half-open connections */
  rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);
  if (rv == 0)
    return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF);

  return 0;
}

transport_connection_t *
stream_session_lookup_transport6 (session_manager_main_t * smm,
                                  ip6_address_t * lcl, ip6_address_t * rmt,
                                  u16 lcl_port, u16 rmt_port, u8 proto,
                                  u32 my_thread_index)
{
  stream_session_t *s;
  session_kv6_t kv6;
  int rv;

  make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
  rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
  if (rv == 0)
    {
      s = stream_session_get_tsi (kv6.value, my_thread_index);

      return tp_vfts[s->session_type].get_connection (s->connection_index,
                                                      my_thread_index);
    }

  /* If nothing is found, check if any listener is available */
  s = stream_session_lookup_listener6 (lcl, lcl_port, proto);
  if (s)
    return tp_vfts[s->session_type].get_listener (s->connection_index);

  /* Finally, try half-open connections. Note: s is null at this point, so
   * index the vft by proto, as the v4 variant does */
  rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
  if (rv == 0)
    return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF);

  return 0;
}

/**
 * Allocate vpp event queue (once) per worker thread
 */
void
vpp_session_event_queue_allocate (session_manager_main_t * smm,
                                  u32 thread_index)
{
  api_main_t *am = &api_main;
  void *oldheap;

  if (smm->vpp_event_queues[thread_index] == 0)
    {
      /* Allocate event fifo in the /vpe-api shared-memory segment */
      oldheap = svm_push_data_heap (am->vlib_rp);

      smm->vpp_event_queues[thread_index] =
        unix_shared_memory_queue_init (2048 /* nels $$$$ config */ ,
                                       sizeof (session_fifo_event_t),
                                       0 /* consumer pid */ ,
                                       0 /* (do not) signal when queue
                                          * non-empty */ );

      svm_pop_heap (oldheap);
    }
}

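/**
 * Report the name and size of the fifo segment with the given index.
 */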
void
session_manager_get_segment_info (u32 index, u8 ** name, u32 * size)
{
  svm_fifo_segment_private_t *s;
  s = svm_fifo_get_segment (index);
  *name = s->h->segment_name;
  *size = s->ssvm.ssvm_size;
}

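/**
 * Create a fifo segment and store its index in the session manager's
 * segment vector. On failure, logs a warning, frees the segment name and
 * returns -1.
 */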
always_inline int
session_manager_add_segment_i (session_manager_main_t * smm,
                               session_manager_t * sm,
                               u32 segment_size, u8 * segment_name)
{
  svm_fifo_segment_create_args_t _ca, *ca = &_ca;
  int rv;

  memset (ca, 0, sizeof (*ca));

  ca->segment_name = (char *) segment_name;
  ca->segment_size = segment_size;

  rv = svm_fifo_segment_create (ca);
  if (rv)
    {
      clib_warning ("svm_fifo_segment_create ('%s', %d) failed",
                    ca->segment_name, ca->segment_size);
      vec_free (segment_name);
      return -1;
    }

  vec_add1 (sm->segment_indices, ca->new_segment_index);

  return 0;
}

static int
session_manager_add_segment (session_manager_main_t * smm,
                             session_manager_t * sm)
{
  u8 *segment_name;
  u32 add_segment_size;
  u32 default_segment_size = 128 << 10;

  segment_name = format (0, "%d-%d%c", getpid (),
                         smm->unique_segment_name_counter++, 0);
  add_segment_size =
    sm->add_segment_size ? sm->add_segment_size : default_segment_size;

  return session_manager_add_segment_i (smm, sm, add_segment_size,
                                        segment_name);
}

int
session_manager_add_first_segment (session_manager_main_t * smm,
                                   session_manager_t * sm, u32 segment_size,
                                   u8 ** segment_name)
{
  *segment_name = format (0, "%d-%d%c", getpid (),
                          smm->unique_segment_name_counter++, 0);
  return session_manager_add_segment_i (smm, sm, segment_size,
                                        *segment_name);
}

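/**
 * Tear down a session manager: walk all fifo segments it owns and
 * disconnect any sessions still attached to them. Segments themselves are
 * deleted lazily, when their last session goes away.
 */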
void
session_manager_del (session_manager_main_t * smm, session_manager_t * sm)
{
  u32 *deleted_sessions = 0;
  u32 *deleted_thread_indices = 0;
  int i, j;

  /* Across all fifo segments used by the server */
  for (j = 0; j < vec_len (sm->segment_indices); j++)
    {
      svm_fifo_segment_private_t *fifo_segment;
      svm_fifo_t **fifos;

      /* Vector of fifos allocated in the segment */
      fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]);
      fifos = (svm_fifo_t **) fifo_segment->h->fifos;

      /*
       * Remove any residual sessions from the session lookup table.
       * Don't bother deleting the individual fifos, we're going to
       * throw away the fifo segment in a minute.
       */
      for (i = 0; i < vec_len (fifos); i++)
        {
          svm_fifo_t *fifo;
          u32 session_index, thread_index;
          stream_session_t *session;

          fifo = fifos[i];
          session_index = fifo->server_session_index;
          thread_index = fifo->server_thread_index;

          session = pool_elt_at_index (smm->sessions[thread_index],
                                       session_index);

          /* Add to the deleted_sessions vector (once!) */
          if (!session->is_deleted)
            {
              session->is_deleted = 1;
              vec_add1 (deleted_sessions,
                        session - smm->sessions[thread_index]);
              vec_add1 (deleted_thread_indices, thread_index);
            }
        }

      for (i = 0; i < vec_len (deleted_sessions); i++)
        {
          stream_session_t *session;

          session =
            pool_elt_at_index (smm->sessions[deleted_thread_indices[i]],
                               deleted_sessions[i]);

          /* Instead of directly removing the session call disconnect */
          stream_session_disconnect (session);

          /*
             stream_session_table_del (smm, session);
             pool_put(smm->sessions[deleted_thread_indices[i]], session);
           */
        }

      vec_reset_length (deleted_sessions);
      vec_reset_length (deleted_thread_indices);

      /* Instead of removing the segment, test when removing the session if
       * the segment can be removed
       */
      /* svm_fifo_segment_delete (fifo_segment); */
    }

  vec_free (deleted_sessions);
  vec_free (deleted_thread_indices);
}

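/**
 * Allocate an rx/tx fifo pair for a new session.
 *
 * Walks the manager's existing segments looking for space. If none fits
 * and the manager allows it, one additional segment is created and the
 * walk is retried once.
 */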
int
session_manager_allocate_session_fifos (session_manager_main_t * smm,
                                        session_manager_t * sm,
                                        svm_fifo_t ** server_rx_fifo,
                                        svm_fifo_t ** server_tx_fifo,
                                        u32 * fifo_segment_index,
                                        u8 * added_a_segment)
{
  svm_fifo_segment_private_t *fifo_segment;
  u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ;
  int i;

  *added_a_segment = 0;

  /* Allocate svm fifos */
  ASSERT (vec_len (sm->segment_indices));

again:
  for (i = 0; i < vec_len (sm->segment_indices); i++)
    {
      *fifo_segment_index = sm->segment_indices[i];
      fifo_segment = svm_fifo_get_segment (*fifo_segment_index);

      fifo_size = sm->rx_fifo_size;
      fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size;
      *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size);

      fifo_size = sm->tx_fifo_size;
      fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size;
      *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size);

      if (*server_rx_fifo == 0)
        {
          /* This would be very odd, but handle it... */
          if (*server_tx_fifo != 0)
            {
              svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo);
              *server_tx_fifo = 0;
            }
          continue;
        }
      if (*server_tx_fifo == 0)
        {
          if (*server_rx_fifo != 0)
            {
              svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo);
              *server_rx_fifo = 0;
            }
          continue;
        }
      break;
    }

  /* See if we're supposed to create another segment */
  if (*server_rx_fifo == 0)
    {
      if (sm->add_segment)
        {
          if (*added_a_segment)
            {
              clib_warning ("added a segment, still can't allocate a fifo");
              return SESSION_ERROR_NEW_SEG_NO_SPACE;
            }

          if (session_manager_add_segment (smm, sm))
            return VNET_API_ERROR_URI_FIFO_CREATE_FAILED;

          *added_a_segment = 1;
          goto again;
        }
      else
        return SESSION_ERROR_NO_SPACE;
    }
  return 0;
}

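/**
 * Create a new session for a transport connection: allocate fifos, notify
 * the server of any new segment, initialize session state and add the
 * session to the main lookup table.
 */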
int
stream_session_create_i (session_manager_main_t * smm, application_t * app,
                         transport_connection_t * tc,
                         stream_session_t ** ret_s)
{
  int rv;
  svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0;
  u32 fifo_segment_index;
  u32 pool_index, seg_size;
  stream_session_t *s;
  u64 value;
  u32 thread_index = tc->thread_index;
  session_manager_t *sm;
  u8 segment_added;
  u8 *seg_name;

  sm = session_manager_get (app->session_manager_index);

  /* Check the API queue */
  if (app->mode == APP_SERVER && application_api_queue_is_full (app))
    return SESSION_ERROR_API_QUEUE_FULL;

  if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo,
                                                    &server_tx_fifo,
                                                    &fifo_segment_index,
                                                    &segment_added)))
    return rv;

  if (segment_added && app->mode == APP_SERVER)
    {
      /* Send an API message to the external server, to map new segment */
      ASSERT (app->cb_fns.add_segment_callback);

      session_manager_get_segment_info (fifo_segment_index, &seg_name,
                                        &seg_size);
      if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name,
                                            seg_size))
        return VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
    }

  /* Create the session */
  pool_get (smm->sessions[thread_index], s);
  memset (s, 0, sizeof (*s));

  /* Initialize backpointers */
  pool_index = s - smm->sessions[thread_index];
  server_rx_fifo->server_session_index = pool_index;
  server_rx_fifo->server_thread_index = thread_index;

  server_tx_fifo->server_session_index = pool_index;
  server_tx_fifo->server_thread_index = thread_index;

  s->server_rx_fifo = server_rx_fifo;
  s->server_tx_fifo = server_tx_fifo;

  /* Initialize state machine, such as it is... */
  s->session_type = app->session_type;
  s->session_state = SESSION_STATE_CONNECTING;
  s->app_index = application_get_index (app);
  s->server_segment_index = fifo_segment_index;
  s->thread_index = thread_index;
  s->session_index = pool_index;

  /* Attach transport to session */
  s->connection_index = tc->c_index;

  /* Attach session to transport */
  tc->s_index = s->session_index;

  /* Add to the main lookup table */
  value = (((u64) thread_index) << 32) | (u64) s->session_index;
  stream_session_table_add_for_tc (app->session_type, tc, value);

  *ret_s = s;

  return 0;
}

/**
 * Enqueue data for delivery to the session peer. Does not notify the peer
 * of the enqueue event, but on request can queue notification events for
 * later delivery by calling stream_server_flush_enqueue_events().
 *
 * @param tc Transport connection for which the data is enqueued
 * @param data Data to be enqueued
 * @param len Length of data to be enqueued
 * @param queue_event Flag to indicate if peer is to be notified or if event
 *                    is to be queued. The former is useful when more data is
 *                    enqueued and only one event is to be generated.
 * @return Number of bytes enqueued, or a negative value if enqueueing failed.
 */
int
stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len,
                             u8 queue_event)
{
  stream_session_t *s;
  int enqueued;

  s = stream_session_get (tc->s_index, tc->thread_index);

  /* Make sure there's enough space left. We might've filled the pipes */
  if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo)))
    return -1;

  enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data);

  if (queue_event)
    {
      /* Queue RX event on this fifo. Eventually these will need to be
       * flushed by calling stream_server_flush_enqueue_events () */
      session_manager_main_t *smm = vnet_get_session_manager_main ();
      u32 thread_index = s->thread_index;
      u32 my_enqueue_epoch = smm->current_enqueue_epoch[thread_index];

      if (s->enqueue_epoch != my_enqueue_epoch)
        {
          s->enqueue_epoch = my_enqueue_epoch;
          vec_add1 (smm->session_indices_to_enqueue_by_thread[thread_index],
                    s - smm->sessions[thread_index]);
        }
    }

  return enqueued;
}

/** Check if we have space in the rx fifo to push more bytes */
u8
stream_session_no_space (transport_connection_t * tc, u32 thread_index,
                         u16 data_len)
{
  stream_session_t *s = stream_session_get (tc->s_index, thread_index);

  if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
    return 1;

  if (data_len > svm_fifo_max_enqueue (s->server_rx_fifo))
    return 1;

  return 0;
}

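/**
 * Peek bytes from the session's tx fifo at the given offset, without
 * dequeueing them.
 */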
u32
stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
                           u32 offset, u32 max_bytes)
{
  stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
  return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes,
                        buffer);
}

u32
stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
{
  stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
  return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes);
}

/**
 * Notify session peer that new data has been enqueued.
 *
 * @param s Stream session for which the event is to be generated.
 * @param block Flag to indicate if call should block if event queue is full.
 *
 * @return 0 on success or a negative number if failed to send notification.
 */
static int
stream_session_enqueue_notify (stream_session_t * s, u8 block)
{
  application_t *app;
  session_fifo_event_t evt;
  unix_shared_memory_queue_t *q;
  static u32 serial_number;

  if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED))
    return 0;

  /* Get session's server */
  app = application_get (s->app_index);

  /* Fabricate event */
  evt.fifo = s->server_rx_fifo;
  evt.event_type = FIFO_EVENT_SERVER_RX;
  evt.event_id = serial_number++;
  evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo);

  /* Add event to server's event queue */
  q = app->event_queue;

  /* Based on request block (or not) for lack of space */
  if (block || PREDICT_TRUE (q->cursize < q->maxsize))
    unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
                                  0 /* do wait for mutex */ );
  else
    return -1;

  if (1)
    {
      ELOG_TYPE_DECLARE (e) =
      {
        .format = "evt-enqueue: id %d length %d",
        .format_args = "i4i4",
      };
      struct
      {
        u32 data[2];
      } *ed;
      ed = ELOG_DATA (&vlib_global_main.elog_main, e);
      ed->data[0] = evt.event_id;
      ed->data[1] = evt.enqueue_length;
    }

  return 0;
}

/**
 * Flushes the queue of sessions pending notification of newly enqueued
 * data.
 *
 * @param thread_index Thread index for which the flush is to be performed.
 * @return 0 on success or a positive number indicating the number of
 *         failures due to the API queue being full.
 */
int
session_manager_flush_enqueue_events (u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  u32 *session_indices_to_enqueue;
  int i, errors = 0;

  session_indices_to_enqueue =
    smm->session_indices_to_enqueue_by_thread[thread_index];

  for (i = 0; i < vec_len (session_indices_to_enqueue); i++)
    {
      stream_session_t *s0;

      /* Get session */
      s0 = stream_session_get (session_indices_to_enqueue[i], thread_index);
      if (stream_session_enqueue_notify (s0, 0 /* don't block */ ))
        {
          errors++;
        }
    }

  vec_reset_length (session_indices_to_enqueue);

  smm->session_indices_to_enqueue_by_thread[thread_index] =
    session_indices_to_enqueue;

  /* Increment enqueue epoch for next round */
  smm->current_enqueue_epoch[thread_index]++;

  return errors;
}

/**
 * Start listening on the server's ip/port pair for the requested transport.
 *
 * Creates a 'dummy' stream session with state LISTENING to be used in
 * session lookups, prior to establishing a connection. Requests the
 * transport to build its own specific listening connection.
 */
int
stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port)
{
  session_manager_main_t *smm = &session_manager_main;
  stream_session_t *s;
  transport_connection_t *tc;
  application_t *srv;
  u32 tci;

  srv = application_get (server_index);

  pool_get (smm->listen_sessions[srv->session_type], s);
  memset (s, 0, sizeof (*s));

  s->session_type = srv->session_type;
  s->session_state = SESSION_STATE_LISTENING;
  s->session_index = s - smm->listen_sessions[srv->session_type];
  s->app_index = srv->index;

  /* Transport bind/listen */
  tci = tp_vfts[srv->session_type].bind (smm->vlib_main, s->session_index,
                                         ip, port);

  /* Attach transport to session */
  s->connection_index = tci;
  tc = tp_vfts[srv->session_type].get_listener (tci);

  srv->session_index = s->session_index;

  /* Add to the main lookup table */
  stream_session_table_add_for_tc (s->session_type, tc, s->session_index);

  return 0;
}

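/**
 * Stop listening: remove the listener from the lookup table, unbind the
 * transport and free the listen session.
 */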
void
stream_session_stop_listen (u32 server_index)
{
  session_manager_main_t *smm = &session_manager_main;
  stream_session_t *listener;
  transport_connection_t *tc;
  application_t *srv;

  srv = application_get (server_index);
  listener = pool_elt_at_index (smm->listen_sessions[srv->session_type],
                                srv->session_index);

  tc = tp_vfts[srv->session_type].get_listener (listener->connection_index);
  stream_session_table_del_for_tc (smm, listener->session_type, tc);

  tp_vfts[srv->session_type].unbind (smm->vlib_main,
                                     listener->connection_index);
  pool_put (smm->listen_sessions[srv->session_type], listener);
}

int
connect_server_add_segment_cb (application_t * ss, char *segment_name,
                               u32 segment_size)
{
  /* Not expected to be called; complain loudly if it is */
  ASSERT (0);
  return 0;
}

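/**
 * Initialize the session manager used for actively opened (connect-side)
 * sessions of the given type and allocate its first segment.
 */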
void
connects_session_manager_init (session_manager_main_t * smm, u8 session_type)
{
  session_manager_t *sm;
  u32 connect_fifo_size = 8 << 10;      /* Config? */
  u32 default_segment_size = 1 << 20;

  pool_get (smm->session_managers, sm);
  memset (sm, 0, sizeof (*sm));

  sm->add_segment_size = default_segment_size;
  sm->rx_fifo_size = connect_fifo_size;
  sm->tx_fifo_size = connect_fifo_size;
  sm->add_segment = 1;

  session_manager_add_segment (smm, sm);
  smm->connect_manager_index[session_type] = sm - smm->session_managers;
}

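/**
 * Notification from transport that a connect has completed or failed.
 *
 * On success a full session is created on top of the transport connection;
 * in both cases the application is notified and the half-open table entry
 * is removed.
 */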
void
stream_session_connect_notify (transport_connection_t * tc, u8 sst,
                               u8 is_fail)
{
  session_manager_main_t *smm = &session_manager_main;
  application_t *app;
  stream_session_t *new_s = 0;
  u64 value;

  value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip,
                                           tc->lcl_port, tc->rmt_port,
                                           tc->proto);
  if (value == HALF_OPEN_LOOKUP_INVALID_VALUE)
    {
      clib_warning ("This can't be good!");
      return;
    }

  app = application_get (value >> 32);

  if (!is_fail)
    {
      /* Create new session (server segments are allocated if needed) */
      if (stream_session_create_i (smm, app, tc, &new_s))
        return;

      app->session_index = stream_session_get_index (new_s);
      app->thread_index = new_s->thread_index;

      /* Allocate vpp event queue for this thread if needed */
      vpp_session_event_queue_allocate (smm, tc->thread_index);
    }

  /* Notify client */
  app->cb_fns.session_connected_callback (app->api_client_index, new_s,
                                          is_fail);

  /* Cleanup session lookup */
  stream_session_half_open_table_del (smm, sst, tc);
}

void
stream_session_accept_notify (transport_connection_t * tc)
{
  application_t *server;
  stream_session_t *s;

  s = stream_session_get (tc->s_index, tc->thread_index);
  server = application_get (s->app_index);
  server->cb_fns.session_accept_callback (s);
}

/**
 * Notification from transport that connection is being closed.
 *
 * A disconnect is sent to the application but state is not removed. Once
 * the disconnect is acknowledged by the application, session disconnect is
 * called. Ultimately this leads to close being called on the transport
 * (passive close).
 */
void
stream_session_disconnect_notify (transport_connection_t * tc)
{
  application_t *server;
  stream_session_t *s;

  s = stream_session_get (tc->s_index, tc->thread_index);
  server = application_get (s->app_index);
  server->cb_fns.session_disconnect_callback (s);
}

/**
 * Cleans up session and associated app if needed.
 */
void
stream_session_delete (stream_session_t * s)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();
  svm_fifo_segment_private_t *fifo_segment;
  application_t *app;
  int rv;

  /* Delete from the main lookup table */
  rv = stream_session_table_del (smm, s);

  if (rv)
    clib_warning ("hash delete error, rv %d", rv);

  /* Cleanup fifo segments */
  fifo_segment = svm_fifo_get_segment (s->server_segment_index);
  svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo);
  svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo);

  /* Cleanup app if client */
  app = application_get (s->app_index);
  if (app->mode == APP_CLIENT)
    {
      application_del (app);
    }
  else if (app->mode == APP_SERVER)
    {
      session_manager_t *sm;
      svm_fifo_t **fifos;
      u32 fifo_index;

      sm = session_manager_get (app->session_manager_index);

      /* Vector of fifos still allocated in the segment */
      fifos = (svm_fifo_t **) fifo_segment->h->fifos;
      fifo_index = svm_fifo_segment_index (fifo_segment);

      /* Remove segment only if it holds no fifos and is not the first */
      if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0)
        svm_fifo_segment_delete (fifo_segment);
    }

  pool_put (smm->sessions[s->thread_index], s);
}

/**
 * Notification from transport that connection is being deleted.
 *
 * This should be called only on previously fully established sessions. For
 * instance, failed connects should call stream_session_connect_notify and
 * indicate that the connect has failed.
 */
void
stream_session_delete_notify (transport_connection_t * tc)
{
  stream_session_t *s;

  s = stream_session_get_if_valid (tc->s_index, tc->thread_index);
  if (!s)
    {
      clib_warning ("Surprised!");
      return;
    }
  stream_session_delete (s);
}

/**
 * Notify application that connection has been reset.
 */
void
stream_session_reset_notify (transport_connection_t * tc)
{
  stream_session_t *s;
  application_t *app;

  s = stream_session_get (tc->s_index, tc->thread_index);
  app = application_get (s->app_index);
  app->cb_fns.session_reset_callback (s);
}

/**
 * Accept a stream session. Optionally ping the server by callback.
 */
int
stream_session_accept (transport_connection_t * tc, u32 listener_index,
                       u8 sst, u8 notify)
{
  session_manager_main_t *smm = &session_manager_main;
  application_t *server;
  stream_session_t *s, *listener;
  int rv;

  /* Find the server */
  listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index);
  server = application_get (listener->app_index);

  if ((rv = stream_session_create_i (smm, server, tc, &s)))
    return rv;

  /* Allocate vpp event queue for this thread if needed */
  vpp_session_event_queue_allocate (smm, tc->thread_index);

  /* Shoulder-tap the server */
  if (notify)
    {
      server->cb_fns.session_accept_callback (s);
    }

  return 0;
}

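/**
 * Ask the transport to open a connection towards addr/port and track the
 * resulting half-open connection, so the connect notification can find
 * the application that requested it.
 */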
void
stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
                     u32 app_index)
{
  transport_connection_t *tc;
  u32 tci;
  u64 value;

  /* Ask transport to open connection */
  tci = tp_vfts[sst].open (addr, port_host_byte_order);

  /* Get transport connection */
  tc = tp_vfts[sst].get_half_open (tci);

  /* Store app index and transport connection index */
  value = (((u64) app_index) << 32) | (u64) tc->c_index;

  /* Add to the half-open lookup table */
  stream_session_half_open_table_add (sst, tc, value);
}

/**
 * Disconnect session and propagate to transport. This should eventually
 * result in a delete notification that allows us to cleanup session state.
 * Called for both active/passive disconnects.
 */
void
stream_session_disconnect (stream_session_t * s)
{
  tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
  s->session_state = SESSION_STATE_CLOSED;
}

/**
 * Cleanup transport and session state.
 */
void
stream_session_cleanup (stream_session_t * s)
{
  tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
  stream_session_delete (s);
}

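/**
 * Register a transport protocol's virtual function table and select the
 * rx function used when dequeueing data for it.
 */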
void
session_register_transport (u8 type, const transport_proto_vft_t * vft)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();

  vec_validate (tp_vfts, type);
  tp_vfts[type] = *vft;

  /* If an offset function is provided, then peek instead of dequeue */
  smm->session_rx_fns[type] =
    (vft->rx_fifo_offset) ? session_fifo_rx_peek : session_fifo_rx_dequeue;
}

transport_proto_vft_t *
session_get_transport_vft (u8 type)
{
  if (type >= vec_len (tp_vfts))
    return 0;
  return &tp_vfts[type];
}

static clib_error_t *
session_manager_main_init (vlib_main_t * vm)
{
  u32 num_threads;
  vlib_thread_main_t *vtm = vlib_get_thread_main ();
  session_manager_main_t *smm = &session_manager_main;
  int i;

  smm->vlib_main = vm;
  smm->vnet_main = vnet_get_main ();

  num_threads = 1 /* main thread */ + vtm->n_threads;

  if (num_threads < 1)
    return clib_error_return (0, "n_thread_stacks not set");

  /* $$$ config parameters */
  svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ ,
                         20 /* timeout in seconds */ );

  /* configure per-thread ** vectors */
  vec_validate (smm->sessions, num_threads - 1);
  vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1);
  vec_validate (smm->tx_buffers, num_threads - 1);
  vec_validate (smm->fifo_events, num_threads - 1);
  vec_validate (smm->evts_partially_read, num_threads - 1);
  vec_validate (smm->current_enqueue_epoch, num_threads - 1);
  vec_validate (smm->vpp_event_queues, num_threads - 1);

  /* $$$$ preallocate hack config parameter */
  for (i = 0; i < 200000; i++)
    {
      stream_session_t *ss;
      pool_get (smm->sessions[0], ss);
      memset (ss, 0, sizeof (*ss));
    }

  for (i = 0; i < 200000; i++)
    pool_put_index (smm->sessions[0], i);

  clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table",
                         200000 /* $$$$ config parameter nbuckets */ ,
                         (64 << 20) /* $$$ config parameter table size */ );
  clib_bihash_init_48_8 (&smm->v6_session_hash, "v6 session table",
                         200000 /* $$$$ config parameter nbuckets */ ,
                         (64 << 20) /* $$$ config parameter table size */ );

  clib_bihash_init_16_8 (&smm->v4_half_open_hash, "v4 half-open table",
                         200000 /* $$$$ config parameter nbuckets */ ,
                         (64 << 20) /* $$$ config parameter table size */ );
  clib_bihash_init_48_8 (&smm->v6_half_open_hash, "v6 half-open table",
                         200000 /* $$$$ config parameter nbuckets */ ,
                         (64 << 20) /* $$$ config parameter table size */ );

  for (i = 0; i < SESSION_N_TYPES; i++)
    smm->connect_manager_index[i] = INVALID_INDEX;

  return 0;
}

VLIB_INIT_FUNCTION (session_manager_main_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */