tls: add async processing support
Adds support for TLS asynchronous processing using OpenSSL.
Adds a new CLI command to configure the OpenSSL TLS parameters applied to
the OpenSSL context and session. The new CLI format is:
tls openssl set-tls [record-size <size>]
[record-split-size <size>]
[max-pipelines <size>]
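For example, a hypothetical invocation (the values below are illustrative
only, not recommended settings) might look like:
  tls openssl set-tls record-size 4096 record-split-size 2048 max-pipelines 8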
Sets default values for the following TLS configuration parameters:
- first_seg_size: 32MB
- add_seg_size: 256MB
Type: feature
Signed-off-by: Varun Rapelly <vrapelly@marvell.com>
Change-Id: I990be31fced9e258fdb036f5751cd67594b0bce7
diff --git a/src/plugins/tlsopenssl/tls_async.c b/src/plugins/tlsopenssl/tls_async.c
index d85af68..c6d2b2f 100644
--- a/src/plugins/tlsopenssl/tls_async.c
+++ b/src/plugins/tlsopenssl/tls_async.c
@@ -17,12 +17,36 @@
#include <vlib/node_funcs.h>
#include <openssl/engine.h>
#include <tlsopenssl/tls_openssl.h>
+#include <dlfcn.h>
-#define SSL_ASYNC_INFLIGHT 1
-#define SSL_ASYNC_READY 2
-#define SSL_ASYNC_REENTER 3
#define MAX_VECTOR_ASYNC 256
+#define SSL_WANT_NAMES \
+ { \
+ [0] = "N/A", [SSL_NOTHING] = "SSL_NOTHING", \
+ [SSL_WRITING] = "SSL_WRITING", [SSL_READING] = "SSL_READING", \
+ [SSL_X509_LOOKUP] = "SSL_X509_LOOKUP", \
+ [SSL_ASYNC_PAUSED] = "SSL_ASYNC_PAUSED", \
+ [SSL_ASYNC_NO_JOBS] = "SSL_ASYNC_NO_JOBS", \
+ [SSL_CLIENT_HELLO_CB] = "SSL_CLIENT_HELLO_CB", \
+ }
+
+static const char *ssl_want[] = SSL_WANT_NAMES;
+
+#define foreach_ssl_evt_status_type_ \
+ _ (INVALID_STATUS, "Async event invalid status") \
+ _ (INFLIGHT, "Async event inflight") \
+ _ (READY, "Async event ready") \
+ _ (REENTER, "Async event reenter") \
+ _ (MAX_STATUS, "Async event max status")
+
+typedef enum ssl_evt_status_type_
+{
+#define _(sym, str) SSL_ASYNC_##sym,
+ foreach_ssl_evt_status_type_
+#undef _
+} ssl_evt_status_type_t;
+
typedef struct openssl_tls_callback_arg_
{
int thread_index;
@@ -33,7 +57,8 @@
{
u32 ctx_index;
int session_index;
- u8 status;
+ ssl_evt_status_type_t status;
+ ssl_async_evt_type_t type;
openssl_resume_handler *handler;
openssl_tls_callback_arg_t cb_args;
@@ -46,12 +71,15 @@
{
int evt_run_head;
int evt_run_tail;
+ int depth;
+ int max_depth;
} openssl_async_queue_t;
typedef struct openssl_async_
{
openssl_evt_t ***evt_pool;
openssl_async_queue_t *queue;
+ openssl_async_queue_t *queue_in_init;
void (*polling) (void);
u8 start_polling;
ENGINE *engine;
@@ -74,8 +102,8 @@
void qat_init_thread (void *arg);
struct engine_polling engine_list[] = {
- {"qat", qat_polling, qat_pre_init, qat_init_thread},
- {"dasync", dasync_polling, NULL, NULL}
+ { "qat", qat_polling, qat_pre_init, qat_init_thread },
+ { "dasync", dasync_polling, NULL, NULL }
};
openssl_async_t openssl_async_main;
@@ -98,6 +126,7 @@
vec_validate (om->evt_pool, num_threads - 1);
vec_validate (om->queue, num_threads - 1);
+ vec_validate (om->queue_in_init, num_threads - 1);
om->start_polling = 0;
om->engine = 0;
@@ -106,6 +135,13 @@
{
om->queue[i].evt_run_head = -1;
om->queue[i].evt_run_tail = -1;
+ om->queue[i].depth = 0;
+ om->queue[i].max_depth = 0;
+
+ om->queue_in_init[i].evt_run_head = -1;
+ om->queue_in_init[i].evt_run_tail = -1;
+ om->queue_in_init[i].depth = 0;
+ om->queue_in_init[i].max_depth = 0;
}
om->polling = NULL;
@@ -243,16 +279,23 @@
tls_async_openssl_callback (SSL * s, void *cb_arg)
{
openssl_evt_t *event, *event_tail;
+ openssl_async_queue_t *queue;
openssl_async_t *om = &openssl_async_main;
openssl_tls_callback_arg_t *args = (openssl_tls_callback_arg_t *) cb_arg;
int thread_index = args->thread_index;
int event_index = args->event_index;
- int *evt_run_tail = &om->queue[thread_index].evt_run_tail;
- int *evt_run_head = &om->queue[thread_index].evt_run_head;
TLS_DBG (2, "Set event %d to run\n", event_index);
event = openssl_evt_get_w_thread (event_index, thread_index);
+ if (event->type == SSL_ASYNC_EVT_INIT)
+ queue = om->queue_in_init;
+ else
+ queue = om->queue;
+
+ int *evt_run_tail = &queue[thread_index].evt_run_tail;
+ int *evt_run_head = &queue[thread_index].evt_run_head;
+
/* Happend when a recursive case, especially in SW simulation */
if (PREDICT_FALSE (event->status == SSL_ASYNC_READY))
{
@@ -276,10 +319,498 @@
return 1;
}
+/*
+ * Continue an async SSL_write() call.
+ * This function is _only_ called when continuing an SSL_write() call
+ * that returned WANT_ASYNC.
+ * Since it continues the handling of an existing, paused SSL job
+ * (ASYNC_JOB*), the 'buf' and 'num' params to SSL_write() have
+ * already been set in the initial call, and are meaningless here.
+ * Therefore buf=NULL and num=0 are passed to emphasize the point.
+ * On successful write, TLS context total_async_write bytes are updated.
+ */
+static int
+openssl_async_write_from_fifo_into_ssl (svm_fifo_t *f, SSL *ssl,
+ openssl_ctx_t *oc)
+{
+ int wrote = 0;
+
+ wrote = SSL_write (ssl, NULL, 0);
+ ossl_check_err_is_fatal (ssl, wrote);
+
+ oc->total_async_write -= wrote;
+ svm_fifo_dequeue_drop (f, wrote);
+
+ return wrote;
+}
+
+/*
+ * Perform SSL_write from TX FIFO head.
+ * On successful write, TLS context total_async_write bytes are updated.
+ */
+static_always_inline int
+openssl_write_from_fifo_head_into_ssl (svm_fifo_t *f, SSL *ssl,
+ openssl_ctx_t *oc, u32 max_len)
+{
+ int wrote = 0, rv, i = 0, len;
+ u32 n_segs = 2;
+ svm_fifo_seg_t fs[n_segs];
+
+ max_len = clib_min (oc->total_async_write, max_len);
+
+ len = svm_fifo_segments (f, 0, fs, &n_segs, max_len);
+ if (len <= 0)
+ return 0;
+
+ while (wrote < len && i < n_segs)
+ {
+ rv = SSL_write (ssl, fs[i].data, fs[i].len);
+ wrote += (rv > 0) ? rv : 0;
+ if (rv < (int) fs[i].len)
+ break;
+ i++;
+ }
+
+ if (wrote)
+ {
+ oc->total_async_write -= wrote;
+ svm_fifo_dequeue_drop (f, wrote);
+ }
+
+ return wrote;
+}
+
+static int
+openssl_async_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl)
+{
+ int read;
+
+ read = SSL_read (ssl, NULL, 0);
+ if (read <= 0)
+ return read;
+
+ svm_fifo_enqueue_nocopy (f, read);
+
+ return read;
+}
+
+/*
+ * Pop the current event from queue and update tail if needed
+ */
+static void
+tls_async_dequeue_update (openssl_evt_t *event, int *evt_run_head,
+ int *evt_run_tail, int *queue_depth)
+{
+ /* remove the event from queue head */
+ *evt_run_head = event->next;
+ event->status = SSL_ASYNC_INVALID_STATUS;
+ event->next = -1;
+
+ (*queue_depth)--;
+
+ if (*evt_run_head < 0)
+ {
+ *evt_run_tail = -1;
+ if (*queue_depth)
+ clib_warning ("queue empty but depth:%d\n", *queue_depth);
+ }
+}
+
+static int
+tls_async_dequeue_event (int thread_index)
+{
+ openssl_evt_t *event;
+ openssl_async_t *om = &openssl_async_main;
+ openssl_async_queue_t *queue = om->queue;
+ int *evt_run_tail = &queue[thread_index].evt_run_tail;
+ int *evt_run_head = &queue[thread_index].evt_run_head;
+ int dequeue_cnt = clib_min (queue[thread_index].depth, MAX_VECTOR_ASYNC);
+ const u32 max_len = 128 << 10;
+
+ /* dequeue all pending events; events enqueued during this routine call
+ * will be handled the next time tls_async_dequeue_event is invoked */
+ while (*evt_run_head >= 0 && dequeue_cnt--)
+ {
+ session_t *app_session, *tls_session;
+ openssl_ctx_t *oc;
+ tls_ctx_t *ctx;
+ SSL *ssl;
+
+ event = openssl_evt_get_w_thread (*evt_run_head, thread_index);
+ ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index);
+ oc = (openssl_ctx_t *) ctx;
+ ssl = oc->ssl;
+
+ if (event->type == SSL_ASYNC_EVT_RD)
+ {
+ /* read event */
+ svm_fifo_t *app_rx_fifo, *tls_rx_fifo;
+ int read;
+
+ app_session = session_get_from_handle (ctx->app_session_handle);
+ app_rx_fifo = app_session->rx_fifo;
+
+ tls_session = session_get_from_handle (ctx->tls_session_handle);
+ tls_rx_fifo = tls_session->rx_fifo;
+
+ /* continue the paused job */
+ read = openssl_async_read_from_ssl_into_fifo (app_rx_fifo, ssl);
+ if (read < 0)
+ {
+ if (SSL_want_async (ssl))
+ goto handle_later;
+
+ tls_async_dequeue_update (event, evt_run_head, evt_run_tail,
+ &queue[thread_index].depth);
+ goto ev_rd_done;
+ }
+
+ /* read finished or in error, remove the event from queue */
+ tls_async_dequeue_update (event, evt_run_head, evt_run_tail,
+ &queue[thread_index].depth);
+
+ /* Unrecoverable protocol error. Reset connection */
+ if (PREDICT_FALSE ((read < 0) &&
+ (SSL_get_error (ssl, read) == SSL_ERROR_SSL)))
+ {
+ tls_notify_app_io_error (ctx);
+ goto ev_rd_done;
+ }
+
+ /*
+ * Managed to read some data. If handshake just completed, session
+ * may still be in accepting state.
+ */
+ if (app_session->session_state >= SESSION_STATE_READY)
+ tls_notify_app_enqueue (ctx, app_session);
+
+ /* managed to read, try to read more */
+ while (read > 0)
+ {
+ read =
+ openssl_read_from_ssl_into_fifo (app_rx_fifo, ctx, max_len);
+ if (read < 0)
+ {
+ if (SSL_want_async (ssl))
+ {
+ vpp_tls_async_enqueue_event (oc, SSL_ASYNC_EVT_RD, NULL,
+ 0);
+ goto ev_rd_queued;
+ }
+ }
+
+ /* Unrecoverable protocol error. Reset connection */
+ if (PREDICT_FALSE ((read < 0) &&
+ (SSL_get_error (ssl, read) == SSL_ERROR_SSL)))
+ {
+ tls_notify_app_io_error (ctx);
+ goto ev_rd_done;
+ }
+
+ /* If handshake just completed, session may still be in accepting
+ * state */
+ if (read >= 0 &&
+ app_session->session_state >= SESSION_STATE_READY)
+ tls_notify_app_enqueue (ctx, app_session);
+ }
+
+ ev_rd_done:
+ /* read done */
+ ctx->flags &= ~TLS_CONN_F_ASYNC_RD;
+
+ if ((SSL_pending (ssl) > 0) ||
+ svm_fifo_max_dequeue_cons (tls_rx_fifo))
+ {
+ tls_add_vpp_q_builtin_rx_evt (tls_session);
+ }
+
+ ev_rd_queued:
+ continue;
+ }
+ else if (event->type == SSL_ASYNC_EVT_WR)
+ {
+ /* write event */
+ int wrote, wrote_sum = 0;
+ u32 space, enq_buf;
+ svm_fifo_t *app_tx_fifo, *tls_tx_fifo;
+ transport_send_params_t *sp =
+ (transport_send_params_t *) event->handler;
+
+ app_session = session_get_from_handle (ctx->app_session_handle);
+ app_tx_fifo = app_session->tx_fifo;
+
+ /* continue the paused job */
+ wrote =
+ openssl_async_write_from_fifo_into_ssl (app_tx_fifo, ssl, oc);
+ if (wrote < 0)
+ {
+ if (SSL_want_async (ssl))
+ /* paused job not ready, wait */
+ goto handle_later;
+ clib_warning ("[wrote:%d want:%s ctx:%d]\n", wrote,
+ ssl_want[SSL_want (ssl)], oc->openssl_ctx_index);
+ }
+ wrote_sum += wrote;
+
+ /* paused job done, remove event, update queue */
+ tls_async_dequeue_update (event, evt_run_head, evt_run_tail,
+ &queue[thread_index].depth);
+
+ /* Unrecoverable protocol error. Reset connection */
+ if (PREDICT_FALSE (wrote < 0))
+ {
+ tls_notify_app_io_error (ctx);
+ clib_warning (
+ "Unrecoverable protocol error. Reset connection\n");
+ goto ev_in_queue;
+ }
+
+ tls_session = session_get_from_handle (ctx->tls_session_handle);
+ tls_tx_fifo = tls_session->tx_fifo;
+
+ /* prepare for remaining write(s) */
+ space = svm_fifo_max_enqueue_prod (tls_tx_fifo);
+ /* Leave a bit of extra space for tls ctrl data, if any needed */
+ space = clib_max ((int) space - TLSO_CTRL_BYTES, 0);
+
+ /* continue remaining openssl_ctx_write request */
+ while (oc->total_async_write)
+ {
+ int rv;
+ u32 deq_max = svm_fifo_max_dequeue_cons (app_tx_fifo);
+
+ deq_max = clib_min (deq_max, space);
+ deq_max = clib_min (deq_max, sp->max_burst_size);
+ if (!deq_max)
+ goto check_tls_fifo;
+
+ /* Make sure tcp's tx fifo can actually buffer all bytes to
+ * be dequeued. If under memory pressure, tls's fifo segment
+ * might not be able to allocate the chunks needed. This also
+ * avoids errors from the underlying custom bio to the ssl
+ * infra which at times can get stuck. */
+ if (svm_fifo_provision_chunks (tls_tx_fifo, 0, 0,
+ deq_max + TLSO_CTRL_BYTES))
+ goto check_tls_fifo;
+
+ rv = openssl_write_from_fifo_head_into_ssl (app_tx_fifo, ssl, oc,
+ deq_max);
+
+ /* Unrecoverable protocol error. Reset connection */
+ if (PREDICT_FALSE (rv < 0))
+ {
+ tls_notify_app_io_error (ctx);
+ clib_warning (
+ "Unrecoverable protocol error. Reset connection\n");
+ goto ev_in_queue;
+ }
+
+ if (!rv)
+ {
+ if (SSL_want_async (ssl))
+ {
+ /* new paused job, add queue event and wait */
+ vpp_tls_async_enqueue_event (oc, SSL_ASYNC_EVT_WR, sp,
+ 0);
+ goto ev_in_queue;
+ }
+ clib_warning ("[rv:%d want:%s ctx:%d]\n", rv,
+ ssl_want[SSL_want (ssl)],
+ oc->openssl_ctx_index);
+ break;
+ }
+ wrote_sum += rv;
+ }
+
+ if (svm_fifo_needs_deq_ntf (app_tx_fifo, wrote_sum))
+ session_dequeue_notify (app_session);
+
+ check_tls_fifo:
+ /* we got here, async write is done or not possible */
+ oc->total_async_write = 0;
+
+ if (PREDICT_FALSE (BIO_ctrl_pending (oc->rbio) <= 0))
+ tls_notify_app_io_error (ctx);
+
+ /* Deschedule and wait for deq notification if fifo is almost full */
+ enq_buf =
+ clib_min (svm_fifo_size (tls_tx_fifo) / 2, TLSO_MIN_ENQ_SPACE);
+ if (space < wrote_sum + enq_buf)
+ {
+ svm_fifo_add_want_deq_ntf (tls_tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+ transport_connection_deschedule (&ctx->connection);
+ sp->flags |= TRANSPORT_SND_F_DESCHED;
+ }
+ else
+ {
+ /* Request tx reschedule of the app session */
+ app_session->flags |= SESSION_F_CUSTOM_TX;
+ transport_connection_reschedule (&ctx->connection);
+ }
+
+ ev_in_queue:
+ /* job removed, openssl_ctx_write will resume */
+ continue;
+ }
+ else
+ {
+ /* wrong event type */
+ clib_warning ("goto remove_event [event->type:%d]\n", event->type);
+ tls_async_dequeue_update (event, evt_run_head, evt_run_tail,
+ &queue[thread_index].depth);
+ }
+ }
+
+handle_later:
+ return 1;
+}
+
+static int
+tls_async_dequeue_event_in_init (int thread_index)
+{
+ openssl_evt_t *event;
+ openssl_async_t *om = &openssl_async_main;
+ openssl_async_queue_t *queue = om->queue_in_init;
+ int *evt_run_tail = &queue[thread_index].evt_run_tail;
+ int *evt_run_head = &queue[thread_index].evt_run_head;
+
+ /* dequeue events if any exist */
+ while (*evt_run_head >= 0)
+ {
+ openssl_ctx_t *oc;
+ tls_ctx_t *ctx;
+ int rv, err;
+
+ event = openssl_evt_get_w_thread (*evt_run_head, thread_index);
+ ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index);
+ oc = (openssl_ctx_t *) ctx;
+
+ if (event->type != SSL_ASYNC_EVT_INIT)
+ {
+ /* wrong event type */
+ clib_warning ("goto remove_event [event->type:%d]\n", event->type);
+ goto remove_event;
+ }
+
+ if (!SSL_in_init (oc->ssl))
+ {
+ clib_warning ("[!SSL_in_init() != ev->type:%d] th:%d ev:%d\n",
+ event->type, event->cb_args.thread_index,
+ event->cb_args.event_index);
+ goto remove_event;
+ }
+
+ rv = SSL_do_handshake (oc->ssl);
+ err = SSL_get_error (oc->ssl, rv);
+
+ /* Do not remove session from tail */
+ if (err == SSL_ERROR_WANT_ASYNC)
+ goto handle_later;
+
+ if (err == SSL_ERROR_SSL)
+ {
+ char buf[512];
+ ERR_error_string (ERR_get_error (), buf);
+ clib_warning ("Err: %s\n", buf);
+ openssl_handle_handshake_failure (ctx);
+ goto remove_event;
+ }
+
+ if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ)
+ goto handle_later;
+
+ /* client not supported */
+ if (!SSL_is_server (oc->ssl))
+ {
+ clib_warning ("goto remove_event [!SSL_is_server]\n");
+ goto remove_event;
+ }
+
+ if (tls_notify_app_accept (ctx))
+ {
+ ctx->c_s_index = SESSION_INVALID_INDEX;
+ tls_disconnect_transport (ctx);
+ }
+
+ TLS_DBG (1, "Handshake for %u complete. TLS cipher is %s",
+ oc->openssl_ctx_index, SSL_get_cipher (oc->ssl));
+
+ remove_event:
+ *evt_run_head = event->next;
+ queue[thread_index].depth--;
+
+ if (*evt_run_head < 0)
+ {
+ /* queue empty, bail out */
+ *evt_run_tail = -1;
+ if (queue[thread_index].depth)
+ clib_warning ("queue empty but depth:%d\n",
+ queue[thread_index].depth);
+ break;
+ }
+ }
+
+handle_later:
+ return 1;
+}
+
int
-vpp_tls_async_init_event (tls_ctx_t * ctx,
- openssl_resume_handler * handler,
- session_t * session)
+vpp_tls_async_enqueue_event (openssl_ctx_t *ctx, int evt_type,
+ transport_send_params_t *sp, int size)
+{
+ openssl_evt_t *event;
+ openssl_async_t *om = &openssl_async_main;
+ openssl_async_queue_t *queue;
+ int thread_index;
+ int event_index;
+ int *evt_run_tail;
+ int *evt_run_head;
+
+ event = openssl_evt_get (ctx->evt_index[evt_type]);
+
+ thread_index = event->thread_idx;
+ event_index = event->event_idx;
+
+ /* set queue to be used */
+ if (SSL_in_init (ctx->ssl))
+ queue = om->queue_in_init;
+ else
+ queue = om->queue;
+
+ evt_run_tail = &queue[thread_index].evt_run_tail;
+ evt_run_head = &queue[thread_index].evt_run_head;
+
+ event->type = evt_type;
+ event->handler = (openssl_resume_handler *) sp;
+ event->next = -1;
+
+ /* first we enqueue the request */
+ if (*evt_run_tail >= 0)
+ {
+ openssl_evt_t *event_tail;
+
+ /* queue not empty, append to tail event */
+ event_tail = openssl_evt_get_w_thread (*evt_run_tail, thread_index);
+ event_tail->next = event_index;
+ }
+
+ /* set tail to use new event index */
+ *evt_run_tail = event_index;
+
+ if (*evt_run_head < 0)
+ /* queue is empty, update head */
+ *evt_run_head = event_index;
+
+ queue[thread_index].depth++;
+ if (queue[thread_index].depth > queue[thread_index].max_depth)
+ queue[thread_index].max_depth = queue[thread_index].depth;
+
+ return 1;
+}
+
+static int
+vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler,
+ session_t *session, ssl_async_evt_type_t evt_type)
{
u32 eidx;
openssl_evt_t *event;
@@ -293,8 +824,10 @@
event->thread_idx = thread_id;
event->handler = handler;
event->session_index = session->session_index;
- event->status = 0;
- ctx->evt_index = eidx;
+ event->type = evt_type;
+ event->status = SSL_ASYNC_INVALID_STATUS;
+ oc->evt_index[evt_type] = eidx;
+ event->next = -1;
#ifdef HAVE_OPENSSL_ASYNC
SSL_set_async_callback_arg (oc->ssl, &event->cb_args);
#endif
@@ -303,25 +836,45 @@
}
int
-vpp_openssl_is_inflight (tls_ctx_t * ctx)
+vpp_tls_async_init_events (tls_ctx_t *ctx, openssl_resume_handler *handler,
+ session_t *session)
+{
+ vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_INIT);
+ vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_RD);
+ vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_WR);
+
+ return 1;
+}
+
+int
+vpp_openssl_is_inflight (tls_ctx_t *ctx)
{
u32 eidx;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
openssl_evt_t *event;
- eidx = ctx->evt_index;
- event = openssl_evt_get (eidx);
+ int i;
- if (event->status == SSL_ASYNC_INFLIGHT)
- return 1;
+ for (i = SSL_ASYNC_EVT_INIT; i < SSL_ASYNC_EVT_MAX; i++)
+ {
+ eidx = oc->evt_index[i];
+ event = openssl_evt_get (eidx);
+
+ if (event->status == SSL_ASYNC_INFLIGHT)
+ return 1;
+ }
+
return 0;
}
int
-vpp_tls_async_update_event (tls_ctx_t * ctx, int eagain)
+vpp_tls_async_update_event (tls_ctx_t *ctx, int eagain,
+ ssl_async_evt_type_t type)
{
u32 eidx;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
openssl_evt_t *event;
- eidx = ctx->evt_index;
+ eidx = oc->evt_index[type];
event = openssl_evt_get (eidx);
event->status = SSL_ASYNC_INFLIGHT;
if (eagain)
@@ -469,7 +1022,7 @@
continue;
}
- event->status = 0;
+ event->status = SSL_ASYNC_INVALID_STATUS;
*evt_run_head = event->next;
if (event->next < 0)
@@ -502,7 +1055,8 @@
if (pool_elts (om->evt_pool[thread_index]) > 0)
{
openssl_async_polling ();
- tls_resume_from_crypto (thread_index);
+ tls_async_dequeue_event_in_init (thread_index);
+ tls_async_dequeue_event (thread_index);
}
return 0;
diff --git a/src/plugins/tlsopenssl/tls_openssl.c b/src/plugins/tlsopenssl/tls_openssl.c
index c8e685f..d7adbed 100644
--- a/src/plugins/tlsopenssl/tls_openssl.c
+++ b/src/plugins/tlsopenssl/tls_openssl.c
@@ -74,9 +74,16 @@
SSL_free (oc->ssl);
vec_free (ctx->srv_hostname);
SSL_CTX_free (oc->client_ssl_ctx);
-#ifdef HAVE_OPENSSL_ASYNC
- openssl_evt_free (ctx->evt_index, ctx->c_thread_index);
-#endif
+
+ if (openssl_main.async)
+ {
+ openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_INIT],
+ ctx->c_thread_index);
+ openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_RD],
+ ctx->c_thread_index);
+ openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_WR],
+ ctx->c_thread_index);
+ }
}
pool_put_index (openssl_main.ctx_pool[ctx->c_thread_index],
@@ -159,17 +166,43 @@
return pool_elt_at_index (openssl_main.lctx_pool, lctx_index);
}
-#define ossl_check_err_is_fatal(_ssl, _rv) \
- if (PREDICT_FALSE (_rv < 0 && SSL_get_error (_ssl, _rv) == SSL_ERROR_SSL)) \
- return -1;
-
static int
-openssl_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl, u32 max_len)
+openssl_handle_want_async (tls_ctx_t *ctx, int evt_type,
+ transport_send_params_t *sp, int size)
+{
+ int ret;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+
+ if (evt_type >= SSL_ASYNC_EVT_MAX || evt_type == 0)
+ {
+ clib_warning ("return 0 [illegal evt_type value:%d]\n", evt_type);
+ return 0;
+ }
+
+ if (evt_type == SSL_ASYNC_EVT_WR)
+ {
+ /* de-schedule transport connection */
+ transport_connection_deschedule (&ctx->connection);
+ sp->flags |= TRANSPORT_SND_F_DESCHED;
+ oc->total_async_write = size;
+ }
+ ret = vpp_tls_async_enqueue_event (oc, evt_type, sp, size);
+
+ return ret;
+}
+
+int
+openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx, u32 max_len)
{
int read, rv, n_fs, i;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
const int n_segs = 2;
svm_fifo_seg_t fs[n_segs];
u32 max_enq;
+ SSL *ssl = oc->ssl;
+
+ if (ctx->flags & TLS_CONN_F_ASYNC_RD)
+ return 0;
max_enq = svm_fifo_max_enqueue_prod (f);
if (!max_enq)
@@ -184,6 +217,12 @@
read = SSL_read (ssl, fs[0].data, fs[0].len);
if (read <= 0)
{
+ if (openssl_main.async && SSL_want_async (oc->ssl))
+ {
+ ctx->flags |= TLS_CONN_F_ASYNC_RD;
+ openssl_handle_want_async (ctx, SSL_ASYNC_EVT_RD, NULL, 0);
+ return 0;
+ }
ossl_check_err_is_fatal (ssl, read);
return 0;
}
@@ -208,11 +247,14 @@
}
static int
-openssl_write_from_fifo_into_ssl (svm_fifo_t *f, SSL *ssl, u32 max_len)
+openssl_write_from_fifo_into_ssl (svm_fifo_t *f, tls_ctx_t *ctx,
+ transport_send_params_t *sp, u32 max_len)
{
int wrote = 0, rv, i = 0, len;
u32 n_segs = 2;
svm_fifo_seg_t fs[n_segs];
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+ SSL *ssl = oc->ssl;
len = svm_fifo_segments (f, 0, fs, &n_segs, max_len);
if (len <= 0)
@@ -230,6 +272,11 @@
i++;
}
+ if (openssl_main.async && SSL_want_async (ssl))
+ {
+ openssl_handle_want_async (ctx, SSL_ASYNC_EVT_WR, sp, max_len);
+ return 0;
+ }
if (wrote)
svm_fifo_dequeue_drop (f, wrote);
@@ -247,11 +294,15 @@
SSL_get_async_status (oc->ssl, &estatus);
if (estatus == ASYNC_STATUS_EAGAIN)
{
- vpp_tls_async_update_event (ctx, 1);
+ vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_INIT);
+ vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_RD);
+ vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_WR);
}
else
{
- vpp_tls_async_update_event (ctx, 0);
+ vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_INIT);
+ vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_RD);
+ vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_WR);
}
return 1;
@@ -260,8 +311,8 @@
#endif
-static void
-openssl_handle_handshake_failure (tls_ctx_t * ctx)
+void
+openssl_handle_handshake_failure (tls_ctx_t *ctx)
{
/* Failed to renegotiate handshake */
if (ctx->flags & TLS_CONN_F_HS_DONE)
@@ -304,19 +355,17 @@
rv = SSL_do_handshake (oc->ssl);
err = SSL_get_error (oc->ssl, rv);
-#ifdef HAVE_OPENSSL_ASYNC
- if (err == SSL_ERROR_WANT_ASYNC)
+ if (openssl_main.async && err == SSL_ERROR_WANT_ASYNC)
{
- openssl_check_async_status (ctx, openssl_ctx_handshake_rx,
- tls_session);
+ openssl_handle_want_async (ctx, SSL_ASYNC_EVT_INIT, NULL, 0);
+ return -1;
}
-#endif
+
if (err == SSL_ERROR_SSL)
{
char buf[512];
ERR_error_string (ERR_get_error (), buf);
clib_warning ("Err: %s", buf);
-
openssl_handle_handshake_failure (ctx);
return -1;
}
@@ -385,8 +434,8 @@
return rv;
}
-static void
-openssl_confirm_app_close (tls_ctx_t * ctx)
+void
+openssl_confirm_app_close (tls_ctx_t *ctx)
{
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
SSL_shutdown (oc->ssl);
@@ -425,7 +474,7 @@
if (svm_fifo_provision_chunks (ts->tx_fifo, 0, 0, deq_max + TLSO_CTRL_BYTES))
goto check_tls_fifo;
- wrote = openssl_write_from_fifo_into_ssl (f, oc->ssl, deq_max);
+ wrote = openssl_write_from_fifo_into_ssl (f, ctx, sp, deq_max);
/* Unrecoverable protocol error. Reset connection */
if (PREDICT_FALSE (wrote < 0))
@@ -556,7 +605,7 @@
app_session = session_get_from_handle (ctx->app_session_handle);
f = app_session->rx_fifo;
- read = openssl_read_from_ssl_into_fifo (f, oc->ssl, max_len);
+ read = openssl_read_from_ssl_into_fifo (f, ctx, max_len);
/* Unrecoverable protocol error. Reset connection */
if (PREDICT_FALSE (read < 0))
@@ -753,10 +802,9 @@
SSL_CTX_set_ecdh_auto (oc->client_ssl_ctx, 1);
SSL_CTX_set_mode (oc->client_ssl_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
-#ifdef HAVE_OPENSSL_ASYNC
if (om->async)
SSL_CTX_set_mode (oc->client_ssl_ctx, SSL_MODE_ASYNC);
-#endif
+
rv =
SSL_CTX_set_cipher_list (oc->client_ssl_ctx, (const char *) om->ciphers);
if (rv != 1)
@@ -801,7 +849,17 @@
{
TLS_DBG (1, "Couldn't set client certificate-key pair");
}
-
+ /* Set TLS Record size */
+ if (om->record_size)
+ {
+ rv = SSL_CTX_set_max_send_fragment (oc->client_ssl_ctx, om->record_size);
+ if (rv != 1)
+ {
+ TLS_DBG (1, "Couldn't set TLS record-size");
+ return -1;
+ }
+ TLS_DBG (1, "Using TLS record-size of %d", om->record_size);
+ }
/*
* 2. Do the first steps in the handshake.
*/
@@ -810,7 +868,7 @@
#ifdef HAVE_OPENSSL_ASYNC
session_t *tls_session = session_get_from_handle (ctx->tls_session_handle);
- vpp_tls_async_init_event (ctx, openssl_ctx_handshake_rx, tls_session);
+ vpp_tls_async_init_events (ctx, openssl_ctx_handshake_rx, tls_session);
#endif
while (1)
{
@@ -828,7 +886,7 @@
break;
}
- TLS_DBG (2, "tls state for [%u]%u is su", ctx->c_thread_index,
+ TLS_DBG (2, "tls state for [%u]%u is %s", ctx->c_thread_index,
oc->openssl_ctx_index, SSL_state_string_long (oc->ssl));
return 0;
}
@@ -895,6 +953,39 @@
return -1;
}
+ /* Set TLS Record size */
+ if (om->record_size)
+ {
+ rv = SSL_CTX_set_max_send_fragment (ssl_ctx, om->record_size);
+ if (rv != 1)
+ {
+ TLS_DBG (1, "Couldn't set TLS record-size");
+ return -1;
+ }
+ }
+
+ /* Set TLS Record Split size */
+ if (om->record_split_size)
+ {
+ rv = SSL_CTX_set_split_send_fragment (ssl_ctx, om->record_split_size);
+ if (rv != 1)
+ {
+ TLS_DBG (1, "Couldn't set TLS record-split-size");
+ return -1;
+ }
+ }
+
+ /* Set TLS Max Pipeline count */
+ if (om->max_pipelines)
+ {
+ rv = SSL_CTX_set_max_pipelines (ssl_ctx, om->max_pipelines);
+ if (rv != 1)
+ {
+ TLS_DBG (1, "Couldn't set TLS max-pipelines");
+ return -1;
+ }
+ }
+
/*
* Set the key and cert
*/
@@ -1012,22 +1103,23 @@
TLS_DBG (1, "Initiating handshake for [%u]%u", ctx->c_thread_index,
oc->openssl_ctx_index);
-#ifdef HAVE_OPENSSL_ASYNC
- session_t *tls_session = session_get_from_handle (ctx->tls_session_handle);
- vpp_tls_async_init_event (ctx, openssl_ctx_handshake_rx, tls_session);
-#endif
+ if (openssl_main.async)
+ {
+ session_t *tls_session =
+ session_get_from_handle (ctx->tls_session_handle);
+ vpp_tls_async_init_events (ctx, openssl_ctx_handshake_rx, tls_session);
+ }
+
while (1)
{
rv = SSL_do_handshake (oc->ssl);
err = SSL_get_error (oc->ssl, rv);
-#ifdef HAVE_OPENSSL_ASYNC
- if (err == SSL_ERROR_WANT_ASYNC)
+ if (openssl_main.async && err == SSL_ERROR_WANT_ASYNC)
{
- openssl_check_async_status (ctx, openssl_ctx_handshake_rx,
- tls_session);
+ openssl_handle_want_async (ctx, SSL_ASYNC_EVT_INIT, NULL, 0);
+
break;
}
-#endif
if (err != SSL_ERROR_WANT_WRITE)
break;
}
@@ -1040,10 +1132,8 @@
static int
openssl_transport_close (tls_ctx_t * ctx)
{
-#ifdef HAVE_OPENSSL_ASYNC
- if (vpp_openssl_is_inflight (ctx))
+ if (openssl_main.async && vpp_openssl_is_inflight (ctx))
return 0;
-#endif
if (!(ctx->flags & TLS_CONN_F_HS_DONE))
{
@@ -1224,7 +1314,6 @@
.runs_after = VLIB_INITS("tls_init"),
};
-#ifdef HAVE_OPENSSL_ASYNC
static clib_error_t *
tls_openssl_set_command_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd)
@@ -1302,7 +1391,45 @@
.short_help = "tls openssl set [engine <engine name>] [alg [algorithm] [async]",
.function = tls_openssl_set_command_fn,
};
-#endif
+
+static clib_error_t *
+tls_openssl_set_tls_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ openssl_main_t *om = &openssl_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "record-size %U", unformat_memory_size,
+ &om->record_size))
+ {
+ clib_warning ("Using TLS record-size of %d", om->record_size);
+ }
+ else if (unformat (input, "record-split-size %U", unformat_memory_size,
+ &om->record_split_size))
+ {
+ clib_warning ("Using TLS record-split-size of %d",
+ om->record_split_size);
+ }
+ else if (unformat (input, "max-pipelines %U", unformat_memory_size,
+ &om->max_pipelines))
+ {
+ clib_warning ("Using TLS max-pipelines of %d", om->max_pipelines);
+ }
+ else
+ return clib_error_return (0, "failed: unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (tls_openssl_set_tls, static) = {
+ .path = "tls openssl set-tls",
+ .short_help = "tls openssl set-tls [record-size <size>] [record-split-size "
+ "<size>] [max-pipelines <size>]",
+ .function = tls_openssl_set_tls_fn,
+};
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
diff --git a/src/plugins/tlsopenssl/tls_openssl.h b/src/plugins/tlsopenssl/tls_openssl.h
index 1600cd7..8f6c665 100644
--- a/src/plugins/tlsopenssl/tls_openssl.h
+++ b/src/plugins/tlsopenssl/tls_openssl.h
@@ -29,12 +29,18 @@
#define DTLSO_MAX_DGRAM 2000
+#define ossl_check_err_is_fatal(_ssl, _rv) \
+ if (PREDICT_FALSE (_rv < 0 && SSL_get_error (_ssl, _rv) == SSL_ERROR_SSL)) \
+ return -1;
+
typedef struct tls_ctx_openssl_
{
tls_ctx_t ctx; /**< First */
u32 openssl_ctx_index;
SSL_CTX *client_ssl_ctx;
SSL *ssl;
+ u32 evt_index[SSL_ASYNC_EVT_MAX];
+ u32 total_async_write;
BIO *rbio;
BIO *wbio;
} openssl_ctx_t;
@@ -63,15 +69,20 @@
u8 *ciphers;
int engine_init;
int async;
+ u32 record_size;
+ u32 record_split_size;
+ u32 max_pipelines;
} openssl_main_t;
typedef int openssl_resume_handler (tls_ctx_t * ctx, session_t * tls_session);
tls_ctx_t *openssl_ctx_get_w_thread (u32 ctx_index, u8 thread_index);
-int vpp_tls_async_init_event (tls_ctx_t * ctx,
- openssl_resume_handler * handler,
- session_t * session);
-int vpp_tls_async_update_event (tls_ctx_t * ctx, int eagain);
+int vpp_tls_async_init_events (tls_ctx_t *ctx, openssl_resume_handler *handler,
+ session_t *session);
+int vpp_tls_async_update_event (tls_ctx_t *ctx, int eagain,
+ ssl_async_evt_type_t type);
+int vpp_tls_async_enqueue_event (openssl_ctx_t *ctx, int evt_type,
+ transport_send_params_t *sp, int size);
int tls_async_openssl_callback (SSL * s, void *evt);
int openssl_evt_free (int event_idx, u8 thread_index);
void openssl_polling_start (ENGINE * engine);
@@ -80,6 +91,10 @@
clib_error_t *tls_openssl_api_init (vlib_main_t * vm);
int tls_openssl_set_ciphers (char *ciphers);
int vpp_openssl_is_inflight (tls_ctx_t * ctx);
+int openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx,
+ u32 max_len);
+void openssl_handle_handshake_failure (tls_ctx_t *ctx);
+void openssl_confirm_app_close (tls_ctx_t *ctx);
#endif /* SRC_PLUGINS_TLSOPENSSL_TLS_OPENSSL_H_ */
diff --git a/src/vnet/tls/tls.c b/src/vnet/tls/tls.c
index 12dcbb4..b9ff30b 100644
--- a/src/vnet/tls/tls.c
+++ b/src/vnet/tls/tls.c
@@ -1258,6 +1258,10 @@
vec_validate (tm->rx_bufs, num_threads - 1);
vec_validate (tm->tx_bufs, num_threads - 1);
+ /*
+ * first_seg_size default value 32 MB
+ * add_seg_size default value 256 MB
+ */
tm->first_seg_size = 32 << 20;
tm->add_seg_size = 256 << 20;
diff --git a/src/vnet/tls/tls.h b/src/vnet/tls/tls.h
index e56c4c0..244e204 100644
--- a/src/vnet/tls/tls.h
+++ b/src/vnet/tls/tls.h
@@ -40,6 +40,19 @@
#define TLS_DBG(_lvl, _fmt, _args...)
#endif
+#define foreach_ssl_async_evt_type \
+ _ (INIT, "SSL_in_init async event") \
+ _ (RD, "Read async event") \
+ _ (WR, "Write async event") \
+ _ (MAX, "Maximum async event")
+
+typedef enum ssl_async_evt_type_
+{
+#define _(sym, str) SSL_ASYNC_EVT_##sym,
+ foreach_ssl_async_evt_type
+#undef _
+} ssl_async_evt_type_t;
+
typedef struct tls_cxt_id_
{
session_handle_t app_session_handle;
@@ -66,7 +79,8 @@
_ (MIGRATED, "migrated") \
_ (NO_APP_SESSION, "no-app-session") \
_ (RESUME, "resume") \
- _ (HS_DONE, "handshake-done")
+ _ (HS_DONE, "handshake-done") \
+ _ (ASYNC_RD, "async-read")
typedef enum tls_conn_flags_bit_
{
@@ -105,7 +119,6 @@
u32 ts_app_index;
tls_conn_flags_t flags;
u8 *srv_hostname;
- u32 evt_index;
u32 ckpair_index;
transport_proto_t tls_type;
} tls_ctx_t;