ipsec: add per-SA error counters
Error counters are currently tracked on a per-node basis. In IPsec, it is
useful to also track the errors that occurred per SA.
Type: feature
Change-Id: Iabcdcb439f67ad3c6c202b36ffc44ab39abac1bc
Signed-off-by: Arthur de Kerhor <arthurdekerhor@gmail.com>
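
The accounting pattern shared by the drop paths below is sketched here
for orientation (names are taken from this patch; the surrounding node
code is elided):

    /* charge both the per-node and the per-SA error counter */
    nexts[index] = drop_next;
    b->error = node->errors[err];
    if (sa_err != ~0) /* ~0: the node error has no SA-level equivalent */
      vlib_increment_simple_counter (&ipsec_sa_err_counters[sa_err],
                                     thread_index, sa_index, 1);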
diff --git a/src/vnet/ipsec/ah.h b/src/vnet/ipsec/ah.h
index d0b4c21..ae4cd0b 100644
--- a/src/vnet/ipsec/ah.h
+++ b/src/vnet/ipsec/ah.h
@@ -17,6 +17,7 @@
#include <vnet/ip/ip.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
typedef struct
{
@@ -43,6 +44,58 @@
}) ip6_and_ah_header_t;
/* *INDENT-ON* */
+always_inline u32
+ah_encrypt_err_to_sa_err (u32 err)
+{
+ switch (err)
+ {
+ case AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+ return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+ case AH_ENCRYPT_ERROR_SEQ_CYCLED:
+ return IPSEC_SA_ERROR_SEQ_CYCLED;
+ }
+ return ~0;
+}
+
+always_inline u32
+ah_decrypt_err_to_sa_err (u32 err)
+{
+ switch (err)
+ {
+ case AH_DECRYPT_ERROR_DECRYPTION_FAILED:
+ return IPSEC_SA_ERROR_DECRYPTION_FAILED;
+ case AH_DECRYPT_ERROR_INTEG_ERROR:
+ return IPSEC_SA_ERROR_INTEG_ERROR;
+ case AH_DECRYPT_ERROR_NO_TAIL_SPACE:
+ return IPSEC_SA_ERROR_NO_TAIL_SPACE;
+ case AH_DECRYPT_ERROR_DROP_FRAGMENTS:
+ return IPSEC_SA_ERROR_DROP_FRAGMENTS;
+ case AH_DECRYPT_ERROR_REPLAY:
+ return IPSEC_SA_ERROR_REPLAY;
+ }
+ return ~0;
+}
+
+always_inline void
+ah_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ ah_encrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
+}
+
+always_inline void
+ah_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ ah_decrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
+}
+
always_inline u8
ah_calc_icv_padding_len (u8 icv_size, int is_ipv6)
{
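
Note: the AH (and ESP) node error enums are generated per graph node,
while ipsec_sa_err_t is shared by all IPsec nodes, hence the translation
helpers above. An illustrative use (hypothetical snippet, not part of
the patch):

    u32 sa_err = ah_decrypt_err_to_sa_err (AH_DECRYPT_ERROR_REPLAY);
    /* sa_err == IPSEC_SA_ERROR_REPLAY; an unmapped node error yields ~0,
       which ipsec_set_next_index treats as "no SA counter to bump" */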
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index c9209d6..ce4610d 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -23,7 +23,6 @@
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>
#include <vnet/ipsec/ipsec_io.h>
-#include <vnet/ipsec/ipsec.api_enum.h>
#define foreach_ah_decrypt_next \
_(DROP, "error-drop") \
@@ -104,8 +103,9 @@
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[AH_DECRYPT_ERROR_INTEG_ERROR];
- nexts[bi] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (
+ b[bi], node, vm->thread_index, AH_DECRYPT_ERROR_INTEG_ERROR, bi,
+ nexts, AH_DECRYPT_NEXT_DROP, vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -145,8 +145,7 @@
{
if (current_sa_index != ~0)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
sa0 = ipsec_sa_get (current_sa_index);
@@ -190,8 +189,9 @@
{
if (ip4_is_fragment (ih4))
{
- b[0]->error = node->errors[AH_DECRYPT_ERROR_DROP_FRAGMENTS];
- next[0] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (
+ b[0], node, vm->thread_index, AH_DECRYPT_ERROR_DROP_FRAGMENTS,
+ 0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
goto next;
}
pd->ip_hdr_size = ip4_header_bytes (ih4);
@@ -204,8 +204,9 @@
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
&pd->seq_hi))
{
- b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
- next[0] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+ AH_DECRYPT_ERROR_REPLAY, 0, next,
+ AH_DECRYPT_NEXT_DROP, current_sa_index);
goto next;
}
@@ -220,8 +221,9 @@
pd->current_data + b[0]->current_length
+ sizeof (u32) > buffer_data_size))
{
- b[0]->error = node->errors[AH_DECRYPT_ERROR_NO_TAIL_SPACE];
- next[0] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (
+ b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
+ 0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
goto next;
}
@@ -307,14 +309,16 @@
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi,
true, NULL))
{
- b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
- next[0] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+ AH_DECRYPT_ERROR_REPLAY, 0, next,
+ AH_DECRYPT_NEXT_DROP, pd->sa_index);
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (sa0, thread_index, pd->seq,
pd->seq_hi);
- vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, thread_index,
- pd->sa_index);
+ vlib_prefetch_simple_counter (
+ &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
+ pd->sa_index);
}
u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
@@ -330,8 +334,10 @@
next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
else
{
- b[0]->error = node->errors[AH_DECRYPT_ERROR_DECRYPTION_FAILED];
- next[0] = AH_DECRYPT_NEXT_DROP;
+ ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+ AH_DECRYPT_ERROR_DECRYPTION_FAILED, 0,
+ next, AH_DECRYPT_NEXT_DROP,
+ pd->sa_index);
goto trace;
}
}
@@ -382,8 +388,9 @@
}
if (PREDICT_FALSE (n_lost))
- vlib_increment_simple_counter (&ipsec_sa_lost_counters, thread_index,
- pd->sa_index, n_lost);
+ vlib_increment_simple_counter (
+ &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
+ pd->sa_index, n_lost);
vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
trace:
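
Note: the LOST accounting keeps the existing prefetch/increment pairing;
only the counter object changes, from the dedicated
ipsec_sa_lost_counters to the LOST slot of the per-SA error array:

    vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                  thread_index, pd->sa_index);
    /* ... the anti-replay window advance computes n_lost ... */
    if (PREDICT_FALSE (n_lost))
      vlib_increment_simple_counter (
        &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
        pd->sa_index, n_lost);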
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 7116a16..e2d17d4 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -81,8 +81,10 @@
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = AH_ENCRYPT_NEXT_DROP;
+ ah_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR, bi,
+ nexts, AH_ENCRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -153,13 +155,14 @@
{
if (current_sa_index != ~0)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
sa0 = ipsec_sa_get (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
}
pd->sa_index = current_sa_index;
@@ -183,7 +186,9 @@
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
- b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
+ ah_encrypt_set_next_index (b[0], node, vm->thread_index,
+ AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
+ AH_ENCRYPT_NEXT_DROP, current_sa_index);
pd->skip = 1;
goto next;
}
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 8d7e056..05773a2 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -18,6 +18,7 @@
#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
typedef struct
{
@@ -141,33 +142,96 @@
}
}
-/* Special case to drop or hand off packets for sync/async modes.
- *
- * Different than sync mode, async mode only enqueue drop or hand-off packets
- * to next nodes.
- */
-always_inline void
-esp_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err,
- u16 index, u16 *nexts, u16 drop_next)
+always_inline u32
+esp_encrypt_err_to_sa_err (u32 err)
{
- nexts[index] = drop_next;
- b->error = node->errors[err];
+ switch (err)
+ {
+ case ESP_ENCRYPT_ERROR_HANDOFF:
+ return IPSEC_SA_ERROR_HANDOFF;
+ case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
+ return IPSEC_SA_ERROR_SEQ_CYCLED;
+ case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+ return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+ case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
+ return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
+ case ESP_ENCRYPT_ERROR_NO_BUFFERS:
+ return IPSEC_SA_ERROR_NO_BUFFERS;
+ case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
+ return IPSEC_SA_ERROR_NO_ENCRYPTION;
+ }
+ return ~0;
+}
+
+always_inline u32
+esp_decrypt_err_to_sa_err (u32 err)
+{
+ switch (err)
+ {
+ case ESP_DECRYPT_ERROR_HANDOFF:
+ return IPSEC_SA_ERROR_HANDOFF;
+ case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
+ return IPSEC_SA_ERROR_DECRYPTION_FAILED;
+ case ESP_DECRYPT_ERROR_INTEG_ERROR:
+ return IPSEC_SA_ERROR_INTEG_ERROR;
+ case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+ return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+ case ESP_DECRYPT_ERROR_REPLAY:
+ return IPSEC_SA_ERROR_REPLAY;
+ case ESP_DECRYPT_ERROR_RUNT:
+ return IPSEC_SA_ERROR_RUNT;
+ case ESP_DECRYPT_ERROR_NO_BUFFERS:
+ return IPSEC_SA_ERROR_NO_BUFFERS;
+ case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
+ return IPSEC_SA_ERROR_OVERSIZED_HEADER;
+ case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
+ return IPSEC_SA_ERROR_NO_TAIL_SPACE;
+ case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
+ return IPSEC_SA_ERROR_TUN_NO_PROTO;
+ case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
+ return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
+ }
+ return ~0;
+}
+
+always_inline void
+esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ esp_encrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
+}
+
+always_inline void
+esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ esp_decrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
}
/* when submitting a frame fails, drop all buffers in the frame */
always_inline u32
esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
- vlib_node_runtime_t *node, u32 err, u16 index,
- u32 *from, u16 *nexts, u16 drop_next_index)
+ vlib_node_runtime_t *node, u32 err,
+ u32 ipsec_sa_err, u16 index, u32 *from,
+ u16 *nexts, u16 drop_next_index)
{
+ vlib_buffer_t *b;
u32 n_drop = f->n_elts;
u32 *bi = f->buffer_indices;
while (n_drop--)
{
from[index] = bi[0];
- esp_set_next_index (vlib_get_buffer (vm, bi[0]), node, err, index, nexts,
- drop_next_index);
+ b = vlib_get_buffer (vm, bi[0]);
+ ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
+ index, nexts, drop_next_index,
+ vnet_buffer (b)->ipsec.sad_index);
bi++;
index++;
}
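
Note: an async frame may aggregate buffers belonging to different SAs,
so the recycle path charges each buffer's own sad_index instead of one
caller-supplied SA; the caller passes the pre-translated ipsec_sa_err
because the frame-level error is known up front. A call site mirroring
the esp_decrypt.c/esp_encrypt.c changes below:

    n_noop += esp_async_recycle_failed_submit (
      vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
      IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
      ESP_DECRYPT_NEXT_DROP);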
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 306fb7d..1bcc65c 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -23,7 +23,6 @@
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>
-#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/gre/packet.h>
@@ -114,8 +113,9 @@
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -146,8 +146,9 @@
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -525,8 +526,9 @@
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
{
- b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
+ next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
}
@@ -721,7 +723,7 @@
}
static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
const u16 *next_by_next_header,
const esp_decrypt_packet_data_t *pd,
const esp_decrypt_packet_data2_t *pd2,
@@ -760,16 +762,17 @@
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
NULL))
{
- b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
u64 n_lost =
ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
- vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
- pd->sa_index);
+ vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index);
if (pd->is_chain)
{
@@ -918,8 +921,9 @@
next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
break;
default:
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
+ next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
break;
}
}
@@ -932,8 +936,9 @@
}
else
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
@@ -973,8 +978,10 @@
!ip46_address_is_equal_v4 (&itp->itp_tun.dst,
&ip4->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
else if (next_header == IP_PROTOCOL_IPV6)
@@ -988,8 +995,10 @@
!ip46_address_is_equal_v6 (&itp->itp_tun.dst,
&ip6->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
}
@@ -997,8 +1006,8 @@
}
if (PREDICT_FALSE (n_lost))
- vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
- pd->sa_index, n_lost);
+ vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index, n_lost);
}
always_inline uword
@@ -1066,8 +1075,9 @@
if (n_bufs == 0)
{
err = ESP_DECRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[0])->ipsec.sad_index);
goto next;
}
@@ -1075,12 +1085,13 @@
{
if (current_sa_pkts)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_bytes = current_sa_pkts = 0;
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
sa0 = ipsec_sa_get (current_sa_index);
/* fetch the second cacheline ASAP */
@@ -1105,8 +1116,9 @@
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_HANDOFF);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
+ current_sa_index);
goto next;
}
@@ -1144,16 +1156,18 @@
&pd->seq_hi))
{
err = ESP_DECRYPT_ERROR_REPLAY;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
{
err = ESP_DECRYPT_ERROR_RUNT;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
@@ -1182,8 +1196,9 @@
async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (
+ b[0], node, thread_index, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, current_sa_index);
}
}
else
@@ -1233,7 +1248,8 @@
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index aa0fb0a..88e93b9 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -254,8 +254,10 @@
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -282,8 +284,10 @@
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -659,8 +663,8 @@
if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
{
err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
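+ /* no SA has been selected yet, so only the node counter can be charged */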
+ noop_nexts[n_noop] = drop_next;
+ b[0]->error = node->errors[err];
goto trace;
}
}
@@ -670,10 +674,9 @@
if (sa_index0 != current_sa_index)
{
if (current_sa_packets)
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_packets,
- current_sa_bytes);
+ vlib_increment_combined_counter (
+ &ipsec_sa_counters, thread_index, current_sa_index,
+ current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
sa0 = ipsec_sa_get (sa_index0);
@@ -683,14 +686,18 @@
!ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
{
err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ sa_index0);
goto trace;
}
+ current_sa_index = sa_index0;
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
+
/* fetch the second cacheline ASAP */
clib_prefetch_load (sa0->cacheline1);
- current_sa_index = sa_index0;
spi = clib_net_to_host_u32 (sa0->spi);
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
@@ -711,8 +718,9 @@
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- handoff_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, handoff_next,
+ current_sa_index);
goto trace;
}
@@ -721,7 +729,8 @@
if (n_bufs == 0)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
@@ -735,7 +744,8 @@
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
@@ -751,8 +761,9 @@
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -873,8 +884,9 @@
if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
@@ -886,8 +898,9 @@
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
@@ -1076,7 +1089,8 @@
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_noop, noop_bi, noop_nexts, drop_next);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
+ noop_nexts, drop_next);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 69aa661..5b51529 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -347,6 +347,23 @@
clib_atomic_release (lock);
}
+/* Special case to drop or hand off packets for sync/async modes.
+ *
+ * Unlike sync mode, async mode only enqueues drop or hand-off packets
+ * to next nodes.
+ */
+always_inline void
+ipsec_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u32 ipsec_sa_err, u16 index,
+ u16 *nexts, u16 drop_next, u32 sa_index)
+{
+ nexts[index] = drop_next;
+ b->error = node->errors[err];
+ if (PREDICT_TRUE (ipsec_sa_err != ~0))
+ vlib_increment_simple_counter (&ipsec_sa_err_counters[ipsec_sa_err],
+ thread_index, sa_index, 1);
+}
+
u32 ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
const char *name,
const char *ah4_encrypt_node_name,
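
Note: despite the drop_next name, the same helper also serves the
hand-off path; callers simply pass the hand-off next index, e.g. (from
the esp_decrypt.c changes above):

    esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
                                current_sa_index);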
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 8b436b6..35fee29 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -769,7 +769,8 @@
{
vlib_clear_combined_counters (&ipsec_spd_policy_counters);
vlib_clear_combined_counters (&ipsec_sa_counters);
- vlib_clear_simple_counters (&ipsec_sa_lost_counters);
+ for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+ vlib_clear_simple_counters (&ipsec_sa_err_counters[i]);
return (NULL);
}
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index 86ec368..d1511ac 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -444,7 +444,7 @@
u32 sai = va_arg (*args, u32);
ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
vlib_counter_t counts;
- counter_t lost;
+ counter_t errors;
ipsec_sa_t *sa;
if (pool_is_free_index (ipsec_sa_pool, sai))
@@ -485,12 +485,17 @@
clib_host_to_net_u16 (sa->udp_hdr.dst_port));
vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
- lost = vlib_get_simple_counter (&ipsec_sa_lost_counters, sai);
- s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld], lost:[packets:%Ld]",
- counts.packets, counts.bytes, lost);
+ s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld]", counts.packets,
+ counts.bytes);
+ s = format (s, "\n SA errors:");
+#define _(index, val, err, desc) \
+ errors = vlib_get_simple_counter (&ipsec_sa_err_counters[index], sai); \
+ s = format (s, "\n   " #err ":[packets:%Ld]", errors);
+ foreach_ipsec_sa_err
+#undef _
if (ipsec_sa_is_set_IS_TUNNEL (sa))
  s = format (s, "\n%U", format_tunnel, &sa->tunnel, 3);
done:
return (s);
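
For reference, the display macro expands once per table entry; for
entry 0 (lost) the expansion is roughly:

    errors = vlib_get_simple_counter (&ipsec_sa_err_counters[0], sai);
    s = format (s, "\n   lost:[packets:%Ld]", errors);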
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index 12f8ece..eed71a4 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -19,6 +19,7 @@
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry_track.h>
#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
/**
* @brief
@@ -28,10 +29,8 @@
.name = "SA",
.stat_segment_name = "/net/ipsec/sa",
};
-vlib_simple_counter_main_t ipsec_sa_lost_counters = {
- .name = "SA-lost",
- .stat_segment_name = "/net/ipsec/sa/lost",
-};
+/* Per-SA error counters */
+vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
ipsec_sa_t *ipsec_sa_pool;
@@ -329,8 +328,11 @@
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
- vlib_validate_simple_counter (&ipsec_sa_lost_counters, sa_index);
- vlib_zero_simple_counter (&ipsec_sa_lost_counters, sa_index);
+ for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+ {
+ vlib_validate_simple_counter (&ipsec_sa_err_counters[i], sa_index);
+ vlib_zero_simple_counter (&ipsec_sa_err_counters[i], sa_index);
+ }
tunnel_copy (tun, &sa->tunnel);
sa->id = id;
@@ -567,7 +569,8 @@
ipsec_sa_clear (index_t sai)
{
vlib_zero_combined_counter (&ipsec_sa_counters, sai);
- vlib_zero_simple_counter (&ipsec_sa_lost_counters, sai);
+ for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+ vlib_zero_simple_counter (&ipsec_sa_err_counters[i], sai);
}
void
@@ -640,16 +643,24 @@
.fnv_back_walk = ipsec_sa_back_walk,
};
-/* force inclusion from application's main.c */
+/* Init per-SA error counters and node type */
clib_error_t *
-ipsec_sa_interface_init (vlib_main_t * vm)
+ipsec_sa_init (vlib_main_t *vm)
{
fib_node_register_type (FIB_NODE_TYPE_IPSEC_SA, &ipsec_sa_vft);
- return 0;
+#define _(index, val, err, desc) \
+ ipsec_sa_err_counters[index].name = \
+ (char *) format (0, "SA-" #err "%c", 0); \
+ ipsec_sa_err_counters[index].stat_segment_name = \
+ (char *) format (0, "/net/ipsec/sa/err/" #err "%c", 0); \
+ ipsec_sa_err_counters[index].counters = 0;
+ foreach_ipsec_sa_err
+#undef _
+ return 0;
}
-VLIB_INIT_FUNCTION (ipsec_sa_interface_init);
+VLIB_INIT_FUNCTION (ipsec_sa_init);
/*
* fd.io coding-style-patch-verification: ON
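
Note: the "%c", 0 suffix in the init macro NUL-terminates the vec-backed
string returned by format(), since counter names are consumed as plain C
strings. The expansion for entry 0 (lost), for illustration:

    ipsec_sa_err_counters[0].name = (char *) format (0, "SA-lost%c", 0);
    ipsec_sa_err_counters[0].stat_segment_name =
      (char *) format (0, "/net/ipsec/sa/err/lost%c", 0);
    ipsec_sa_err_counters[0].counters = 0;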
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 88d5c42..4ef8f87 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -118,6 +118,35 @@
STATIC_ASSERT (sizeof (ipsec_sa_flags_t) == 2, "IPSEC SA flags != 2 byte");
+#define foreach_ipsec_sa_err \
+ _ (0, LOST, lost, "packets lost") \
+ _ (1, HANDOFF, handoff, "hand-off") \
+ _ (2, INTEG_ERROR, integ_error, "Integrity check failed") \
+ _ (3, DECRYPTION_FAILED, decryption_failed, "Decryption failed") \
+ _ (4, CRYPTO_ENGINE_ERROR, crypto_engine_error, \
+ "crypto engine error (dropped)") \
+ _ (5, REPLAY, replay, "SA replayed packet") \
+ _ (6, RUNT, runt, "undersized packet") \
+ _ (7, NO_BUFFERS, no_buffers, "no buffers (dropped)") \
+ _ (8, OVERSIZED_HEADER, oversized_header, \
+ "buffer with oversized header (dropped)") \
+ _ (9, NO_TAIL_SPACE, no_tail_space, \
+ "not enough buffer tail space (dropped)") \
+ _ (10, TUN_NO_PROTO, tun_no_proto, "no tunnel protocol") \
+ _ (11, UNSUP_PAYLOAD, unsup_payload, "unsupported payload") \
+ _ (12, SEQ_CYCLED, seq_cycled, "sequence number cycled (dropped)") \
+ _ (13, CRYPTO_QUEUE_FULL, crypto_queue_full, "crypto queue full (dropped)") \
+ _ (14, NO_ENCRYPTION, no_encryption, "no encrypting SA (dropped)") \
+ _ (15, DROP_FRAGMENTS, drop_fragments, "IP fragments dropped")
+
+typedef enum
+{
+#define _(v, f, s, d) IPSEC_SA_ERROR_##f = v,
+ foreach_ipsec_sa_err
+#undef _
+ IPSEC_SA_N_ERRORS,
+} __clib_packed ipsec_sa_err_t;
+
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
@@ -266,7 +295,7 @@
* SA packet & bytes counters
*/
extern vlib_combined_counter_main_t ipsec_sa_counters;
-extern vlib_simple_counter_main_t ipsec_sa_lost_counters;
+extern vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
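
For reference, the X-macro pins each error to an explicit enum value,
keeping the counter array index, the enum and the stat segment path in
sync:

    typedef enum
    {
      IPSEC_SA_ERROR_LOST = 0,
      IPSEC_SA_ERROR_HANDOFF = 1,
      /* ... */
      IPSEC_SA_ERROR_DROP_FRAGMENTS = 15,
      IPSEC_SA_N_ERRORS,
    } __clib_packed ipsec_sa_err_t;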