ipsec: fix AES CBC IV generation (CVE-2022-46397)

For AES-CBC, the IV must be unpredictable (see NIST SP800-38a Appendix
C). Chaining IVs, as the ipsecmb and native backends do for
VNET_CRYPTO_OP_FLAG_INIT_IV, is fully predictable.
Instead, encrypt a counter as part of the message, making the
(predictable) counter-generated IV unpredictable.
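
In short (a minimal standalone sketch of the scheme, not the VPP code;
it mirrors the new esp_generate_iv() helper introduced below):

    #include <stdint.h>
    #include <string.h>

    static uint64_t iv_counter; /* per-SA in the real code */

    /* write the counter into the ESP IV field, just before the payload */
    static void *
    generate_iv (uint8_t *payload, int iv_sz)
    {
      uint64_t *iv = (uint64_t *) (payload - iv_sz);
      memset (iv, 0, iv_sz);
      *iv = iv_counter++;
      return iv;
    }

For CTR and GCM the counter is used as the IV directly, since
uniqueness is all those modes require. For CBC, the crypto operation is
instead started iv_sz bytes earlier with an all-zero IV, so the counter
block is encrypted together with the payload and its ciphertext becomes
the effective, unpredictable IV (see the demonstration at the end of
this patch).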

Fixes: VPP-2037
Type: fix

Change-Id: If4f192d62bf97dda553e7573331c75efa11822ae
Signed-off-by: Benoît Ganne <bganne@cisco.com>
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 4d312be..aa0fb0a 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -215,6 +215,24 @@
   return len;
 }
 
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC, whereas they
+ * can be predictable but must never be reused with the same key
+ * material for CTR and GCM.
+ * We use a packet counter as the IV for CTR and GCM; to make the IV
+ * unpredictable for CBC, it is then encrypted using the same key as
+ * the message. Refer to NIST SP800-38a and NIST SP800-38d for more
+ * details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+  ASSERT (iv_sz >= sizeof (u64));
+  u64 *iv = (u64 *) (payload - iv_sz);
+  clib_memset_u8 (iv, 0, iv_sz);
+  *iv = sa->iv_counter++;
+  return iv;
+}
+
 static_always_inline void
 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
 			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
@@ -368,27 +386,29 @@
       vnet_crypto_op_t *op;
       vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
       vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+      u8 *crypto_start = payload;
+      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
+       * have enough space for the ESP header and the footer (incl. the ICV) */
+      ASSERT (payload_len > icv_sz);
+      u16 crypto_len = payload_len - icv_sz;
 
-      op->src = op->dst = payload;
+      /* generate the IV in front of the payload */
+      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
+
       op->key_index = sa0->crypto_key_index;
-      op->len = payload_len - icv_sz;
       op->user_data = bi;
 
       if (ipsec_sa_is_set_IS_CTR (sa0))
 	{
-	  ASSERT (sizeof (u64) == iv_sz);
 	  /* construct nonce in a scratch space in front of the IP header */
 	  esp_ctr_nonce_t *nonce =
-	    (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
-				 sizeof (*nonce));
-	  u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+	    (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
 	  if (ipsec_sa_is_set_IS_AEAD (sa0))
 	    {
 	      /* construct aad in a scratch space in front of the nonce */
 	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
 	      op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
-	      op->tag = payload + op->len;
+	      op->tag = payload + crypto_len;
 	      op->tag_len = 16;
 	    }
 	  else
@@ -397,13 +417,17 @@
 	    }
 
 	  nonce->salt = sa0->salt;
-	  nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+	  nonce->iv = *(u64 *) pkt_iv;
 	  op->iv = (u8 *) nonce;
 	}
       else
 	{
-	  op->iv = payload - iv_sz;
-	  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+	  /* construct zero iv in front of the IP header */
+	  op->iv = pkt_iv - hdr_len - iv_sz;
+	  clib_memset_u8 (op->iv, 0, iv_sz);
+	  /* include iv field in crypto */
+	  crypto_start -= iv_sz;
+	  crypto_len += iv_sz;
 	}
 
       if (lb != b[0])
@@ -412,8 +436,15 @@
 	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 	  op->chunk_index = vec_len (ptd->chunks);
 	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
-	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
-				    payload_len, &op->n_chunks);
+	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+				    crypto_start, crypto_len + icv_sz,
+				    &op->n_chunks);
+	}
+      else
+	{
+	  /* not chained */
+	  op->src = op->dst = crypto_start;
+	  op->len = crypto_len;
 	}
     }
 
@@ -463,26 +494,26 @@
   u8 *tag, *iv, *aad = 0;
   u8 flag = 0;
   u32 key_index;
-  i16 crypto_start_offset, integ_start_offset = 0;
+  i16 crypto_start_offset, integ_start_offset;
   u16 crypto_total_len, integ_total_len;
 
   post->next_index = next;
 
   /* crypto */
-  crypto_start_offset = payload - b->data;
+  crypto_start_offset = integ_start_offset = payload - b->data;
   crypto_total_len = integ_total_len = payload_len - icv_sz;
   tag = payload + crypto_total_len;
 
   key_index = sa->linked_key_index;
 
+  /* generate the IV in front of the payload */
+  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+
   if (ipsec_sa_is_set_IS_CTR (sa))
     {
-      ASSERT (sizeof (u64) == iv_sz);
       /* construct nonce in a scratch space in front of the IP header */
-      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
-						    hdr_len - sizeof (*nonce));
-      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+      esp_ctr_nonce_t *nonce =
+	(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
       if (ipsec_sa_is_set_IS_AEAD (sa))
 	{
 	  /* construct aad in a scratch space in front of the nonce */
@@ -496,13 +527,17 @@
 	}
 
       nonce->salt = sa->salt;
-      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+      nonce->iv = *(u64 *) pkt_iv;
       iv = (u8 *) nonce;
     }
   else
     {
-      iv = payload - iv_sz;
-      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+      /* construct zero iv in front of the IP header */
+      iv = pkt_iv - hdr_len - iv_sz;
+      clib_memset_u8 (iv, 0, iv_sz);
+      /* include iv field in crypto */
+      crypto_start_offset -= iv_sz;
+      crypto_total_len += iv_sz;
     }
 
   if (lb != b)
@@ -510,13 +545,14 @@
       /* chain */
       flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
       tag = vlib_buffer_get_tail (lb) - icv_sz;
-      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
-						   payload, payload_len, 0);
+      crypto_total_len = esp_encrypt_chain_crypto (
+	vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+	crypto_total_len + icv_sz, 0);
     }
 
   if (sa->integ_op_id)
     {
-      integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+      integ_start_offset -= iv_sz + sizeof (esp_header_t);
       integ_total_len += iv_sz + sizeof (esp_header_t);
 
       if (b != lb)
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index df079b1..88d5c42 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -137,7 +137,7 @@
   u32 seq;
   u32 seq_hi;
   u64 replay_window;
-  u64 ctr_iv_counter;
+  u64 iv_counter;
   dpo_id_t dpo;
 
   vnet_crypto_key_index_t crypto_key_index;
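
For the record, a self-contained demonstration (a toy 8-byte "block
cipher" standing in for AES, illustrative only, not VPP code) of why
this change is transparent on the wire: CBC-encrypting
[counter | payload] under a zero IV emits E_k(counter) in the ESP IV
slot, and the payload ciphertext is bit-identical to a normal CBC
encryption that uses that (now unpredictable) value as an explicit IV,
so peers decrypt as before:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BS 8 /* toy block size; AES uses 16 */

    /* hypothetical invertible per-byte "cipher", standing in for E_k */
    static void
    toy_encrypt_block (uint8_t key, const uint8_t in[BS], uint8_t out[BS])
    {
      for (int i = 0; i < BS; i++)
        out[i] = (uint8_t) ((in[i] ^ key) * 167u + 13u);
    }

    static void
    toy_cbc_encrypt (uint8_t key, const uint8_t *iv, const uint8_t *in,
                     uint8_t *out, int n_blocks)
    {
      uint8_t x[BS];
      const uint8_t *prev = iv;
      for (int b = 0; b < n_blocks; b++)
        {
          for (int i = 0; i < BS; i++)
            x[i] = in[b * BS + i] ^ prev[i];
          toy_encrypt_block (key, x, out + b * BS);
          prev = out + b * BS;
        }
    }

    int
    main (void)
    {
      uint8_t key = 0x5a, zero_iv[BS] = { 0 };
      uint8_t counter[BS] = { 42, 0, 0, 0, 0, 0, 0, 0 }; /* predictable */
      uint8_t payload[2 * BS] = "0123456789abcde";

      /* sender after this patch: encrypt [counter | payload], zero IV */
      uint8_t msg[3 * BS], ct[3 * BS];
      memcpy (msg, counter, BS);
      memcpy (msg + BS, payload, 2 * BS);
      toy_cbc_encrypt (key, zero_iv, msg, ct, 3);

      /* receiver view: the wire IV is E_k(counter), unpredictable */
      uint8_t wire_iv[BS], ct2[2 * BS];
      toy_encrypt_block (key, counter, wire_iv);
      toy_cbc_encrypt (key, wire_iv, payload, ct2, 2);

      printf ("wire IV == E_k(counter): %s\n",
              memcmp (ct, wire_iv, BS) ? "no" : "yes");
      printf ("payload ciphertext unchanged: %s\n",
              memcmp (ct + BS, ct2, 2 * BS) ? "no" : "yes");
      return 0;
    }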