blob: e6c7498c0d3da121dea6eec31fe5b2b47da0823a [file] [log] [blame]
/*
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15#ifndef __DPDK_IPSEC_H__
16#define __DPDK_IPSEC_H__
17
18#include <vnet/vnet.h>
19
20#undef always_inline
21#include <rte_crypto.h>
22#include <rte_cryptodev.h>
23
24#if CLIB_DEBUG > 0
25#define always_inline static inline
26#else
27#define always_inline static inline __attribute__ ((__always_inline__))
28#endif
29
30
31#define MAX_QP_PER_LCORE 16
32
33typedef struct
34{
Radu Nicolau6929ea92016-11-29 11:00:30 +000035 u32 salt;
36 u32 iv[2];
37 u32 cnt;
38} dpdk_gcm_cnt_blk;
39
40typedef struct
41{
42 dpdk_gcm_cnt_blk cb;
43 union
44 {
45 u8 aad[12];
46 u8 icv[64];
47 };
Sergio Gonzalez Monroya10f62b2016-11-25 13:36:12 +000048} dpdk_cop_priv_t;
49
50typedef struct
51{
52 u8 cipher_algo;
53 u8 auth_algo;
54 u8 is_outbound;
55} crypto_worker_qp_key_t;
56
57typedef struct
58{
59 u16 dev_id;
60 u16 qp_id;
61 u16 is_outbound;
62 i16 inflights;
63 u32 bi[VLIB_FRAME_SIZE];
64 struct rte_crypto_op *cops[VLIB_FRAME_SIZE];
65 struct rte_crypto_op **free_cops;
66} crypto_qp_data_t;
67
68typedef struct
69{
70 u8 qp_index;
71 void *sess;
72} crypto_sa_session_t;
73
74typedef struct
75{
76 crypto_sa_session_t *sa_sess_d[2];
77 crypto_qp_data_t *qp_data;
78 uword *algo_qp_map;
79} crypto_worker_main_t;
80
81typedef struct
82{
83 struct rte_mempool **cop_pools;
84 crypto_worker_main_t *workers_main;
85} dpdk_crypto_main_t;
86
87dpdk_crypto_main_t dpdk_crypto_main;
88
89extern vlib_node_registration_t dpdk_crypto_input_node;
90
91#define CRYPTO_N_FREE_COPS (VLIB_FRAME_SIZE * 3)
92
93static_always_inline void
94crypto_alloc_cops ()
95{
96 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
97 u32 cpu_index = os_get_cpu_number ();
98 crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index];
99 unsigned socket_id = rte_socket_id ();
100 crypto_qp_data_t *qpd;
101
102 /* *INDENT-OFF* */
103 vec_foreach (qpd, cwm->qp_data)
104 {
105 u32 l = vec_len (qpd->free_cops);
106
107 if (PREDICT_FALSE (l < VLIB_FRAME_SIZE))
108 {
109 u32 n_alloc;
110
111 if (PREDICT_FALSE (!qpd->free_cops))
112 vec_alloc (qpd->free_cops, CRYPTO_N_FREE_COPS);
113
114 n_alloc = rte_crypto_op_bulk_alloc (dcm->cop_pools[socket_id],
115 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
116 &qpd->free_cops[l],
117 CRYPTO_N_FREE_COPS - l - 1);
118
119 _vec_len (qpd->free_cops) = l + n_alloc;
120 }
121 }
122 /* *INDENT-ON* */
123}
124
125static_always_inline void
126crypto_free_cop (crypto_qp_data_t * qpd, struct rte_crypto_op **cops, u32 n)
127{
128 u32 l = vec_len (qpd->free_cops);
129
130 if (l + n >= CRYPTO_N_FREE_COPS)
131 {
132 l -= VLIB_FRAME_SIZE;
133 rte_mempool_put_bulk (cops[0]->mempool,
134 (void **) &qpd->free_cops[l], VLIB_FRAME_SIZE);
135 }
136 clib_memcpy (&qpd->free_cops[l], cops, sizeof (*cops) * n);
137
138 _vec_len (qpd->free_cops) = l + n;
139}
140
141static_always_inline int
142check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
143 char *name)
144{
145 struct
146 {
147 uint8_t cipher_algo;
148 enum rte_crypto_sym_xform_type type;
149 union
150 {
151 enum rte_crypto_auth_algorithm auth;
152 enum rte_crypto_cipher_algorithm cipher;
153 };
154 char *name;
155 } supported_algo[] =
156 {
157 {
158 .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
159 RTE_CRYPTO_CIPHER_NULL,.name = "NULL"},
160 {
161 .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
162 RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"},
163 {
164 .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
165 RTE_CRYPTO_CIPHER_AES_CTR,.name = "AES_CTR"},
166 {
167 .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
168 RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"},
169 {
170 .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.auth =
171 RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
172 {
173 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
174 RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"},
175 {
176 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
177 RTE_CRYPTO_AUTH_SHA256_HMAC,.name = "HMAC-SHA256"},
178 {
179 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
180 RTE_CRYPTO_AUTH_SHA384_HMAC,.name = "HMAC-SHA384"},
181 {
182 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
183 RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"},
184 {
185 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
186 RTE_CRYPTO_AUTH_AES_XCBC_MAC,.name = "AES-XCBC-MAC"},
187 {
188 .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
189 RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"},
190 {
191 /* tail */
192 .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},};
193 uint32_t i = 0;
194
195 if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
196 return -1;
197
198 while (supported_algo[i].type != RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
199 {
200 if (cap->sym.xform_type == supported_algo[i].type)
201 {
202 if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
203 cap->sym.cipher.algo == supported_algo[i].cipher) ||
204 (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
205 cap->sym.auth.algo == supported_algo[i].auth))
206 {
207 if (name)
208 strcpy (name, supported_algo[i].name);
209 return 0;
210 }
211 }
212
213 i++;
214 }
215
216 return -1;
217}
218
219#endif /* __DPDK_IPSEC_H__ */
220
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */