/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

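/*
 * Per-request state saved while an operation runs with a diverted,
 * aligned result buffer; see the comment in ahash_save_req() below.
 * ubuf provides suitably aligned scratch space for the digest.
 */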
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

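/*
 * Map the current page of the walk and clamp the number of bytes to
 * process so that an unaligned offset is first brought up to the next
 * alignment-mask boundary.
 */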
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

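/*
 * Begin walking a new scatterlist entry: locate its page and in-page
 * offset, and cap the entry length by the total bytes remaining.
 */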
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

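/*
 * Finish the current step of the walk. A positive return is the number
 * of bytes now available for hashing; zero means the walk is complete,
 * and a negative value propagates the error passed in via @err.
 */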
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The "may sleep" test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

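/*
 * Start a walk over the source scatterlist of @req for a synchronous
 * (atomically mapped) hash operation.
 */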
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

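/*
 * As crypto_hash_walk_first(), but mark the walk CRYPTO_ALG_ASYNC so
 * that pages are mapped with kmap() and the caller may sleep between
 * steps.
 */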
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

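/*
 * Copy an unaligned key into a freshly allocated, aligned bounce
 * buffer before handing it to the algorithm's setkey.
 */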
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

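/*
 * Set the key for a transform, bouncing through an aligned buffer if
 * the caller's key pointer does not satisfy the alignment mask.
 */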
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned int len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

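/*
 * Divert the result and completion of @req into private, aligned
 * storage so the operation can run with an aligned result buffer;
 * undone by ahash_restore_req().
 */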
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

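/*
 * Undo ahash_save_req(): copy the digest back into the caller's result
 * buffer on success, restore the original callback and result pointer,
 * and free the private state.
 */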
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the private state of the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" of the ADJUSTED request from ahash_op_unaligned();
	 * since base.data was set to point back at the request itself,
	 * req->data is also the ADJUSTED "req".
	 */

	/* First copy req->result into priv->result. */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

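/*
 * Run @op with the result diverted into an aligned buffer, restoring
 * the original request immediately for synchronous completions.
 */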
static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

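/*
 * Common entry point for final/finup/digest: take the unaligned slow
 * path only when the caller's result buffer violates the alignmask.
 */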
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

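/*
 * Default finup implementation for drivers that only provide update()
 * and final(): chain the two steps, preserving asynchronous completion
 * at either stage via the _done1/_done2 callbacks above.
 */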
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

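/*
 * Initialise an ahash transform: install default (error-returning)
 * callbacks, wrap a plain shash asynchronously if necessary, and
 * otherwise wire the tfm's entry points to the algorithm's own.
 */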
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

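/*
 * crypto_alloc_ahash - allocate an ahash transform by algorithm name.
 *
 * A minimal usage sketch, not taken from this file (error handling and
 * completion waiting are elided; "sha256", my_done_cb, my_ctx, sg,
 * digest and nbytes are caller-supplied placeholders):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	(may return -EINPROGRESS)
 *	...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */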
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");