blob: faea9d728fd20445758eccd8c070fdead856f709 [file] [log] [blame]
Kyle Swenson8d8f6542021-03-15 11:02:55 -06001/*
2 * algif_aead: User-space interface for AEAD algorithms
3 *
4 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
5 *
6 * This file provides the user-space API for AEAD ciphers.
7 *
8 * This file is derived from algif_skcipher.c.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 */
15
16#include <crypto/aead.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/if_alg.h>
19#include <linux/init.h>
20#include <linux/list.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/module.h>
24#include <linux/net.h>
25#include <net/sock.h>
26
27struct aead_sg_list {
28 unsigned int cur;
29 struct scatterlist sg[ALG_MAX_PAGES];
30};
31
32struct aead_tfm {
33 struct crypto_aead *aead;
34 bool has_key;
35};
36
37struct aead_ctx {
38 struct aead_sg_list tsgl;
39 /*
40 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
41 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
42 * pages
43 */
44#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
45 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
46
47 void *iv;
48
49 struct af_alg_completion completion;
50
51 unsigned long used;
52
53 unsigned int len;
54 bool more;
55 bool merge;
56 bool enc;
57
58 size_t aead_assoclen;
59 struct aead_request aead_req;
60};
61
62static inline int aead_sndbuf(struct sock *sk)
63{
64 struct alg_sock *ask = alg_sk(sk);
65 struct aead_ctx *ctx = ask->private;
66
67 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
68 ctx->used, 0);
69}
70
71static inline bool aead_writable(struct sock *sk)
72{
73 return PAGE_SIZE <= aead_sndbuf(sk);
74}
75
76static inline bool aead_sufficient_data(struct aead_ctx *ctx)
77{
78 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
79
80 return ctx->used >= ctx->aead_assoclen + as;
81}
82
83static void aead_put_sgl(struct sock *sk)
84{
85 struct alg_sock *ask = alg_sk(sk);
86 struct aead_ctx *ctx = ask->private;
87 struct aead_sg_list *sgl = &ctx->tsgl;
88 struct scatterlist *sg = sgl->sg;
89 unsigned int i;
90
91 for (i = 0; i < sgl->cur; i++) {
92 if (!sg_page(sg + i))
93 continue;
94
95 put_page(sg_page(sg + i));
96 sg_assign_page(sg + i, NULL);
97 }
98 sg_init_table(sg, ALG_MAX_PAGES);
99 sgl->cur = 0;
100 ctx->used = 0;
101 ctx->more = 0;
102 ctx->merge = 0;
103}
104
105static void aead_wmem_wakeup(struct sock *sk)
106{
107 struct socket_wq *wq;
108
109 if (!aead_writable(sk))
110 return;
111
112 rcu_read_lock();
113 wq = rcu_dereference(sk->sk_wq);
114 if (wq_has_sleeper(wq))
115 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
116 POLLRDNORM |
117 POLLRDBAND);
118 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
119 rcu_read_unlock();
120}
121
122static int aead_wait_for_data(struct sock *sk, unsigned flags)
123{
124 struct alg_sock *ask = alg_sk(sk);
125 struct aead_ctx *ctx = ask->private;
126 long timeout;
127 DEFINE_WAIT(wait);
128 int err = -ERESTARTSYS;
129
130 if (flags & MSG_DONTWAIT)
131 return -EAGAIN;
132
133 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
134
135 for (;;) {
136 if (signal_pending(current))
137 break;
138 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
139 timeout = MAX_SCHEDULE_TIMEOUT;
140 if (sk_wait_event(sk, &timeout, !ctx->more)) {
141 err = 0;
142 break;
143 }
144 }
145 finish_wait(sk_sleep(sk), &wait);
146
147 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
148
149 return err;
150}
151
152static void aead_data_wakeup(struct sock *sk)
153{
154 struct alg_sock *ask = alg_sk(sk);
155 struct aead_ctx *ctx = ask->private;
156 struct socket_wq *wq;
157
158 if (ctx->more)
159 return;
160 if (!ctx->used)
161 return;
162
163 rcu_read_lock();
164 wq = rcu_dereference(sk->sk_wq);
165 if (wq_has_sleeper(wq))
166 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
167 POLLRDNORM |
168 POLLRDBAND);
169 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
170 rcu_read_unlock();
171}
172
173static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
174{
175 struct sock *sk = sock->sk;
176 struct alg_sock *ask = alg_sk(sk);
177 struct aead_ctx *ctx = ask->private;
178 unsigned ivsize =
179 crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
180 struct aead_sg_list *sgl = &ctx->tsgl;
181 struct af_alg_control con = {};
182 long copied = 0;
183 bool enc = 0;
184 bool init = 0;
185 int err = -EINVAL;
186
187 if (msg->msg_controllen) {
188 err = af_alg_cmsg_send(msg, &con);
189 if (err)
190 return err;
191
192 init = 1;
193 switch (con.op) {
194 case ALG_OP_ENCRYPT:
195 enc = 1;
196 break;
197 case ALG_OP_DECRYPT:
198 enc = 0;
199 break;
200 default:
201 return -EINVAL;
202 }
203
204 if (con.iv && con.iv->ivlen != ivsize)
205 return -EINVAL;
206 }
207
208 lock_sock(sk);
209 if (!ctx->more && ctx->used)
210 goto unlock;
211
212 if (init) {
213 ctx->enc = enc;
214 if (con.iv)
215 memcpy(ctx->iv, con.iv->iv, ivsize);
216
217 ctx->aead_assoclen = con.aead_assoclen;
218 }
219
220 while (size) {
221 unsigned long len = size;
222 struct scatterlist *sg = NULL;
223
224 /* use the existing memory in an allocated page */
225 if (ctx->merge) {
226 sg = sgl->sg + sgl->cur - 1;
227 len = min_t(unsigned long, len,
228 PAGE_SIZE - sg->offset - sg->length);
229 err = memcpy_from_msg(page_address(sg_page(sg)) +
230 sg->offset + sg->length,
231 msg, len);
232 if (err)
233 goto unlock;
234
235 sg->length += len;
236 ctx->merge = (sg->offset + sg->length) &
237 (PAGE_SIZE - 1);
238
239 ctx->used += len;
240 copied += len;
241 size -= len;
242 continue;
243 }
244
245 if (!aead_writable(sk)) {
246 /* user space sent too much data */
247 aead_put_sgl(sk);
248 err = -EMSGSIZE;
249 goto unlock;
250 }
251
252 /* allocate a new page */
253 len = min_t(unsigned long, size, aead_sndbuf(sk));
254 while (len) {
255 int plen = 0;
256
257 if (sgl->cur >= ALG_MAX_PAGES) {
258 aead_put_sgl(sk);
259 err = -E2BIG;
260 goto unlock;
261 }
262
263 sg = sgl->sg + sgl->cur;
264 plen = min_t(int, len, PAGE_SIZE);
265
266 sg_assign_page(sg, alloc_page(GFP_KERNEL));
267 err = -ENOMEM;
268 if (!sg_page(sg))
269 goto unlock;
270
271 err = memcpy_from_msg(page_address(sg_page(sg)),
272 msg, plen);
273 if (err) {
274 __free_page(sg_page(sg));
275 sg_assign_page(sg, NULL);
276 goto unlock;
277 }
278
279 sg->offset = 0;
280 sg->length = plen;
281 len -= plen;
282 ctx->used += plen;
283 copied += plen;
284 sgl->cur++;
285 size -= plen;
286 ctx->merge = plen & (PAGE_SIZE - 1);
287 }
288 }
289
290 err = 0;
291
292 ctx->more = msg->msg_flags & MSG_MORE;
293 if (!ctx->more && !aead_sufficient_data(ctx)) {
294 aead_put_sgl(sk);
295 err = -EMSGSIZE;
296 }
297
298unlock:
299 aead_data_wakeup(sk);
300 release_sock(sk);
301
302 return err ?: copied;
303}
304
305static ssize_t aead_sendpage(struct socket *sock, struct page *page,
306 int offset, size_t size, int flags)
307{
308 struct sock *sk = sock->sk;
309 struct alg_sock *ask = alg_sk(sk);
310 struct aead_ctx *ctx = ask->private;
311 struct aead_sg_list *sgl = &ctx->tsgl;
312 int err = -EINVAL;
313
314 if (flags & MSG_SENDPAGE_NOTLAST)
315 flags |= MSG_MORE;
316
317 if (sgl->cur >= ALG_MAX_PAGES)
318 return -E2BIG;
319
320 lock_sock(sk);
321 if (!ctx->more && ctx->used)
322 goto unlock;
323
324 if (!size)
325 goto done;
326
327 if (!aead_writable(sk)) {
328 /* user space sent too much data */
329 aead_put_sgl(sk);
330 err = -EMSGSIZE;
331 goto unlock;
332 }
333
334 ctx->merge = 0;
335
336 get_page(page);
337 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
338 sgl->cur++;
339 ctx->used += size;
340
341 err = 0;
342
343done:
344 ctx->more = flags & MSG_MORE;
345 if (!ctx->more && !aead_sufficient_data(ctx)) {
346 aead_put_sgl(sk);
347 err = -EMSGSIZE;
348 }
349
350unlock:
351 aead_data_wakeup(sk);
352 release_sock(sk);
353
354 return err ?: size;
355}
356
357static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
358{
359 struct sock *sk = sock->sk;
360 struct alg_sock *ask = alg_sk(sk);
361 struct aead_ctx *ctx = ask->private;
362 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
363 struct aead_sg_list *sgl = &ctx->tsgl;
364 unsigned int i = 0;
365 int err = -EINVAL;
366 unsigned long used = 0;
367 size_t outlen = 0;
368 size_t usedpages = 0;
369 unsigned int cnt = 0;
370
371 /* Limit number of IOV blocks to be accessed below */
372 if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
373 return -ENOMSG;
374
375 lock_sock(sk);
376
377 /*
378 * AEAD memory structure: For encryption, the tag is appended to the
379 * ciphertext which implies that the memory allocated for the ciphertext
380 * must be increased by the tag length. For decryption, the tag
381 * is expected to be concatenated to the ciphertext. The plaintext
382 * therefore has a memory size of the ciphertext minus the tag length.
383 *
384 * The memory structure for cipher operation has the following
385 * structure:
386 * AEAD encryption input: assoc data || plaintext
387 * AEAD encryption output: cipherntext || auth tag
388 * AEAD decryption input: assoc data || ciphertext || auth tag
389 * AEAD decryption output: plaintext
390 */
391
392 if (ctx->more) {
393 err = aead_wait_for_data(sk, flags);
394 if (err)
395 goto unlock;
396 }
397
398 used = ctx->used;
399
400 /*
401 * Make sure sufficient data is present -- note, the same check is
402 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
403 * shall provide an information to the data sender that something is
404 * wrong, but they are irrelevant to maintain the kernel integrity.
405 * We need this check here too in case user space decides to not honor
406 * the error message in sendmsg/sendpage and still call recvmsg. This
407 * check here protects the kernel integrity.
408 */
409 if (!aead_sufficient_data(ctx))
410 goto unlock;
411
412 outlen = used;
413
414 /*
415 * The cipher operation input data is reduced by the associated data
416 * length as this data is processed separately later on.
417 */
418 used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
419
420 /* convert iovecs of output buffers into scatterlists */
421 while (iov_iter_count(&msg->msg_iter)) {
422 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
423 (outlen - usedpages));
424
425 /* make one iovec available as scatterlist */
426 err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
427 seglen);
428 if (err < 0)
429 goto unlock;
430 usedpages += err;
431 /* chain the new scatterlist with previous one */
432 if (cnt)
433 af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
434
435 /* we do not need more iovecs as we have sufficient memory */
436 if (outlen <= usedpages)
437 break;
438 iov_iter_advance(&msg->msg_iter, err);
439 cnt++;
440 }
441
442 err = -EINVAL;
443 /* ensure output buffer is sufficiently large */
444 if (usedpages < outlen)
445 goto unlock;
446
447 sg_mark_end(sgl->sg + sgl->cur - 1);
448
449 aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
450 used, ctx->iv);
451 aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
452
453 err = af_alg_wait_for_completion(ctx->enc ?
454 crypto_aead_encrypt(&ctx->aead_req) :
455 crypto_aead_decrypt(&ctx->aead_req),
456 &ctx->completion);
457
458 if (err) {
459 /* EBADMSG implies a valid cipher operation took place */
460 if (err == -EBADMSG)
461 aead_put_sgl(sk);
462 goto unlock;
463 }
464
465 aead_put_sgl(sk);
466
467 err = 0;
468
469unlock:
470 for (i = 0; i < cnt; i++)
471 af_alg_free_sg(&ctx->rsgl[i]);
472
473 aead_wmem_wakeup(sk);
474 release_sock(sk);
475
476 return err ? err : outlen;
477}
478
479static unsigned int aead_poll(struct file *file, struct socket *sock,
480 poll_table *wait)
481{
482 struct sock *sk = sock->sk;
483 struct alg_sock *ask = alg_sk(sk);
484 struct aead_ctx *ctx = ask->private;
485 unsigned int mask;
486
487 sock_poll_wait(file, sk_sleep(sk), wait);
488 mask = 0;
489
490 if (!ctx->more)
491 mask |= POLLIN | POLLRDNORM;
492
493 if (aead_writable(sk))
494 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
495
496 return mask;
497}
498
499static struct proto_ops algif_aead_ops = {
500 .family = PF_ALG,
501
502 .connect = sock_no_connect,
503 .socketpair = sock_no_socketpair,
504 .getname = sock_no_getname,
505 .ioctl = sock_no_ioctl,
506 .listen = sock_no_listen,
507 .shutdown = sock_no_shutdown,
508 .getsockopt = sock_no_getsockopt,
509 .mmap = sock_no_mmap,
510 .bind = sock_no_bind,
511 .accept = sock_no_accept,
512 .setsockopt = sock_no_setsockopt,
513
514 .release = af_alg_release,
515 .sendmsg = aead_sendmsg,
516 .sendpage = aead_sendpage,
517 .recvmsg = aead_recvmsg,
518 .poll = aead_poll,
519};
520
521static int aead_check_key(struct socket *sock)
522{
523 int err = 0;
524 struct sock *psk;
525 struct alg_sock *pask;
526 struct aead_tfm *tfm;
527 struct sock *sk = sock->sk;
528 struct alg_sock *ask = alg_sk(sk);
529
530 lock_sock(sk);
531 if (ask->refcnt)
532 goto unlock_child;
533
534 psk = ask->parent;
535 pask = alg_sk(ask->parent);
536 tfm = pask->private;
537
538 err = -ENOKEY;
539 lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
540 if (!tfm->has_key)
541 goto unlock;
542
543 if (!pask->refcnt++)
544 sock_hold(psk);
545
546 ask->refcnt = 1;
547 sock_put(psk);
548
549 err = 0;
550
551unlock:
552 release_sock(psk);
553unlock_child:
554 release_sock(sk);
555
556 return err;
557}
558
559static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
560 size_t size)
561{
562 int err;
563
564 err = aead_check_key(sock);
565 if (err)
566 return err;
567
568 return aead_sendmsg(sock, msg, size);
569}
570
571static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
572 int offset, size_t size, int flags)
573{
574 int err;
575
576 err = aead_check_key(sock);
577 if (err)
578 return err;
579
580 return aead_sendpage(sock, page, offset, size, flags);
581}
582
583static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
584 size_t ignored, int flags)
585{
586 int err;
587
588 err = aead_check_key(sock);
589 if (err)
590 return err;
591
592 return aead_recvmsg(sock, msg, ignored, flags);
593}
594
595static struct proto_ops algif_aead_ops_nokey = {
596 .family = PF_ALG,
597
598 .connect = sock_no_connect,
599 .socketpair = sock_no_socketpair,
600 .getname = sock_no_getname,
601 .ioctl = sock_no_ioctl,
602 .listen = sock_no_listen,
603 .shutdown = sock_no_shutdown,
604 .getsockopt = sock_no_getsockopt,
605 .mmap = sock_no_mmap,
606 .bind = sock_no_bind,
607 .accept = sock_no_accept,
608 .setsockopt = sock_no_setsockopt,
609
610 .release = af_alg_release,
611 .sendmsg = aead_sendmsg_nokey,
612 .sendpage = aead_sendpage_nokey,
613 .recvmsg = aead_recvmsg_nokey,
614 .poll = aead_poll,
615};
616
617static void *aead_bind(const char *name, u32 type, u32 mask)
618{
619 struct aead_tfm *tfm;
620 struct crypto_aead *aead;
621
622 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
623 if (!tfm)
624 return ERR_PTR(-ENOMEM);
625
626 aead = crypto_alloc_aead(name, type, mask);
627 if (IS_ERR(aead)) {
628 kfree(tfm);
629 return ERR_CAST(aead);
630 }
631
632 tfm->aead = aead;
633
634 return tfm;
635}
636
637static void aead_release(void *private)
638{
639 struct aead_tfm *tfm = private;
640
641 crypto_free_aead(tfm->aead);
642 kfree(tfm);
643}
644
645static int aead_setauthsize(void *private, unsigned int authsize)
646{
647 struct aead_tfm *tfm = private;
648
649 return crypto_aead_setauthsize(tfm->aead, authsize);
650}
651
652static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
653{
654 struct aead_tfm *tfm = private;
655 int err;
656
657 err = crypto_aead_setkey(tfm->aead, key, keylen);
658 tfm->has_key = !err;
659
660 return err;
661}
662
663static void aead_sock_destruct(struct sock *sk)
664{
665 struct alg_sock *ask = alg_sk(sk);
666 struct aead_ctx *ctx = ask->private;
667 unsigned int ivlen = crypto_aead_ivsize(
668 crypto_aead_reqtfm(&ctx->aead_req));
669
670 aead_put_sgl(sk);
671 sock_kzfree_s(sk, ctx->iv, ivlen);
672 sock_kfree_s(sk, ctx, ctx->len);
673 af_alg_release_parent(sk);
674}
675
676static int aead_accept_parent_nokey(void *private, struct sock *sk)
677{
678 struct aead_ctx *ctx;
679 struct alg_sock *ask = alg_sk(sk);
680 struct aead_tfm *tfm = private;
681 struct crypto_aead *aead = tfm->aead;
682 unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
683 unsigned int ivlen = crypto_aead_ivsize(aead);
684
685 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
686 if (!ctx)
687 return -ENOMEM;
688 memset(ctx, 0, len);
689
690 ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
691 if (!ctx->iv) {
692 sock_kfree_s(sk, ctx, len);
693 return -ENOMEM;
694 }
695 memset(ctx->iv, 0, ivlen);
696
697 ctx->len = len;
698 ctx->used = 0;
699 ctx->more = 0;
700 ctx->merge = 0;
701 ctx->enc = 0;
702 ctx->tsgl.cur = 0;
703 ctx->aead_assoclen = 0;
704 af_alg_init_completion(&ctx->completion);
705 sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
706
707 ask->private = ctx;
708
709 aead_request_set_tfm(&ctx->aead_req, aead);
710 aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
711 af_alg_complete, &ctx->completion);
712
713 sk->sk_destruct = aead_sock_destruct;
714
715 return 0;
716}
717
718static int aead_accept_parent(void *private, struct sock *sk)
719{
720 struct aead_tfm *tfm = private;
721
722 if (!tfm->has_key)
723 return -ENOKEY;
724
725 return aead_accept_parent_nokey(private, sk);
726}
727
728static const struct af_alg_type algif_type_aead = {
729 .bind = aead_bind,
730 .release = aead_release,
731 .setkey = aead_setkey,
732 .setauthsize = aead_setauthsize,
733 .accept = aead_accept_parent,
734 .accept_nokey = aead_accept_parent_nokey,
735 .ops = &algif_aead_ops,
736 .ops_nokey = &algif_aead_ops_nokey,
737 .name = "aead",
738 .owner = THIS_MODULE
739};
740
741static int __init algif_aead_init(void)
742{
743 return af_alg_register_type(&algif_type_aead);
744}
745
746static void __exit algif_aead_exit(void)
747{
748 int err = af_alg_unregister_type(&algif_type_aead);
749 BUG_ON(err);
750}
751
752module_init(algif_aead_init);
753module_exit(algif_aead_exit);
754MODULE_LICENSE("GPL");
755MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
756MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");