* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
+ * The memory management works as follows:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. When a recvmsg call arrives, the caller must
+ * provide a buffer, which is then tracked with the RX SGL.
+ *
+ * During the recvmsg operation, the cipher request is allocated and
+ * prepared. As part of that operation, the TX buffers to be processed are
+ * extracted from the TX SGL into a separate, per-request SGL.
+ *
+ * After the crypto operation completes, the RX SGL and the cipher request
+ * are released. The extracted TX SGL parts are released together with the
+ * RX SGL.
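+ *
+ * As an illustration, a minimal user space call sequence might look as
+ * follows (the AF_ALG setup via socket(AF_ALG)/bind/accept is omitted;
+ * opfd denotes the accept()ed operation socket):
+ *
+ *	sendmsg(opfd, &msg_in, 0);   - queue plaintext into the TX SGL
+ *	recvmsg(opfd, &msg_out, 0);  - supply the RX buffer, run the cipher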
*/
#include <crypto/scatterwalk.h>
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_sg_list {
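+/* TX SGL block: tracks the data queued by sendmsg/sendpage. */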
+struct skcipher_tsgl {
struct list_head list;
-
int cur;
-
struct scatterlist sg[0];
};
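+/* RX SGL part: one entry per iovec supplied with a recvmsg call. */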
+struct skcipher_rsgl {
+ struct af_alg_sgl sgl;
+ struct list_head list;
+ size_t sg_num_bytes;
+};
+
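+/* State of one cipher operation, allocated when recvmsg is processed. */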
+struct skcipher_async_req {
+ struct kiocb *iocb;
+ struct sock *sk;
+
+ struct skcipher_rsgl first_sgl;
+ struct list_head rsgl_list;
+
+ struct scatterlist *tsgl;
+ unsigned int tsgl_entries;
+
+ unsigned int areqlen;
+ struct skcipher_request req;
+};
+
struct skcipher_tfm {
struct crypto_skcipher *skcipher;
bool has_key;
};
struct skcipher_ctx {
- struct list_head tsgl;
- struct af_alg_sgl rsgl;
+ struct list_head tsgl_list;
void *iv;
struct af_alg_completion completion;
- atomic_t inflight;
size_t used;
+ size_t rcvused;
- unsigned int len;
bool more;
bool merge;
bool enc;
- struct skcipher_request req;
-};
-
-struct skcipher_async_rsgl {
- struct af_alg_sgl sgl;
- struct list_head list;
-};
-
-struct skcipher_async_req {
- struct kiocb *iocb;
- struct skcipher_async_rsgl first_sgl;
- struct list_head list;
- struct scatterlist *tsg;
- atomic_t *inflight;
- struct skcipher_request req;
+ unsigned int len;
};
-#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \
sizeof(struct scatterlist) - 1)
-static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
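+/* Bytes of send buffer space still available for sendmsg/sendpage. */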
+static inline int skcipher_sndbuf(struct sock *sk)
{
- struct skcipher_async_rsgl *rsgl, *tmp;
- struct scatterlist *sgl;
- struct scatterlist *sg;
- int i, n;
-
- list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &sreq->first_sgl)
- kfree(rsgl);
- }
- sgl = sreq->tsg;
- n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
- kfree(sreq->tsg);
+ return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->used, 0);
}
-static void skcipher_async_cb(struct crypto_async_request *req, int err)
+static inline bool skcipher_writable(struct sock *sk)
{
- struct skcipher_async_req *sreq = req->data;
- struct kiocb *iocb = sreq->iocb;
-
- atomic_dec(sreq->inflight);
- skcipher_free_async_sgls(sreq);
- kzfree(sreq);
- iocb->ki_complete(iocb, err, err);
+ return PAGE_SIZE <= skcipher_sndbuf(sk);
}
-static inline int skcipher_sndbuf(struct sock *sk)
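+/* Bytes of receive buffer space still available to a recvmsg call. */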
+static inline int skcipher_rcvbuf(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->used, 0);
+ return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->rcvused, 0);
}
-static inline bool skcipher_writable(struct sock *sk)
+static inline bool skcipher_readable(struct sock *sk)
{
- return PAGE_SIZE <= skcipher_sndbuf(sk);
+ return PAGE_SIZE <= skcipher_rcvbuf(sk);
}
-static int skcipher_alloc_sgl(struct sock *sk)
+static int skcipher_alloc_tsgl(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
+ struct skcipher_tsgl *sgl;
struct scatterlist *sg = NULL;
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
- if (!list_empty(&ctx->tsgl))
+ sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
+ if (!list_empty(&ctx->tsgl_list))
sg = sgl->sg;
if (!sg || sgl->cur >= MAX_SGL_ENTS) {
if (sg)
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
- list_add_tail(&sgl->list, &ctx->tsgl);
+ list_add_tail(&sgl->list, &ctx->tsgl_list);
}
return 0;
}
-static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
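+/*
+ * Count the SG entries at the head of the TX SGL that are needed to hold
+ * the first @bytes bytes of queued data.
+ */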
+static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_tsgl *sgl, *tmp;
+ unsigned int i;
+ unsigned int sgl_count = 0;
+
+ if (!bytes)
+ return 0;
+
+ list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+ struct scatterlist *sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+ sgl_count++;
+ if (sg[i].length >= bytes)
+ return sgl_count;
+
+ bytes -= sg[i].length;
+ }
+ }
+
+ return sgl_count;
+}
+
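+/*
+ * Release the first @used bytes of the TX SGL. If @dst is non-NULL, the
+ * consumed pages are handed over to @dst instead of having their
+ * references dropped; the caller then owns those page references.
+ */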
+static void skcipher_pull_tsgl(struct sock *sk, size_t used,
+ struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
+ struct skcipher_tsgl *sgl;
struct scatterlist *sg;
- int i;
+	unsigned int i, j = 0;
- while (!list_empty(&ctx->tsgl)) {
- sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+ while (!list_empty(&ctx->tsgl_list)) {
+ sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl,
list);
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
size_t plen = min_t(size_t, used, sg[i].length);
+ struct page *page = sg_page(sg + i);
- if (!sg_page(sg + i))
+ if (!page)
continue;
+			/*
+			 * Assumption: the caller created
+			 * skcipher_count_tsgl(sk, len) SG entries in dst.
+			 * The dst index j is not reset per TX SGL block so
+			 * entries are not overwritten when the data spans
+			 * more than one block.
+			 */
+			if (dst)
+				sg_set_page(dst + j++, page, plen,
+					    sg[i].offset);
+
sg[i].length -= plen;
sg[i].offset += plen;
if (sg[i].length)
return;
- if (put)
- put_page(sg_page(sg + i));
+
+ if (!dst)
+ put_page(page);
sg_assign_page(sg + i, NULL);
}
list_del(&sgl->list);
- sock_kfree_s(sk, sgl,
- sizeof(*sgl) + sizeof(sgl->sg[0]) *
- (MAX_SGL_ENTS + 1));
+ sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
+ (MAX_SGL_ENTS + 1));
}
if (!ctx->used)
ctx->merge = 0;
}
-static void skcipher_free_sgl(struct sock *sk)
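+/* Release the RX SGL parts and the per-request TX SGL of one request. */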
+static void skcipher_free_areq_sgls(struct skcipher_async_req *areq)
{
+ struct sock *sk = areq->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_rsgl *rsgl, *tmp;
+ struct scatterlist *tsgl;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+ ctx->rcvused -= rsgl->sg_num_bytes;
+ af_alg_free_sg(&rsgl->sgl);
+ list_del(&rsgl->list);
+ if (rsgl != &areq->first_sgl)
+ sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+ }
+
+ tsgl = areq->tsgl;
+ for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+ if (!sg_page(sg))
+ continue;
+ put_page(sg_page(sg));
+ }
- skcipher_pull_sgl(sk, ctx->used, 1);
+ if (areq->tsgl && areq->tsgl_entries)
+ sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
- struct skcipher_sg_list *sgl;
+ struct skcipher_tsgl *sgl;
struct af_alg_control con = {};
long copied = 0;
bool enc = 0;
size_t plen;
if (ctx->merge) {
- sgl = list_entry(ctx->tsgl.prev,
- struct skcipher_sg_list, list);
+ sgl = list_entry(ctx->tsgl_list.prev,
+ struct skcipher_tsgl, list);
sg = sgl->sg + sgl->cur - 1;
len = min_t(unsigned long, len,
PAGE_SIZE - sg->offset - sg->length);
len = min_t(unsigned long, len, skcipher_sndbuf(sk));
- err = skcipher_alloc_sgl(sk);
+ err = skcipher_alloc_tsgl(sk);
if (err)
goto unlock;
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl,
+ list);
sg = sgl->sg;
if (sgl->cur)
sg_unmark_end(sg + sgl->cur - 1);
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
+ struct skcipher_tsgl *sgl;
int err = -EINVAL;
if (flags & MSG_SENDPAGE_NOTLAST)
goto unlock;
}
- err = skcipher_alloc_sgl(sk);
+ err = skcipher_alloc_tsgl(sk);
if (err)
goto unlock;
ctx->merge = 0;
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
if (sgl->cur)
sg_unmark_end(sgl->sg + sgl->cur - 1);
return err ?: size;
}
-static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
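+/*
+ * AIO completion callback: release the request resources and report the
+ * result to the caller's kiocb.
+ */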
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int nents = 0;
+ struct skcipher_async_req *areq = req->data;
+ struct sock *sk = areq->sk;
+ struct kiocb *iocb = areq->iocb;
+ unsigned int resultlen;
- list_for_each_entry(sgl, &ctx->tsgl, list) {
- sg = sgl->sg;
+ lock_sock(sk);
- while (!sg->length)
- sg++;
+	/* Number of bytes the crypto operation processed. */
+ resultlen = areq->req.cryptlen;
- nents += sg_nents(sg);
- }
- return nents;
+ skcipher_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
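+	/* Drop the socket reference taken when the AIO request was queued. */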
+ __sock_put(sk);
+
+ iocb->ki_complete(iocb, err ? err : resultlen, 0);
+
+ release_sock(sk);
}
-static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
- int flags)
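+/*
+ * Process at most one cipher operation: convert the caller's iovecs into
+ * the RX SGL, extract the matching TX data into a per-request TX SGL and
+ * run the cipher synchronously or, for AIO, asynchronously.
+ */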
+static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- struct skcipher_async_req *sreq;
- struct skcipher_request *req;
- struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents;
- unsigned int reqsize = crypto_skcipher_reqsize(tfm);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- int err = -ENOMEM;
- bool mark = false;
- char *iv;
-
- sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
- if (unlikely(!sreq))
- goto out;
-
- req = &sreq->req;
- iv = (char *)(req + 1) + reqsize;
- sreq->iocb = msg->msg_iocb;
- INIT_LIST_HEAD(&sreq->list);
- sreq->inflight = &ctx->inflight;
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+ unsigned int areqlen = sizeof(struct skcipher_async_req) +
+ crypto_skcipher_reqsize(tfm);
+ struct skcipher_async_req *areq;
+ struct skcipher_rsgl *last_rsgl = NULL;
+ int err = 0;
+ size_t len = 0;
- lock_sock(sk);
- tx_nents = skcipher_all_sg_nents(ctx);
- sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg))
- goto unlock;
- sg_init_table(sreq->tsg, tx_nents);
- memcpy(iv, ctx->iv, ivsize);
- skcipher_request_set_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- skcipher_async_cb, sreq);
+ /* Allocate cipher request for current operation. */
+ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ if (unlikely(!areq))
+ return -ENOMEM;
+ areq->areqlen = areqlen;
+ areq->sk = sk;
+ INIT_LIST_HEAD(&areq->rsgl_list);
+ areq->tsgl = NULL;
+ areq->tsgl_entries = 0;
- while (iov_iter_count(&msg->msg_iter)) {
- struct skcipher_async_rsgl *rsgl;
- int used;
+ /* convert iovecs of output buffers into RX SGL */
+ while (msg_data_left(msg)) {
+ struct skcipher_rsgl *rsgl;
+ size_t seglen;
+
+		/* limit the amount of readable buffer memory */
+ if (!skcipher_readable(sk))
+ break;
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto free;
}
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
- while (!sg->length)
- sg++;
-
- used = min_t(unsigned long, ctx->used,
- iov_iter_count(&msg->msg_iter));
- used = min_t(unsigned long, used, sg->length);
-
- if (txbufs == tx_nents) {
- struct scatterlist *tmp;
- int x;
- /* Ran out of tx slots in async request
- * need to expand */
- tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto free;
- }
+ seglen = min_t(size_t, ctx->used, msg_data_left(msg));
- sg_init_table(tmp, tx_nents * 2);
- for (x = 0; x < tx_nents; x++)
- sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
- sreq->tsg[x].length,
- sreq->tsg[x].offset);
- kfree(sreq->tsg);
- sreq->tsg = tmp;
- tx_nents *= 2;
- mark = true;
- }
- /* Need to take over the tx sgl from ctx
- * to the asynch req - these sgls will be freed later */
- sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
- sg->offset);
-
- if (list_empty(&sreq->list)) {
- rsgl = &sreq->first_sgl;
- list_add_tail(&rsgl->list, &sreq->list);
+ if (list_empty(&areq->rsgl_list)) {
+ rsgl = &areq->first_sgl;
} else {
- rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
+ rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
if (!rsgl) {
err = -ENOMEM;
goto free;
}
- list_add_tail(&rsgl->list, &sreq->list);
}
- used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
- err = used;
- if (used < 0)
+ rsgl->sgl.npages = 0;
+ list_add_tail(&rsgl->list, &areq->rsgl_list);
+
+ /* make one iovec available as scatterlist */
+ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ if (err < 0)
goto free;
+
+ /* chain the new scatterlist with previous one */
if (last_rsgl)
af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
last_rsgl = rsgl;
- len += used;
- skcipher_pull_sgl(sk, used, 0);
- iov_iter_advance(&msg->msg_iter, used);
+ len += err;
+ ctx->rcvused += err;
+ rsgl->sg_num_bytes = err;
+ iov_iter_advance(&msg->msg_iter, err);
}
- if (mark)
- sg_mark_end(sreq->tsg + txbufs - 1);
+	/* Process only as many RX buffers as we have TX data for. */
+ if (len > ctx->used)
+ len = ctx->used;
+
+	/*
+	 * If more data is expected to follow, round the processed length
+	 * down to a full multiple of the cipher block size.
+	 */
+ if (ctx->more || len < ctx->used)
+ len -= len % bs;
+
+	/*
+	 * Create a per-request TX SGL that tracks the SG entries taken
+	 * from the global TX SGL.
+	 */
+ areq->tsgl_entries = skcipher_count_tsgl(sk, len);
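+	/* Ensure at least one SG entry is allocated and initialized below. */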
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ skcipher_pull_tsgl(sk, len, areq->tsgl);
+
+ /* Initialize the crypto operation */
+ skcipher_request_set_tfm(&areq->req, tfm);
+ skcipher_request_set_crypt(&areq->req, areq->tsgl,
+ areq->first_sgl.sgl.sg, len, ctx->iv);
+
+ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation */
+ areq->iocb = msg->msg_iocb;
+ skcipher_request_set_callback(&areq->req,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_async_cb, areq);
+ err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
+ crypto_skcipher_decrypt(&areq->req);
+ } else {
+ /* Synchronous operation */
+ skcipher_request_set_callback(&areq->req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete,
+ &ctx->completion);
+ err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_skcipher_encrypt(&areq->req) :
+ crypto_skcipher_decrypt(&areq->req),
+ &ctx->completion);
+ }
- skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, iv);
- err = ctx->enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ /* AIO operation in progress */
if (err == -EINPROGRESS) {
- atomic_inc(&ctx->inflight);
- err = -EIOCBQUEUED;
- sreq = NULL;
- goto unlock;
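+		/*
+		 * Keep the socket alive for skcipher_async_cb; the reference
+		 * is dropped there via __sock_put.
+		 */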
+ sock_hold(sk);
+ return -EIOCBQUEUED;
}
+
free:
- skcipher_free_async_sgls(sreq);
-unlock:
- skcipher_wmem_wakeup(sk);
- release_sock(sk);
- kzfree(sreq);
-out:
- return err;
+ skcipher_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areqlen);
+
+ return err ? err : len;
}
-static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
- int flags)
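+/*
+ * recvmsg entry point: invoke one cipher operation per loop iteration
+ * until the caller's buffer is exhausted or an AIO request was queued.
+ */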
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct sock *psk = ask->parent;
- struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
- unsigned bs = crypto_skcipher_blocksize(tfm);
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int err = -EAGAIN;
- int used;
- long copied = 0;
+ int ret = 0;
lock_sock(sk);
while (msg_data_left(msg)) {
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto unlock;
+ int err = _skcipher_recvmsg(sock, msg, ignored, flags);
+
+		/*
+		 * This error covers -EIOCBQUEUED which implies that we can
+		 * handle only one AIO request at a time. A caller that wants
+		 * multiple AIO requests in parallel must issue separate
+		 * recvmsg calls.
+		 */
+ if (err <= 0) {
+ if (err == -EIOCBQUEUED)
+ ret = err;
+ goto out;
}
- used = min_t(unsigned long, ctx->used, msg_data_left(msg));
-
- used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
- err = used;
- if (err < 0)
- goto unlock;
-
- if (ctx->more || used < ctx->used)
- used -= used % bs;
-
- err = -EINVAL;
- if (!used)
- goto free;
-
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
- ctx->iv);
-
- err = af_alg_wait_for_completion(
- ctx->enc ?
- crypto_skcipher_encrypt(&ctx->req) :
- crypto_skcipher_decrypt(&ctx->req),
- &ctx->completion);
-
-free:
- af_alg_free_sg(&ctx->rsgl);
-
- if (err)
- goto unlock;
-
- copied += used;
- skcipher_pull_sgl(sk, used, 1);
- iov_iter_advance(&msg->msg_iter, used);
+ ret += err;
}
- err = 0;
-
-unlock:
+out:
skcipher_wmem_wakeup(sk);
release_sock(sk);
-
- return copied ?: err;
-}
-
-static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t ignored, int flags)
-{
- return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
- skcipher_recvmsg_async(sock, msg, flags) :
- skcipher_recvmsg_sync(sock, msg, flags);
+ return ret;
}
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
return err;
}
-static void skcipher_wait(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- int ctr = 0;
-
- while (atomic_read(&ctx->inflight) && ctr++ < 100)
- msleep(100);
-}
-
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
-
- if (atomic_read(&ctx->inflight))
- skcipher_wait(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
- skcipher_free_sgl(sk);
+ skcipher_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
struct alg_sock *ask = alg_sk(sk);
struct skcipher_tfm *tfm = private;
struct crypto_skcipher *skcipher = tfm->skcipher;
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
+ unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
- INIT_LIST_HEAD(&ctx->tsgl);
+ INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
+ ctx->rcvused = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- atomic_set(&ctx->inflight, 0);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, skcipher);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
-
sk->sk_destruct = skcipher_sock_destruct;
return 0;