Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 2 Apr 2012 16:40:24 +0000 (09:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 2 Apr 2012 16:40:24 +0000 (09:40 -0700)
Pull crypto fixes from Herbert Xu:
 - Fix for CPU hotplug hang in padata.
 - Avoid using cpu_active inappropriately in pcrypt and padata.
 - Fix for user-space algorithm lookup hang with IV generators.
 - Fix for netlink dump of algorithms, where entries went missing from the
   dump because the message size was calculated incorrectly (a user-space
   sketch of this interface follows the shortlog below).

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: user - Fix size of netlink dump message
  crypto: user - Fix lookup of algorithms with IV generator
  crypto: pcrypt - Use the online cpumask as the default
  padata: Fix cpu hotplug
  padata: Use the online cpumask as the default
  padata: Add a reference to the api documentation
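
A rough user-space sketch of the interface two of these fixes touch: the
crypto user-configuration API is driven over an AF_NETLINK socket using the
NETLINK_CRYPTO protocol. The program below is not part of the merge; it
requests a CRYPTO_MSG_GETALG dump and prints the algorithms that come back,
which is the dump path whose per-message allocation crypto_user.c adjusts
further down. It assumes the kernel's <linux/cryptouser.h> header is
installed, picks an arbitrary 64 KiB receive buffer, keeps error handling
minimal, and normally needs CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct crypto_user_alg cru;
	} req;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
	char buf[65536];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return 1;

	/* GETALG with NLM_F_DUMP asks for a dump of all registered algorithms */
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
	req.nlh.nlmsg_type = CRYPTO_MSG_GETALG;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&nl, sizeof(nl)) < 0)
		return 1;

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct crypto_user_alg *alg;

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;

			/* each reply starts with a struct crypto_user_alg */
			alg = NLMSG_DATA(h);
			printf("%s (%s)\n", alg->cru_name, alg->cru_driver_name);
		}
	}
out:
	close(fd);
	return 0;
}

Before the min_dump_alloc change below, a dump like this could come back
incomplete once the algorithm reports no longer fit into the default
netlink allocation.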

crypto/ablkcipher.c
crypto/aead.c
crypto/crypto_user.c
crypto/pcrypt.c
include/crypto/internal/aead.h
include/crypto/internal/skcipher.h
include/linux/cryptouser.h
kernel/padata.c

diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c1d9aa75fdb94a70dba711e175d8f66c9f..8d3a056ebeeaf250d5ec61f4fe0cba990001528c 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -613,8 +613,7 @@ out:
        return err;
 }
 
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
-                                                u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
 {
        struct crypto_alg *alg;
 
@@ -652,6 +651,7 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
 
        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
 
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dca6fe44dfc242e89a36741c56786327fa..e4cb35159be43fe365ec569908f84732c5c67042 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -470,8 +470,7 @@ out:
        return err;
 }
 
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
-                                            u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
 {
        struct crypto_alg *alg;
 
@@ -503,6 +502,7 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
 
        return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                     u32 type, u32 mask)
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f76e42bcc6e7f4c213eef714761ee34630fbc483..f1ea0a0641350cbd0f7628bc3e7edde4fc38972d 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/cryptouser.h>
+#include <linux/sched.h>
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
 #include "internal.h"
 
 DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
        return crypto_unregister_instance(alg);
 }
 
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+                                                  u32 mask)
+{
+       int err;
+       struct crypto_alg *alg;
+
+       type = crypto_skcipher_type(type);
+       mask = crypto_skcipher_mask(mask);
+
+       for (;;) {
+               alg = crypto_lookup_skcipher(name,  type, mask);
+               if (!IS_ERR(alg))
+                       return alg;
+
+               err = PTR_ERR(alg);
+               if (err != -EAGAIN)
+                       break;
+               if (signal_pending(current)) {
+                       err = -EINTR;
+                       break;
+               }
+       }
+
+       return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+                                              u32 mask)
+{
+       int err;
+       struct crypto_alg *alg;
+
+       type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+       type |= CRYPTO_ALG_TYPE_AEAD;
+       mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+       mask |= CRYPTO_ALG_TYPE_MASK;
+
+       for (;;) {
+               alg = crypto_lookup_aead(name,  type, mask);
+               if (!IS_ERR(alg))
+                       return alg;
+
+               err = PTR_ERR(alg);
+               if (err != -EAGAIN)
+                       break;
+               if (signal_pending(current)) {
+                       err = -EINTR;
+                       break;
+               }
+       }
+
+       return ERR_PTR(err);
+}
+
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct nlattr **attrs)
 {
@@ -325,7 +383,19 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
        else
                name = p->cru_name;
 
-       alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+       switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+       case CRYPTO_ALG_TYPE_AEAD:
+               alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+               break;
+       case CRYPTO_ALG_TYPE_GIVCIPHER:
+       case CRYPTO_ALG_TYPE_BLKCIPHER:
+       case CRYPTO_ALG_TYPE_ABLKCIPHER:
+               alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+               break;
+       default:
+               alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+       }
+
        if (IS_ERR(alg))
                return PTR_ERR(alg);
 
@@ -387,12 +457,20 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
            (nlh->nlmsg_flags & NLM_F_DUMP))) {
+               struct crypto_alg *alg;
+               u16 dump_alloc = 0;
+
                if (link->dump == NULL)
                        return -EINVAL;
+
+               list_for_each_entry(alg, &crypto_alg_list, cra_list)
+                       dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
                {
                        struct netlink_dump_control c = {
                                .dump = link->dump,
                                .done = link->done,
+                               .min_dump_alloc = dump_alloc,
                        };
                        return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                }
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 29a89dad68b664b0dcfefdf29a19631904b21e3f..b2c99dc1c5e2f244bab9cf8fafcedb0860df67f6 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -280,11 +280,11 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
 
        ictx->tfm_count++;
 
-       cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+       cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
 
-       ctx->cb_cpu = cpumask_first(cpu_active_mask);
+       ctx->cb_cpu = cpumask_first(cpu_online_mask);
        for (cpu = 0; cpu < cpu_index; cpu++)
-               ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+               ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
 
        cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
 
@@ -472,7 +472,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
                goto err_free_padata;
        }
 
-       cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+       cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, mask);
 
        pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
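
The pcrypt hunks above move the round-robin callback-CPU choice from
cpu_active_mask to cpu_online_mask. As a standalone sketch of that
selection pattern (not code from the patch; the helper name is made up,
and the get_online_cpus()/put_online_cpus() bracketing is added here to
keep the online mask stable while it is walked):

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Return the n-th online CPU, wrapping around, without racing CPU hotplug. */
static int pick_nth_online_cpu(unsigned int n)
{
	int cpu;

	get_online_cpus();
	n %= cpumask_weight(cpu_online_mask);	/* at least one CPU is online */
	cpu = cpumask_first(cpu_online_mask);
	while (n--)
		cpu = cpumask_next(cpu, cpu_online_mask);
	put_online_cpus();

	return cpu;
}
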
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index d838c945575a73bf313a809edece47ee27ad16a2..2eba340230a73a63e8a1472a20ff0ed0a451d981 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -31,6 +31,8 @@ static inline void crypto_set_aead_spawn(
        crypto_set_spawn(&spawn->base, inst);
 }
 
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                     u32 type, u32 mask);
 
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 3a748a6bf772b25e83e8657024cddea8f40d8026..06e8b32d541c57280edbf06fdd03772a84fa55ff 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -34,6 +34,8 @@ static inline void crypto_set_skcipher_spawn(
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask);
 
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
 static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
 {
        crypto_drop_spawn(&spawn->base);
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
index 532fb58f16bf9a417f869d7fe24149e6283d4ff6..4abf2ea6a88761dd0980cba05553737c769bf72d 100644
--- a/include/linux/cryptouser.h
+++ b/include/linux/cryptouser.h
@@ -100,3 +100,6 @@ struct crypto_report_rng {
        char type[CRYPTO_MAX_NAME];
        unsigned int seedsize;
 };
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+                              sizeof(struct crypto_report_blkcipher))
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f10eb285ece5136177345ba368493a3b66932a9..89fe3d1b9efbc1f34351e5752dd730db3c5bbab4 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
 /*
  * padata.c - generic interface to process data streams in parallel
  *
+ * See Documentation/padata.txt for an api documentation.
+ *
  * Copyright (C) 2008, 2009 secunet Security Networks AG
  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
  *
@@ -354,13 +356,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                return -ENOMEM;
 
-       cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+       cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pd->cpumask.cbcpu);
                return -ENOMEM;
        }
 
-       cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+       cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
        return 0;
 }
 
@@ -564,7 +566,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
 static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
 {
-       if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+       if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }
@@ -678,7 +680,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
        struct parallel_data *pd;
 
-       if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+       if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
@@ -746,6 +748,9 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
                        return -ENOMEM;
 
                padata_replace(pinst, pd);
+
+               cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+               cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
        }
 
        return 0;
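
The first padata.c hunk above adds a pointer to Documentation/padata.txt.
For orientation, a minimal sketch of that API as documented for this era
of the kernel follows; the my_* names are hypothetical, error handling is
trimmed, and per the documentation neither callback may sleep.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/padata.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_request {
	struct padata_priv padata;	/* recovered via container_of() */
	/* ... per-request data ... */
};

static struct padata_instance *my_pinst;

/* Runs on some CPU of the instance's parallel cpumask. */
static void my_parallel(struct padata_priv *padata)
{
	/* container_of(padata, struct my_request, padata) gives the request;
	 * the heavy per-request work would run here. */

	padata_do_serial(padata);	/* hand it back for ordering */
}

/* Runs on the chosen callback CPU, in submission order. */
static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	/* complete and release the request */
	kfree(req);
}

static int my_setup(struct workqueue_struct *wq)
{
	my_pinst = padata_alloc_possible(wq);
	if (!my_pinst)
		return -ENOMEM;

	return padata_start(my_pinst);
}

/* Queue one request; cb_cpu must be in the instance's callback cpumask. */
static int my_submit(struct my_request *req, int cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	return padata_do_parallel(my_pinst, &req->padata, cb_cpu);
}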