crypto: s390 - add System z hardware support for XTS mode
authorGerald Schaefer <gerald.schaefer@de.ibm.com>
Tue, 26 Apr 2011 06:12:42 +0000 (16:12 +1000)
committerHerbert Xu <herbert@gondor.apana.org.au>
Wed, 4 May 2011 05:06:30 +0000 (15:06 +1000)
This patch adds System z hardware acceleration support for the AES XTS mode.
The hardware support is available beginning with System z196.
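
A minimal usage sketch (illustration only, not part of the patch): once
this driver is registered, a caller can reach the accelerated transform
through the blkcipher API under the generic "xts(aes)" name. The key,
iv and buf contents below are placeholders and most error handling is
omitted:

        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        u8 key[32], iv[16], buf[512];   /* 256-bit XTS key = two AES-128 keys */

        /* the crypto API picks the highest-priority "xts(aes)"
           implementation, i.e. xts-aes-s390 when the hardware supports it */
        tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        desc.tfm = tfm;
        desc.flags = 0;

        crypto_blkcipher_setkey(tfm, key, sizeof(key));
        crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));   /* per-sector tweak */

        sg_init_one(&sg, buf, sizeof(buf));
        crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));

        crypto_free_blkcipher(tfm);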

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/s390/crypto/aes_s390.c
arch/s390/crypto/crypt_s390.h
drivers/crypto/Kconfig

index fc97b94..8230e86 100644 (file)
@@ -45,6 +45,24 @@ struct s390_aes_ctx {
        } fallback;
 };
 
+struct pcc_param {
+       u8 key[32];
+       u8 tweak[16];
+       u8 block[16];
+       u8 bit[16];
+       u8 xts[16];
+};
+
+struct s390_xts_ctx {
+       u8 key[32];
+       u8 xts_param[16];
+       struct pcc_param pcc;
+       long enc;
+       long dec;
+       int key_len;
+       struct crypto_blkcipher *fallback;
+};
+
 /*
  * Check if the key_len is supported by the HW.
  * Returns 0 if it is, a positive number if it is not and software fallback is
@@ -504,8 +522,211 @@ static struct crypto_alg cbc_aes_alg = {
        }
 };
 
+static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
+                                  unsigned int len)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+       xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+                       CRYPTO_TFM_REQ_MASK);
+
+       ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
+       if (ret) {
+               tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+               tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
+                               CRYPTO_TFM_RES_MASK);
+       }
+       return ret;
+}
+
+static int xts_fallback_decrypt(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct crypto_blkcipher *tfm;
+       int ret;
+
+       tfm = desc->tfm;
+       desc->tfm = xts_ctx->fallback;
+
+       ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int xts_fallback_encrypt(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct crypto_blkcipher *tfm;
+       int ret;
+
+       tfm = desc->tfm;
+       desc->tfm = xts_ctx->fallback;
+
+       ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                          unsigned int key_len)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+
+       switch (key_len) {
+       case 32:
+               xts_ctx->enc = KM_XTS_128_ENCRYPT;
+               xts_ctx->dec = KM_XTS_128_DECRYPT;
+               memcpy(xts_ctx->key + 16, in_key, 16);
+               memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+               break;
+       case 48:
+               /* XTS with AES-192 is not supported by the hardware,
+                * use the software fallback and pass its setkey result
+                * on to the caller.
+                */
+               xts_ctx->enc = 0;
+               xts_ctx->dec = 0;
+               xts_ctx->key_len = key_len;
+               return xts_fallback_setkey(tfm, in_key, key_len);
+       case 64:
+               xts_ctx->enc = KM_XTS_256_ENCRYPT;
+               xts_ctx->dec = KM_XTS_256_DECRYPT;
+               memcpy(xts_ctx->key, in_key, 32);
+               memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+               break;
+       default:
+               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       xts_ctx->key_len = key_len;
+       return 0;
+}
+
+static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+                        struct s390_xts_ctx *xts_ctx,
+                        struct blkcipher_walk *walk)
+{
+       /* AES-128 XTS keys live in the upper 16 bytes of the 32-byte key fields */
+       unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+       int ret = blkcipher_walk_virt(desc, walk);
+       unsigned int nbytes = walk->nbytes;
+       unsigned int n;
+       u8 *in, *out;
+       void *param;
+
+       if (!nbytes)
+               goto out;
+
+       memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+       memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+       memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+       memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+       param = xts_ctx->pcc.key + offset;
+       ret = crypt_s390_pcc(func, param);
+       BUG_ON(ret < 0);
+
+       memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+       param = xts_ctx->key + offset;
+       do {
+               /* only use complete blocks */
+               n = nbytes & ~(AES_BLOCK_SIZE - 1);
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+
+               ret = crypt_s390_km(func, param, out, in, n);
+               BUG_ON(ret < 0 || ret != n);
+
+               nbytes &= AES_BLOCK_SIZE - 1;
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       } while ((nbytes = walk->nbytes));
+out:
+       return ret;
+}
+
+static int xts_aes_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       if (unlikely(xts_ctx->key_len == 48))
+               return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+}
+
+static int xts_aes_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       if (unlikely(xts_ctx->key_len == 48))
+               return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+}
+
+static int xts_fallback_init(struct crypto_tfm *tfm)
+{
+       const char *name = tfm->__crt_alg->cra_name;
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+       xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
+                       CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+       if (IS_ERR(xts_ctx->fallback)) {
+               pr_err("Allocating XTS fallback algorithm %s failed\n",
+                      name);
+               return PTR_ERR(xts_ctx->fallback);
+       }
+       return 0;
+}
+
+static void xts_fallback_exit(struct crypto_tfm *tfm)
+{
+       struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_blkcipher(xts_ctx->fallback);
+       xts_ctx->fallback = NULL;
+}
+
+static struct crypto_alg xts_aes_alg = {
+       .cra_name               =       "xts(aes)",
+       .cra_driver_name        =       "xts-aes-s390",
+       .cra_priority           =       CRYPT_S390_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
+                                       CRYPTO_ALG_NEED_FALLBACK,
+       .cra_blocksize          =       AES_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(xts_aes_alg.cra_list),
+       .cra_init               =       xts_fallback_init,
+       .cra_exit               =       xts_fallback_exit,
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       2 * AES_MIN_KEY_SIZE,
+                       .max_keysize            =       2 * AES_MAX_KEY_SIZE,
+                       .ivsize                 =       AES_BLOCK_SIZE,
+                       .setkey                 =       xts_aes_set_key,
+                       .encrypt                =       xts_aes_encrypt,
+                       .decrypt                =       xts_aes_decrypt,
+               }
+       }
+};
+
 static int __init aes_s390_init(void)
 {
        int ret;
 
        if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
@@ -535,9 +756,20 @@ static int __init aes_s390_init(void)
        if (ret)
                goto cbc_aes_err;
 
+       if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
+           crypt_s390_func_available(KM_XTS_256_ENCRYPT,
+                       CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+               ret = crypto_register_alg(&xts_aes_alg);
+               if (ret)
+                       goto xts_aes_err;
+       }
+
 out:
        return ret;
 
+xts_aes_err:
+       crypto_unregister_alg(&cbc_aes_alg);
 cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
 ecb_aes_err:
@@ -548,6 +780,7 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
+       crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
index 4b8c96c..7cbfaf0 100644 (file)
@@ -55,6 +55,10 @@ enum crypt_s390_km_func {
        KM_AES_192_DECRYPT  = CRYPT_S390_KM | 0x13 | 0x80,
        KM_AES_256_ENCRYPT  = CRYPT_S390_KM | 0x14,
        KM_AES_256_DECRYPT  = CRYPT_S390_KM | 0x14 | 0x80,
+       KM_XTS_128_ENCRYPT  = CRYPT_S390_KM | 0x32,
+       KM_XTS_128_DECRYPT  = CRYPT_S390_KM | 0x32 | 0x80,
+       KM_XTS_256_ENCRYPT  = CRYPT_S390_KM | 0x34,
+       KM_XTS_256_DECRYPT  = CRYPT_S390_KM | 0x34 | 0x80,
 };
 
 /*
@@ -334,4 +338,31 @@ static inline int crypt_s390_func_available(int func,
        return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
 }
 
+/**
+ * crypt_s390_pcc:
+ * @func: the function code passed to PCC; the codes match those of KM,
+ *        see crypt_s390_km_func
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for success.
+ */
+static inline int crypt_s390_pcc(long func, void *param)
+{
+       register long __func asm("0") = func & 0x7f; /* mask out the decrypt modifier bit */
+       register void *__param asm("1") = param;
+       int ret = -1;
+
+       asm volatile(
+               "0:     .insn   rre,0xb92c0000,0,0 \n" /* PCC opcode */
+               "1:     brc     1,0b \n" /* handle partial completion */
+               "       la      %0,0\n"
+               "2:\n"
+               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               : "+d" (ret)
+               : "d" (__func), "a" (__param) : "cc", "memory");
+       return ret;
+}
+
 #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
index 7957acb..78df36a 100644 (file)
@@ -131,20 +131,14 @@ config CRYPTO_AES_S390
        select CRYPTO_BLKCIPHER
        help
          This is the s390 hardware accelerated implementation of the
-         AES cipher algorithms (FIPS-197). AES uses the Rijndael
-         algorithm.
-
-         Rijndael appears to be consistently a very good performer in
-         both hardware and software across a wide range of computing
-         environments regardless of its use in feedback or non-feedback
-         modes. Its key setup time is excellent, and its key agility is
-         good. Rijndael's very low memory requirements make it very well
-         suited for restricted-space environments, in which it also
-         demonstrates excellent performance. Rijndael's operations are
-         among the easiest to defend against power and timing attacks.
-
-         On s390 the System z9-109 currently only supports the key size
-         of 128 bit.
+         AES cipher algorithms (FIPS-197).
+
+         As of z9 the ECB and CBC modes are hardware accelerated
+         for 128-bit keys.
+         As of z10 the ECB and CBC modes are hardware accelerated
+         for all AES key sizes.
+         As of z196 the XTS mode is hardware accelerated for 256-
+         and 512-bit keys.
 
 config S390_PRNG
        tristate "Pseudo random number generator device driver"