// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for accelerated AES-GCM stitched implementation for ppc64le.
 *
 * Copyright 2022- IBM Inc. All rights reserved
 */

#include <asm/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/types.h>

#define PPC_ALIGN	16
#define GCM_IV_SIZE	12

MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
				       void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
			      unsigned char *aad, unsigned int alen);

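/*
 * Round-key schedule filled in by aes_p10_set_encrypt_key().  The layout
 * (expanded round keys followed by the round count) is shared with the
 * asm routines, which address these fields by fixed offset.
 */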
struct aes_key {
	u8 key[AES_MAX_KEYLENGTH];
	u64 rounds;
};

struct gcm_ctx {
	u8 iv[16];
	u8 ivtag[16];
	u8 aad_hash[16];
	u64 aadLen;
	u64 Plen;	/* offset 56 - used in aes_p10_gcm_{en/de}crypt */
};

struct Hash_ctx {
	u8 H[16];	/* subkey */
	u8 Htable[256];	/* Xi, Hash table (offset 32) */
};

struct p10_aes_gcm_ctx {
	struct aes_key enc_key;
};

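/*
 * The P10 asm routines use VSX registers, so every call into them must
 * be bracketed by enable_kernel_vsx()/disable_kernel_vsx(), with
 * preemption disabled while the vector unit is in use.
 */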
static void vsx_begin(void)
{
	preempt_disable();
	enable_kernel_vsx();
}

static void vsx_end(void)
{
	disable_kernel_vsx();
	preempt_enable();
}

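/*
 * Convert the two big-endian 64-bit halves of the hash subkey H into
 * host order (a byte swap on little-endian), as gcm_init_htable()
 * expects.
 */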
static void set_subkey(unsigned char *hash)
{
	*(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
	*(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
}

/*
 * Hash the associated data, if any, and copy the result to Xi.
 */
static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
		    unsigned char *aad, int alen)
{
	int i;
	u8 nXi[16] = {0, };

	gctx->aadLen = alen;
	i = alen & ~0xf;
	if (i) {
		gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
		aad += i;
		alen -= i;
	}
	if (alen) {
		for (i = 0; i < alen; i++)
			nXi[i] ^= aad[i];

		memset(gctx->aad_hash, 0, 16);
		gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
	} else {
		memcpy(gctx->aad_hash, nXi, 16);
	}

	memcpy(hash->Htable, gctx->aad_hash, 16);
}

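/*
 * Set up the per-request GCM state: derive the hash subkey
 * H = E_K(0^128), build the GHASH lookup table, encrypt the initial
 * counter block J0 for the final tag, leave the counter at 2 for the
 * bulk data, and hash any associated data.
 */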
static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
			struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
{
	__be32 counter = cpu_to_be32(1);

	aes_p10_encrypt(hash->H, hash->H, rdkey);
	set_subkey(hash->H);
	gcm_init_htable(hash->Htable+32, hash->H);

	*((__be32 *)(iv+12)) = counter;

	gctx->Plen = 0;

	/*
	 * Encrypt counter vector as iv tag and increment counter.
	 */
	aes_p10_encrypt(iv, gctx->ivtag, rdkey);

	counter = cpu_to_be32(2);
	*((__be32 *)(iv+12)) = counter;
	memcpy(gctx->iv, iv, 16);

	gctx->aadLen = assoclen;
	memset(gctx->aad_hash, 0, 16);
	if (assoclen)
		set_aad(gctx, hash, assoc, assoclen);
}

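/*
 * Complete the tag: GHASH the 128-bit lengths block (AAD length and
 * ciphertext length, both in bits), then XOR in the encrypted initial
 * counter block.  With no AAD and no data, the tag degenerates to the
 * encrypted counter block alone.
 */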
static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
{
	int i;
	unsigned char len_ac[16 + PPC_ALIGN];
	unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
	__be64 clen = cpu_to_be64(len << 3);
	__be64 alen = cpu_to_be64(gctx->aadLen << 3);

	if (len == 0 && gctx->aadLen == 0) {
		memcpy(hash->Htable, gctx->ivtag, 16);
		return;
	}

	/*
	 * Lengths are in bits.
	 */
	*((__be64 *)(aclen)) = alen;
	*((__be64 *)(aclen+8)) = clen;

	/*
	 * Hash the AAD length and data length.
	 */
	gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);

	for (i = 0; i < 16; i++)
		hash->Htable[i] ^= gctx->ivtag[i];
}

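/*
 * GCM permits truncated tags; accept the same sizes as the generic
 * gcm(aes) implementation (4, 8, and 12..16 bytes).
 */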
static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

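/*
 * Key expansion runs in asm and touches VSX state, hence the
 * vsx_begin()/vsx_end() bracket.
 */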
static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	vsx_begin();
	ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	vsx_end();

	return ret ? -EINVAL : 0;
}

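/*
 * Common encrypt/decrypt path: linearize the AAD, initialize the GCM
 * state, walk the src/dst scatterlists through the stitched asm core,
 * then finalize the tag and, on decrypt, verify it.
 */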
static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
	struct scatter_walk assoc_sg_walk;
	struct skcipher_walk walk;
	u8 *assocmem = NULL;
	u8 *assoc;
	unsigned int assoclen = req->assoclen;
	unsigned int cryptlen = req->cryptlen;
	unsigned char ivbuf[AES_BLOCK_SIZE + PPC_ALIGN];
	unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
	int ret;
	unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
	u8 otag[16];
	int total_processed = 0;

	memset(databuf, 0, sizeof(databuf));
	memset(hashbuf, 0, sizeof(hashbuf));
	memset(ivbuf, 0, sizeof(ivbuf));
	memcpy(iv, req->iv, GCM_IV_SIZE);

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	vsx_begin();
	gcmp10_init(gctx, iv, (unsigned char *)&ctx->enc_key, hash, assoc, assoclen);
	vsx_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (enc)
		ret = skcipher_walk_aead_encrypt(&walk, req, false);
	else
		ret = skcipher_walk_aead_decrypt(&walk, req, false);
	if (ret)
		return ret;

	while (walk.nbytes > 0 && ret == 0) {
		vsx_begin();
		if (enc)
			aes_p10_gcm_encrypt(walk.src.virt.addr,
					    walk.dst.virt.addr,
					    walk.nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);
		else
			aes_p10_gcm_decrypt(walk.src.virt.addr,
					    walk.dst.virt.addr,
					    walk.nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);
		vsx_end();

		total_processed += walk.nbytes;
		ret = skcipher_walk_done(&walk, 0);
	}

	if (ret)
		return ret;

	/* Finalize hash */
	vsx_begin();
	finish_tag(gctx, hash, total_processed);
	vsx_end();

	/* Copy Xi to end of dst */
	if (enc) {
		scatterwalk_map_and_copy(hash->Htable, req->dst,
					 req->assoclen + cryptlen,
					 auth_tag_len, 1);
	} else {
		scatterwalk_map_and_copy(otag, req->src,
					 req->assoclen + cryptlen - auth_tag_len,
					 auth_tag_len, 0);

		if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
			memzero_explicit(hash->Htable, 16);
			return -EBADMSG;
		}
	}

	return 0;
}

static int p10_aes_gcm_encrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, 1);
}

static int p10_aes_gcm_decrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, 0);
}

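/*
 * Registered well above the generic software gcm(aes) so that this
 * driver wins the lookup on CPUs that support it.
 */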
static struct aead_alg gcm_aes_alg = {
	.ivsize			= GCM_IV_SIZE,
	.maxauthsize		= 16,

	.setauthsize		= set_authsize,
	.setkey			= p10_aes_gcm_setkey,
	.encrypt		= p10_aes_gcm_encrypt,
	.decrypt		= p10_aes_gcm_decrypt,

	.base.cra_name		= "gcm(aes)",
	.base.cra_driver_name	= "aes_gcm_p10",
	.base.cra_priority	= 2100,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct p10_aes_gcm_ctx),
	.base.cra_module	= THIS_MODULE,
};

static int __init p10_init(void)
{
	return crypto_register_aead(&gcm_aes_alg);
}

static void __exit p10_exit(void)
{
	crypto_unregister_aead(&gcm_aes_alg);
}

module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
module_exit(p10_exit);
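
/*
 * Illustrative only (a minimal sketch, not part of this driver): one way
 * a kernel caller might drive this transform through the AEAD API.  The
 * names key/iv/buf/assoclen/ptlen are placeholders, error handling is
 * omitted, and the "gcm(aes)" lookup resolves to whichever registered
 * implementation has the highest priority, which may not be this one.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *
 *	crypto_aead_setkey(tfm, key, 32);	// AES-256 key
 *	crypto_aead_setauthsize(tfm, 16);	// full 128-bit tag
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + ptlen + 16);	// AAD | PT | tag
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);	// 12-byte IV
 *	crypto_aead_encrypt(req);	// tag is written after the ciphertext
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */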