/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
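
/*
 * Usage sketch (illustrative only; "foo", FOO_PARALLEL_BLOCKS and the
 * foo_* helpers are made-up placeholders): a cipher driver describes its
 * implementations in a struct common_glue_ctx, ordered from the widest
 * batch down to a single block, and calls the helpers in this file from
 * its skcipher ->encrypt()/->decrypt() callbacks:
 *
 *	static const struct common_glue_ctx foo_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = FOO_PARALLEL_BLOCKS,
 *		.funcs = { {
 *			.num_blocks = FOO_PARALLEL_BLOCKS,
 *			.fn_u = { .ecb = foo_ecb_enc_8way }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = foo_encrypt }
 *		} }
 *	};
 *
 *	static int ecb_encrypt(struct skcipher_request *req)
 *	{
 *		return glue_ecb_req_128bit(&foo_enc, req);
 *	}
 *
 * The placeholder helpers are assumed to already have the generic glue
 * function signatures; real drivers may need the casts provided by
 * glue_helper.h.
 */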

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

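/**
 * glue_ecb_req_128bit - ECB en/decryption using multi-block helpers
 * @gctx:	glue context; funcs[] is expected to be ordered from the
 *		widest batch size down to a single block
 * @req:	skcipher request to process
 *
 * For each chunk of the walk, as many batches as possible are processed
 * with the widest helper, and the tail falls through to the smaller ones.
 */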
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

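/**
 * glue_cbc_encrypt_req_128bit - CBC encryption, one block at a time
 * @fn:		single-block encryption function of the cipher
 * @req:	skcipher request to process
 *
 * CBC encryption cannot be parallelised because each block depends on the
 * previous ciphertext block, so @fn is simply chained over the request.
 */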
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

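/**
 * glue_cbc_decrypt_req_128bit - CBC decryption using multi-block helpers
 * @gctx:	glue context describing the CBC decryption helpers
 * @req:	skcipher request to process
 */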
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		last_iv = *src;

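		/*
		 * Decrypt from the last block towards the first so that the
		 * operation works in place: each block's chaining XOR reads
		 * the preceding ciphertext block, which must still be
		 * unmodified at that point.
		 */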
		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

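/**
 * glue_ctr_req_128bit - CTR en/decryption using multi-block helpers
 * @gctx:	glue context describing the CTR helper functions
 * @req:	skcipher request to process
 *
 * The counter is converted from the big-endian IV into a le128 for the
 * helpers and written back in big-endian form after each chunk.
 */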
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

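	/*
	 * CTR is a stream mode: a final partial block is handled by running
	 * the last funcs[] entry, which handles a single block, on a
	 * full-size stack buffer and copying back only the bytes needed.
	 */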
	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

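/*
 * Process one walk chunk with the XTS helpers, widest batch first.
 * Returns the number of bytes left unprocessed (less than one block).
 */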
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

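/**
 * glue_xts_req_128bit - XTS en/decryption using multi-block helpers
 * @gctx:	glue context describing the XTS helper functions
 * @req:	skcipher request to process
 * @tweak_fn:	single-block encryption function used to turn the IV into
 *		the initial tweak
 * @tweak_ctx:	cipher context for @tweak_fn (the tweak key)
 * @crypt_ctx:	cipher context for the data en/decryption helpers
 */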
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     &walk, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

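/**
 * glue_xts_crypt_128bit_one - XTS en/decryption of a single block
 * @ctx:	cipher context
 * @dst:	destination block
 * @src:	source block
 * @iv:		current tweak T, updated in place to the next tweak
 * @fn:		single-block encryption or decryption function
 *
 * One XTS step: dst = fn(src ^ T) ^ T, after which T is multiplied by x
 * in GF(2^128) (gf128mul_x_ble) to obtain the tweak for the next block.
 */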
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");