/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>

struct aead_request;
struct rtattr;

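/*
 * The anonymous union overlays the template machinery's
 * struct crypto_instance on top of the base member of struct skcipher_alg:
 * @s.head is pure padding, sized so that @s.base and @alg.base occupy the
 * same bytes, letting one allocation be viewed as either type.
 */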
struct skcipher_instance {
	void (*free)(struct skcipher_instance *inst);
	union {
		struct {
			char head[offsetof(struct skcipher_alg, base)];
			struct crypto_instance base;
		} s;
		struct skcipher_alg alg;
	};
};

struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};

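/*
 * Scatterlist walk state used by skcipher implementations.  The src/dst
 * unions expose the current chunk either as a page/offset pair (phys) or
 * as a mapped address (virt); @nbytes is the amount of data available in
 * the current chunk, @total the amount still outstanding in the request.
 */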
struct skcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			void *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	struct list_head buffers;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline struct crypto_instance *skcipher_crypto_instance(
	struct skcipher_instance *inst)
{
	return &inst->s.base;
}

static inline struct skcipher_instance *skcipher_alg_instance(
	struct crypto_skcipher *skcipher)
{
	return container_of(crypto_skcipher_alg(skcipher),
			    struct skcipher_instance, alg);
}

static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
	return crypto_instance_ctx(skcipher_crypto_instance(inst));
}

static inline void skcipher_request_complete(struct skcipher_request *req,
					     int err)
{
	req->base.complete(&req->base, err);
}

static inline void crypto_set_skcipher_spawn(
	struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
{
	crypto_set_spawn(&spawn->base, inst);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask);

static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return container_of(spawn->base.alg, struct skcipher_alg, base);
}

static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_skcipher_spawn_alg(spawn);
}

static inline struct crypto_skcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}

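/*
 * Typically called from an instance's or driver's init_tfm handler to
 * reserve per-request private space; the reserved area is later obtained
 * with skcipher_request_ctx().
 */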
static inline void crypto_skcipher_set_reqsize(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	skcipher->reqsize = reqsize;
}

int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst);

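/*
 * Registration sketch (illustrative only; the algorithm name, driver name
 * and my_* callbacks below are assumptions, not part of this API):
 *
 *	static struct skcipher_alg my_alg = {
 *		.base.cra_name		= "cbc(aes)",
 *		.base.cra_driver_name	= "cbc-aes-mydriver",
 *		.base.cra_priority	= 400,
 *		.base.cra_blocksize	= AES_BLOCK_SIZE,
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= AES_MIN_KEY_SIZE,
 *		.max_keysize		= AES_MAX_KEY_SIZE,
 *		.ivsize			= AES_BLOCK_SIZE,
 *		.setkey			= my_setkey,
 *		.encrypt		= my_encrypt,
 *		.decrypt		= my_decrypt,
 *	};
 *
 *	err = crypto_register_skcipher(&my_alg);
 */
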
int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req,
		       bool atomic);
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req);
int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);

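/*
 * Typical walk loop in a synchronous encrypt/decrypt handler (illustrative
 * sketch; "bsize" and the per-chunk cipher step are assumptions, not a
 * real driver):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		// transform n bytes from walk.src.virt.addr into
 *		// walk.dst.virt.addr, using walk.iv as needed
 *
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */
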
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
					       int err)
{
	req->base.complete(&req->base, err);
}

static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
{
	return req->base.flags;
}

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx;
}

static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
	return req->base.flags;
}

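/*
 * The four helpers below cope with skcipher algorithms that are really
 * legacy (a)blkcipher implementations behind the skcipher API: a wrapped
 * blkcipher is identified by its CRYPTO_ALG_TYPE_BLKCIPHER type flag, a
 * wrapped ablkcipher by a non-NULL cra_ablkcipher.encrypt, and only a
 * native skcipher_alg reaches the final return.
 */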
static inline unsigned int crypto_skcipher_alg_min_keysize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blkcipher.min_keysize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_ablkcipher.min_keysize;

	return alg->min_keysize;
}

static inline unsigned int crypto_skcipher_alg_max_keysize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blkcipher.max_keysize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_ablkcipher.max_keysize;

	return alg->max_keysize;
}

static inline unsigned int crypto_skcipher_alg_chunksize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blocksize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_blocksize;

	return alg->chunksize;
}

static inline unsigned int crypto_skcipher_alg_walksize(
	struct skcipher_alg *alg)
{
	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return alg->base.cra_blocksize;

	if (alg->base.cra_ablkcipher.encrypt)
		return alg->base.cra_blocksize;

	return alg->walksize;
}

/**
 * crypto_skcipher_chunksize() - obtain chunk size
 * @tfm: cipher handle
 *
 * The block size is set to one for ciphers such as CTR.  However,
 * you still need to provide incremental updates in multiples of
 * the underlying block size as the IV does not have sub-block
 * granularity.  This is known in this API as the chunk size.
 *
 * Return: chunk size in bytes
 */
static inline unsigned int crypto_skcipher_chunksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
}

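/*
 * For example, "ctr(aes)" is registered with a cra_blocksize of 1 (it
 * behaves as a stream cipher), but its chunksize is the AES block size of
 * 16 bytes: splitting a message across requests at anything other than a
 * 16-byte boundary would leave the counter IV in a sub-block state that
 * the API cannot represent.
 */
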
/**
 * crypto_skcipher_walksize() - obtain walk size
 * @tfm: cipher handle
 *
 * In some cases, algorithms can only perform optimally when operating on
 * multiple blocks in parallel.  This is reflected by the walksize, which
 * must be a multiple of the chunksize (or equal to it if the concern does
 * not apply).
 *
 * Return: walk size in bytes
 */
static inline unsigned int crypto_skcipher_walksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}

/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
	struct crypto_cipher *cipher;	/* underlying block cipher */
};

static inline struct crypto_cipher *
skcipher_cipher_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	return ctx->cipher;
}

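/*
 * skcipher_alloc_instance_simple() does the common instance setup for
 * templates that wrap a single block cipher (CBC is one such mode): it
 * parses @tb, sets up the spawn for the underlying cipher and sizes the
 * instance context as a struct skcipher_ctx_simple, whose cipher handle
 * is later fetched with skcipher_cipher_simple().  On success the
 * underlying cipher algorithm is also returned through @cipher_alg_ret.
 */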
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret);

#endif	/* _CRYPTO_INTERNAL_SKCIPHER_H */