/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <errno.h>
#include <zephyr/crypto/crypto.h>
#include "crypto_intel_sha_priv.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(SHA);

#define DT_DRV_COMPAT intel_adsp_sha

static struct sha_session sha_sessions[SHA_MAX_SESSIONS];

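/* Claim the first unused session slot; returns its index, or -1 if all are in use. */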
static int intel_sha_get_unused_session_idx(void)
{
	int i;

	for (i = 0; i < SHA_MAX_SESSIONS; i++) {
		if (!sha_sessions[i].in_use) {
			sha_sessions[i].in_use = true;
			return i;
		}
	}
	return -1;
}

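/*
 * Enable (1) or disable (0) the SHA engine. When disabling, spin until the
 * engine is no longer busy so an in-flight computation is not cut short.
 */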
static int intel_sha_set_ctl_enable(struct sha_container *sha, int status)
{
	/* wait until not busy when turning off */
	if (status == 0 && sha->dfsha->shactl.part.en == 1) {
		while (sha->dfsha->shasts.part.busy) {
		}
	}

	sha->dfsha->shactl.part.en = status;
	return 0;
}

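/* Set the lower 32 bits of the resume length; it must be block-aligned. */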
static int intel_sha_set_resume_length_dw0(struct sha_container *sha, uint32_t lower_length)
{
	int err = -EINVAL;

	if (IS_ALIGNED(lower_length, SHA_REQUIRED_BLOCK_ALIGNMENT)) {
		sha->dfsha->sharldw0.full = lower_length;
		err = 0;
	}

	return err;
}

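/* Set the upper 32 bits of the resume length; no alignment requirement here. */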
static int intel_sha_set_resume_length_dw1(struct sha_container *sha, uint32_t upper_length)
{
	sha->dfsha->sharldw1.full = upper_length;
	return 0;
}

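/*
 * Word-by-word copy between SHA register windows; the length and both
 * pointers must be 32-bit aligned so every access stays word-sized.
 */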
static int intel_sha_regs_cpy(void *dst, const void *src, size_t len)
{
	uint32_t counter;
	int err = -EINVAL;

	if ((IS_ALIGNED(len, sizeof(uint32_t))) && (IS_ALIGNED(dst, sizeof(uint32_t))) &&
	    (IS_ALIGNED(src, sizeof(uint32_t)))) {
		len /= sizeof(uint32_t);
		for (counter = 0; counter != len; ++counter) {
			((uint32_t *)dst)[counter] = ((uint32_t *)src)[counter];
		}

		err = 0;
	}

	return err;
}

/*! Perform SHA computation over requested region. */
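/*
 * Flow: quiesce the engine, point the processing input buffer at the caller's
 * data, reload length/vector state when resuming a previous run, then enable
 * the engine and wait for completion by disabling it again.
 */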
static int intel_sha_device_run(const struct device *dev, const void *buf_in, size_t buf_in_size,
				size_t max_buff_len, uint32_t state)
{
	int err;
	struct sha_container *const self = dev->data;
	union sha_state state_u = { .full = state };
	/* align to OWORD */
	const size_t aligned_buff_size = ROUND_UP(buf_in_size, 0x10);

	err = intel_sha_set_ctl_enable(self, 0);
	if (err) {
		return err;
	}

	/* set processing element disable */
	self->dfsha->pibcs.part.peen = 0;
	/* set pib base addr */
	self->dfsha->pibba.full = (uint32_t)buf_in;

	if (max_buff_len < aligned_buff_size) {
		return -EINVAL;
	}

	self->dfsha->pibs.full = aligned_buff_size;
	/* enable interrupt */
	self->dfsha->pibcs.part.bscie = 1;
	self->dfsha->pibcs.part.teie = 0;
	/* set processing element enable */
	self->dfsha->pibcs.part.peen = 1;

	if (self->dfsha->shactl.part.en) {
		return -EINVAL; /* already enabled */
	}

	self->dfsha->shactl.part.hrsm = state_u.part.hrsm;

	/* set initial values if resuming */
	if (state_u.part.hrsm) {
		err = intel_sha_set_resume_length_dw0(self, self->dfsha->shaaldw0.full);
		if (err) {
			return err;
		}
		err = intel_sha_set_resume_length_dw1(self, self->dfsha->shaaldw1.full);
		if (err) {
			return err;
		}
		err = intel_sha_regs_cpy((void *)self->dfsha->initial_vector,
					 (void *)self->dfsha->sha_result,
					 sizeof(self->dfsha->initial_vector));
		if (err) {
			return err;
		}
	}

	/* set ctl hash first middle */
	if (self->dfsha->shactl.part.en) {
		return -EINVAL; /* already enabled */
	}

	self->dfsha->shactl.part.hfm = state_u.part.state;

	/* increment pointer */
	self->dfsha->pibfpi.full = buf_in_size;

	err = intel_sha_set_ctl_enable(self, 1);
	if (err) {
		return err;
	}

	err = intel_sha_set_ctl_enable(self, 0);

	return err;
}

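/*
 * Copy the computed digest to dst, reading the hardware result buffer from
 * its last word backwards; dst and len must be 32-bit aligned.
 */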
static int intel_sha_copy_hash(struct sha_container *const self, void *dst, size_t len)
{
	/* NOTE: generated hash value should be read from the end */

	int err = -EINVAL;
	uint32_t counter = 0;
	uint32_t last_idx = 0;

	if ((IS_ALIGNED(len, sizeof(uint32_t))) && (IS_ALIGNED(dst, sizeof(uint32_t)))) {
		len /= sizeof(uint32_t);
		counter = 0;
		/* The index of the last element in the sha result buffer. */
		last_idx = (sizeof(self->dfsha->sha_result) / sizeof(uint32_t)) - 1;

		for (counter = 0; counter != len; counter++) {
			((uint32_t *)dst)[counter] =
				((uint32_t *)self->dfsha->sha_result)[last_idx - counter];
		}

		err = 0;
	}

	return err;
}

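/* Wait for the engine to go idle, then read back the digest. */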
static int intel_sha_device_get_hash(const struct device *dev, void *buf_out, size_t buf_out_size)
{
	int err;
	struct sha_container *const self = dev->data;

	if (buf_out == NULL) {
		return -EINVAL;
	}
	/* wait until not busy */
	while (self->dfsha->shasts.part.busy) {
	}

	err = intel_sha_copy_hash(self, buf_out, buf_out_size);

	return err;
}

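/*
 * Hash handler registered with the crypto API: restores the session context
 * into the hardware, feeds the input in SHA_API_MAX_FRAG_LEN chunks
 * (consuming pkt->in_buf/in_len), and on the final fragment reads out the
 * digest and byte-swaps it into the common hash representation.
 */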
static int intel_sha_compute(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish)
{
	int ret;
	struct sha_container *self = (struct sha_container *const)(ctx->device)->data;
	struct sha_session *session = (struct sha_session *)ctx->drv_sessn_state;
	size_t frag_length;
	size_t output_size;
	uint32_t *hash_int_ptr = (uint32_t *)(pkt->out_buf);

	/* set algo */
	self->dfsha->shactl.full = 0x0;
	self->dfsha->shactl.part.algo = session->algo;

	/* restore ctx */
	self->dfsha->shaaldw0 = session->sha_ctx.shaaldw0;
	self->dfsha->shaaldw1 = session->sha_ctx.shaaldw1;

	ret = intel_sha_regs_cpy((void *)self->dfsha->initial_vector,
				 (void *)session->sha_ctx.initial_vector,
				 sizeof(self->dfsha->initial_vector));
	if (ret) {
		return ret;
	}

	ret = intel_sha_regs_cpy((void *)self->dfsha->sha_result,
				 (void *)session->sha_ctx.sha_result,
				 sizeof(self->dfsha->sha_result));
	if (ret) {
		return ret;
	}

	/* compute hash */
	do {
		frag_length = pkt->in_len > SHA_API_MAX_FRAG_LEN ?
			SHA_API_MAX_FRAG_LEN :
			pkt->in_len;

		if ((frag_length == pkt->in_len) && finish) {
			session->state.part.state = SHA_LAST;
		}

		ret = intel_sha_device_run(ctx->device, pkt->in_buf, frag_length, frag_length,
					   session->state.full);
		if (ret) {
			return ret;
		}

		/* set state for next iteration */
		session->state.part.hrsm = SHA_HRSM_ENABLE;
		session->state.part.state = SHA_MIDLE;

		pkt->in_len -= frag_length;
		pkt->in_buf += frag_length;
	} while (pkt->in_len > 0);

	if (finish) {
		switch (self->dfsha->shactl.part.algo) {
		case CRYPTO_HASH_ALGO_SHA224:
			output_size = SHA224_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA256:
			output_size = SHA256_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA384:
			output_size = SHA384_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA512:
			output_size = SHA512_ALGORITHM_HASH_SIZEOF;
			break;
		default:
			return -ENOTSUP;
		}
		ret = intel_sha_device_get_hash(ctx->device, pkt->out_buf, output_size);
		if (ret) {
			return ret;
		}

		/* Fix byte ordering to match common hash representation. */
		for (size_t i = 0; i != output_size / sizeof(uint32_t); i++) {
			hash_int_ptr[i] = BYTE_SWAP32(hash_int_ptr[i]);
		}
	}
	return ret;
}

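/* Begin a hash session: reserve a session slot and record the chosen algorithm. */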
static int intel_sha_device_set_hash_type(const struct device *dev, struct hash_ctx *ctx,
					  enum hash_algo algo)
{
	int ctx_idx;
	struct sha_container *self = (struct sha_container *const)(dev)->data;

	ctx_idx = intel_sha_get_unused_session_idx();

	if (ctx_idx < 0) {
		LOG_ERR("All sessions in use!");
		return -ENOSPC;
	}
	ctx->drv_sessn_state = &sha_sessions[ctx_idx];

	/* set processing element disable */
	self->dfsha->pibcs.part.peen = 0;

	/* populate sha session data */
	sha_sessions[ctx_idx].state.part.state = SHA_FIRST;
	sha_sessions[ctx_idx].state.part.hrsm = SHA_HRSM_DISABLE;
	sha_sessions[ctx_idx].algo = algo;

	ctx->hash_hndlr = intel_sha_compute;
	return 0;
}

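/* End a hash session: scrub the hardware registers and the session state. */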
static int intel_sha_device_free(const struct device *dev, struct hash_ctx *ctx)
{
	struct sha_container *self = (struct sha_container *const)(dev)->data;
	struct sha_session *session = (struct sha_session *)ctx->drv_sessn_state;

	(void)memset((void *)self->dfsha, 0, sizeof(struct sha_hw_regs));
	(void)memset(&session->sha_ctx, 0, sizeof(struct sha_context));
	(void)memset(&session->state, 0, sizeof(union sha_state));
	session->in_use = 0;
	session->algo = 0;
	return 0;
}

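/* Only synchronous operation with separate input/output buffers is supported. */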
static int intel_sha_device_hw_caps(const struct device *dev)
{
	return (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS);
}

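/* Hash-only driver API; cipher entry points and async callbacks are not provided. */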
static struct crypto_driver_api hash_enc_funcs = {
	.hash_begin_session = intel_sha_device_set_hash_type,
	.hash_free_session = intel_sha_device_free,
	.hash_async_callback_set = NULL,
	.query_hw_caps = intel_sha_device_hw_caps,
};

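/*
 * Create one driver instance per enabled devicetree node; the register block
 * address is taken from the node's first reg entry.
 */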
#define INTEL_SHA_DEVICE_INIT(inst)                                                        \
	static struct sha_container sha_data_##inst = {                                    \
		.dfsha = (volatile struct sha_hw_regs *)DT_INST_REG_ADDR_BY_IDX(inst, 0)   \
	};                                                                                 \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &sha_data_##inst, NULL,                    \
			      POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY,                    \
			      (void *)&hash_enc_funcs);

DT_INST_FOREACH_STATUS_OKAY(INTEL_SHA_DEVICE_INIT)