1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4 
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10 
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15 
16   Contact Information:
17   qat-linux@intel.com
18 
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24 
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34 
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
56 
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
61 
qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle * obj_handle,unsigned int ae,unsigned int image_num)62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63 				 unsigned int ae, unsigned int image_num)
64 {
65 	struct icp_qat_uclo_aedata *ae_data;
66 	struct icp_qat_uclo_encapme *encap_image;
67 	struct icp_qat_uclo_page *page = NULL;
68 	struct icp_qat_uclo_aeslice *ae_slice = NULL;
69 
70 	ae_data = &obj_handle->ae_data[ae];
71 	encap_image = &obj_handle->ae_uimage[image_num];
72 	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73 	ae_slice->encap_image = encap_image;
74 
75 	if (encap_image->img_ptr) {
76 		ae_slice->ctx_mask_assigned =
77 					encap_image->img_ptr->ctx_assigned;
78 		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
79 	} else {
80 		ae_slice->ctx_mask_assigned = 0;
81 	}
82 	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83 	if (!ae_slice->region)
84 		return -ENOMEM;
85 	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
86 	if (!ae_slice->page)
87 		goto out_err;
88 	page = ae_slice->page;
89 	page->encap_page = encap_image->page;
90 	ae_slice->page->region = ae_slice->region;
91 	ae_data->slice_num++;
92 	return 0;
93 out_err:
94 	kfree(ae_slice->region);
95 	ae_slice->region = NULL;
96 	return -ENOMEM;
97 }
98 
qat_uclo_free_ae_data(struct icp_qat_uclo_aedata * ae_data)99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
100 {
101 	unsigned int i;
102 
103 	if (!ae_data) {
104 		pr_err("QAT: bad argument, ae_data is NULL\n ");
105 		return -EINVAL;
106 	}
107 
108 	for (i = 0; i < ae_data->slice_num; i++) {
109 		kfree(ae_data->ae_slices[i].region);
110 		ae_data->ae_slices[i].region = NULL;
111 		kfree(ae_data->ae_slices[i].page);
112 		ae_data->ae_slices[i].page = NULL;
113 	}
114 	return 0;
115 }
116 
qat_uclo_get_string(struct icp_qat_uof_strtable * str_table,unsigned int str_offset)117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 				 unsigned int str_offset)
119 {
120 	if ((!str_table->table_len) || (str_offset > str_table->table_len))
121 		return NULL;
122 	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
123 }
124 
qat_uclo_check_uof_format(struct icp_qat_uof_filehdr * hdr)125 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
126 {
127 	int maj = hdr->maj_ver & 0xff;
128 	int min = hdr->min_ver & 0xff;
129 
130 	if (hdr->file_id != ICP_QAT_UOF_FID) {
131 		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
132 		return -EINVAL;
133 	}
134 	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
136 		       maj, min);
137 		return -EINVAL;
138 	}
139 	return 0;
140 }
141 
qat_uclo_check_suof_format(struct icp_qat_suof_filehdr * suof_hdr)142 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
143 {
144 	int maj = suof_hdr->maj_ver & 0xff;
145 	int min = suof_hdr->min_ver & 0xff;
146 
147 	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
148 		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
149 		return -EINVAL;
150 	}
151 	if (suof_hdr->fw_type != 0) {
152 		pr_err("QAT: unsupported firmware type\n");
153 		return -EINVAL;
154 	}
155 	if (suof_hdr->num_chunks <= 0x1) {
156 		pr_err("QAT: SUOF chunk amount is incorrect\n");
157 		return -EINVAL;
158 	}
159 	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
160 		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
161 		       maj, min);
162 		return -EINVAL;
163 	}
164 	return 0;
165 }
166 
/*
 * Copy @num_in_bytes from @val into device SRAM starting at @addr, one
 * 32-bit word at a time.
 *
 * Fixed: the original looped `while (num_in_bytes)` and subtracted 4
 * unconditionally, so a length that is not a multiple of 4 wrapped the
 * unsigned counter (near-infinite loop) and overread the source buffer.
 * A trailing partial word is now zero-padded and written once.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes >= 4) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
	if (num_in_bytes) {
		/* zero-pad the last partial word before writing */
		outval = 0;
		memcpy(&outval, ptr, num_in_bytes);
		SRAM_WRITE(handle, addr, outval);
	}
}
182 
/*
 * Copy @num_in_bytes from @val into AE @ae's micro-store, one 32-bit
 * word per uword address starting at byte address @addr.
 *
 * Fixed: the original looped `while (num_in_bytes)` and subtracted 4
 * unconditionally, so a length that is not a multiple of 4 wrapped the
 * unsigned counter (near-infinite loop) and overread the source buffer.
 * A trailing partial word is now zero-padded and written once.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes >= 4) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
	if (num_in_bytes) {
		/* zero-pad the last partial word before writing */
		outval = 0;
		memcpy(&outval, ptr, num_in_bytes);
		qat_hal_wr_umem(handle, ae, addr, 1, &outval);
	}
}
200 
qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle * handle,unsigned char ae,struct icp_qat_uof_batch_init * umem_init_header)201 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
202 				   unsigned char ae,
203 				   struct icp_qat_uof_batch_init
204 				   *umem_init_header)
205 {
206 	struct icp_qat_uof_batch_init *umem_init;
207 
208 	if (!umem_init_header)
209 		return;
210 	umem_init = umem_init_header->next;
211 	while (umem_init) {
212 		unsigned int addr, *value, size;
213 
214 		ae = umem_init->ae;
215 		addr = umem_init->addr;
216 		value = umem_init->value;
217 		size = umem_init->size;
218 		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
219 		umem_init = umem_init->next;
220 	}
221 }
222 
223 static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_batch_init ** base)224 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
225 				 struct icp_qat_uof_batch_init **base)
226 {
227 	struct icp_qat_uof_batch_init *umem_init;
228 
229 	umem_init = *base;
230 	while (umem_init) {
231 		struct icp_qat_uof_batch_init *pre;
232 
233 		pre = umem_init;
234 		umem_init = umem_init->next;
235 		kfree(pre);
236 	}
237 	*base = NULL;
238 }
239 
qat_uclo_parse_num(char * str,unsigned int * num)240 static int qat_uclo_parse_num(char *str, unsigned int *num)
241 {
242 	char buf[16] = {0};
243 	unsigned long ae = 0;
244 	int i;
245 
246 	strncpy(buf, str, 15);
247 	for (i = 0; i < 16; i++) {
248 		if (!isdigit(buf[i])) {
249 			buf[i] = '\0';
250 			break;
251 		}
252 	}
253 	if ((kstrtoul(buf, 10, &ae)))
254 		return -EFAULT;
255 
256 	*num = (unsigned int)ae;
257 	return 0;
258 }
259 
qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_initmem * init_mem,unsigned int size_range,unsigned int * ae)260 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
261 				     struct icp_qat_uof_initmem *init_mem,
262 				     unsigned int size_range, unsigned int *ae)
263 {
264 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
265 	char *str;
266 
267 	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
268 		pr_err("QAT: initmem is out of range");
269 		return -EINVAL;
270 	}
271 	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
272 		pr_err("QAT: Memory scope for init_mem error\n");
273 		return -EINVAL;
274 	}
275 	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
276 	if (!str) {
277 		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
278 		return -EINVAL;
279 	}
280 	if (qat_uclo_parse_num(str, ae)) {
281 		pr_err("QAT: Parse num for AE number failed\n");
282 		return -EINVAL;
283 	}
284 	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
285 		pr_err("QAT: ae %d out of range\n", *ae);
286 		return -EINVAL;
287 	}
288 	return 0;
289 }
290 
qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_initmem * init_mem,unsigned int ae,struct icp_qat_uof_batch_init ** init_tab_base)291 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
292 					   *handle, struct icp_qat_uof_initmem
293 					   *init_mem, unsigned int ae,
294 					   struct icp_qat_uof_batch_init
295 					   **init_tab_base)
296 {
297 	struct icp_qat_uof_batch_init *init_header, *tail;
298 	struct icp_qat_uof_batch_init *mem_init, *tail_old;
299 	struct icp_qat_uof_memvar_attr *mem_val_attr;
300 	unsigned int i, flag = 0;
301 
302 	mem_val_attr =
303 		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
304 		sizeof(struct icp_qat_uof_initmem));
305 
306 	init_header = *init_tab_base;
307 	if (!init_header) {
308 		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
309 		if (!init_header)
310 			return -ENOMEM;
311 		init_header->size = 1;
312 		*init_tab_base = init_header;
313 		flag = 1;
314 	}
315 	tail_old = init_header;
316 	while (tail_old->next)
317 		tail_old = tail_old->next;
318 	tail = tail_old;
319 	for (i = 0; i < init_mem->val_attr_num; i++) {
320 		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
321 		if (!mem_init)
322 			goto out_err;
323 		mem_init->ae = ae;
324 		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
325 		mem_init->value = &mem_val_attr->value;
326 		mem_init->size = 4;
327 		mem_init->next = NULL;
328 		tail->next = mem_init;
329 		tail = mem_init;
330 		init_header->size += qat_hal_get_ins_num();
331 		mem_val_attr++;
332 	}
333 	return 0;
334 out_err:
335 	while (tail_old) {
336 		mem_init = tail_old->next;
337 		kfree(tail_old);
338 		tail_old = mem_init;
339 	}
340 	if (flag)
341 		kfree(*init_tab_base);
342 	return -ENOMEM;
343 }
344 
qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_initmem * init_mem)345 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
346 				  struct icp_qat_uof_initmem *init_mem)
347 {
348 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
349 	unsigned int ae;
350 
351 	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
352 				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
353 		return -EINVAL;
354 	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
355 					    &obj_handle->lm_init_tab[ae]))
356 		return -EINVAL;
357 	return 0;
358 }
359 
qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_initmem * init_mem)360 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
361 				  struct icp_qat_uof_initmem *init_mem)
362 {
363 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
364 	unsigned int ae, ustore_size, uaddr, i;
365 
366 	ustore_size = obj_handle->ustore_phy_size;
367 	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
368 		return -EINVAL;
369 	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
370 					    &obj_handle->umem_init_tab[ae]))
371 		return -EINVAL;
372 	/* set the highest ustore address referenced */
373 	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
374 	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
375 		if (obj_handle->ae_data[ae].ae_slices[i].
376 		    encap_image->uwords_num < uaddr)
377 			obj_handle->ae_data[ae].ae_slices[i].
378 			encap_image->uwords_num = uaddr;
379 	}
380 	return 0;
381 }
382 
383 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uof_initmem * init_mem)384 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
385 				   struct icp_qat_uof_initmem *init_mem)
386 {
387 	switch (init_mem->region) {
388 	case ICP_QAT_UOF_LMEM_REGION:
389 		if (qat_uclo_init_lmem_seg(handle, init_mem))
390 			return -EINVAL;
391 		break;
392 	case ICP_QAT_UOF_UMEM_REGION:
393 		if (qat_uclo_init_umem_seg(handle, init_mem))
394 			return -EINVAL;
395 		break;
396 	default:
397 		pr_err("QAT: initmem region error. region type=0x%x\n",
398 		       init_mem->region);
399 		return -EINVAL;
400 	}
401 	return 0;
402 }
403 
/*
 * Pre-fill the control store of every AE assigned to @image with the
 * image's fill pattern, so uwords outside the code page hold a known
 * value.  Returns 0 on success or -ENOMEM if the temporary pattern
 * buffer cannot be allocated.
 */
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	/* build a buffer of the 64-bit fill pattern repeated across the
	 * maximum possible ustore size */
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* only touch AEs this image is assigned to */
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		/* first uword past the code page's micro-words */
		patt_pos = page->beg_addr_p + page->micro_words_num;

		/* fill below the page start, then from the end of the page's
		 * code to the top of the effective ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}
441 
/*
 * Apply every init_mem entry of the UOF: stage local-memory and umem
 * writes into per-AE batch lists, then flush the local-memory lists via
 * the HAL and the umem lists via micro-store writes, freeing each list
 * afterwards.  Returns 0 on success or -EINVAL on any failure.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		/* entries are variable length: a fixed header followed by
		 * val_attr_num attribute records */
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* flush and release the staged local-memory writes */
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		/* flush and release the staged umem writes */
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}
475 
qat_uclo_find_chunk(struct icp_qat_uof_objhdr * obj_hdr,char * chunk_id,void * cur)476 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
477 				 char *chunk_id, void *cur)
478 {
479 	int i;
480 	struct icp_qat_uof_chunkhdr *chunk_hdr =
481 	    (struct icp_qat_uof_chunkhdr *)
482 	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
483 
484 	for (i = 0; i < obj_hdr->num_chunks; i++) {
485 		if ((cur < (void *)&chunk_hdr[i]) &&
486 		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
487 			     ICP_QAT_UOF_OBJID_LEN)) {
488 			return &chunk_hdr[i];
489 		}
490 	}
491 	return NULL;
492 }
493 
/*
 * Fold one byte @ch into a CRC-16/CCITT (polynomial 0x1021, MSB-first)
 * accumulator @reg and return the updated 16-bit value.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
	unsigned int topbit = 1 << 0xF;
	int round = 0x8;

	reg ^= inbyte << 0x8;
	while (round--) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}
509 
/* CRC-16/CCITT over @num bytes at @ptr; a NULL pointer yields 0. */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (!ptr)
		return chksum;

	while (num--)
		chksum = qat_uclo_calc_checksum(chksum, *ptr++);

	return chksum;
}
519 
520 static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char * buf,struct icp_qat_uof_filehdr * file_hdr,char * chunk_id)521 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
522 		   char *chunk_id)
523 {
524 	struct icp_qat_uof_filechunkhdr *file_chunk;
525 	struct icp_qat_uclo_objhdr *obj_hdr;
526 	char *chunk;
527 	int i;
528 
529 	file_chunk = (struct icp_qat_uof_filechunkhdr *)
530 		(buf + sizeof(struct icp_qat_uof_filehdr));
531 	for (i = 0; i < file_hdr->num_chunks; i++) {
532 		if (!strncmp(file_chunk->chunk_id, chunk_id,
533 			     ICP_QAT_UOF_OBJID_LEN)) {
534 			chunk = buf + file_chunk->offset;
535 			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
536 				chunk, file_chunk->size))
537 				break;
538 			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
539 			if (!obj_hdr)
540 				break;
541 			obj_hdr->file_buff = chunk;
542 			obj_hdr->checksum = file_chunk->checksum;
543 			obj_hdr->size = file_chunk->size;
544 			return obj_hdr;
545 		}
546 		file_chunk++;
547 	}
548 	return NULL;
549 }
550 
551 static unsigned int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj * encap_uof_obj,struct icp_qat_uof_image * image)552 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
553 			    struct icp_qat_uof_image *image)
554 {
555 	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
556 	struct icp_qat_uof_objtable *neigh_reg_tab;
557 	struct icp_qat_uof_code_page *code_page;
558 
559 	code_page = (struct icp_qat_uof_code_page *)
560 			((char *)image + sizeof(struct icp_qat_uof_image));
561 	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
562 		     code_page->uc_var_tab_offset);
563 	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
564 		      code_page->imp_var_tab_offset);
565 	imp_expr_tab = (struct icp_qat_uof_objtable *)
566 		       (encap_uof_obj->beg_uof +
567 		       code_page->imp_expr_tab_offset);
568 	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
569 	    imp_expr_tab->entry_num) {
570 		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
571 		return -EINVAL;
572 	}
573 	neigh_reg_tab = (struct icp_qat_uof_objtable *)
574 			(encap_uof_obj->beg_uof +
575 			code_page->neigh_reg_tab_offset);
576 	if (neigh_reg_tab->entry_num) {
577 		pr_err("QAT: UOF can't contain shared control store feature\n");
578 		return -EINVAL;
579 	}
580 	if (image->numpages > 1) {
581 		pr_err("QAT: UOF can't contain multiple pages\n");
582 		return -EINVAL;
583 	}
584 	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
585 		pr_err("QAT: UOF can't use shared control store feature\n");
586 		return -EFAULT;
587 	}
588 	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
589 		pr_err("QAT: UOF can't use reloadable feature\n");
590 		return -EFAULT;
591 	}
592 	return 0;
593 }
594 
qat_uclo_map_image_page(struct icp_qat_uof_encap_obj * encap_uof_obj,struct icp_qat_uof_image * img,struct icp_qat_uclo_encap_page * page)595 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
596 				     *encap_uof_obj,
597 				     struct icp_qat_uof_image *img,
598 				     struct icp_qat_uclo_encap_page *page)
599 {
600 	struct icp_qat_uof_code_page *code_page;
601 	struct icp_qat_uof_code_area *code_area;
602 	struct icp_qat_uof_objtable *uword_block_tab;
603 	struct icp_qat_uof_uword_block *uwblock;
604 	int i;
605 
606 	code_page = (struct icp_qat_uof_code_page *)
607 			((char *)img + sizeof(struct icp_qat_uof_image));
608 	page->def_page = code_page->def_page;
609 	page->page_region = code_page->page_region;
610 	page->beg_addr_v = code_page->beg_addr_v;
611 	page->beg_addr_p = code_page->beg_addr_p;
612 	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
613 						code_page->code_area_offset);
614 	page->micro_words_num = code_area->micro_words_num;
615 	uword_block_tab = (struct icp_qat_uof_objtable *)
616 			  (encap_uof_obj->beg_uof +
617 			  code_area->uword_block_tab);
618 	page->uwblock_num = uword_block_tab->entry_num;
619 	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
620 			sizeof(struct icp_qat_uof_objtable));
621 	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
622 	for (i = 0; i < uword_block_tab->entry_num; i++)
623 		page->uwblock[i].micro_words =
624 		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
625 }
626 
/*
 * Walk the IMAG chunks of the mapped UOF (up to @max_image of them) and
 * fill @ae_uimage with pointers into the mapped file for each image's
 * register, init-regsym and sbreak tables, plus a freshly allocated,
 * mapped code page.  Returns the number of images mapped, or 0 on
 * failure (incompatible image or allocation failure) after freeing any
 * pages already allocated.
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		/* find the next image chunk; NULL means no more images */
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		/* each table is an objtable header followed by its entries */
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	/* free pages of earlier images; entry j has no page allocated yet */
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}
684 
qat_uclo_map_ae(struct icp_qat_fw_loader_handle * handle,int max_ae)685 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
686 {
687 	int i, ae;
688 	int mflag = 0;
689 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
690 
691 	for (ae = 0; ae < max_ae; ae++) {
692 		if (!test_bit(ae,
693 			      (unsigned long *)&handle->hal_handle->ae_mask))
694 			continue;
695 		for (i = 0; i < obj_handle->uimage_num; i++) {
696 			if (!test_bit(ae, (unsigned long *)
697 			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
698 				continue;
699 			mflag = 1;
700 			if (qat_uclo_init_ae_data(obj_handle, ae, i))
701 				return -EINVAL;
702 		}
703 	}
704 	if (!mflag) {
705 		pr_err("QAT: uimage uses AE not set\n");
706 		return -EINVAL;
707 	}
708 	return 0;
709 }
710 
711 static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr * obj_hdr,char * tab_name,struct icp_qat_uof_strtable * str_table)712 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
713 		       char *tab_name, struct icp_qat_uof_strtable *str_table)
714 {
715 	struct icp_qat_uof_chunkhdr *chunk_hdr;
716 
717 	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
718 					obj_hdr->file_buff, tab_name, NULL);
719 	if (chunk_hdr) {
720 		int hdr_size;
721 
722 		memcpy(&str_table->table_len, obj_hdr->file_buff +
723 		       chunk_hdr->offset, sizeof(str_table->table_len));
724 		hdr_size = (char *)&str_table->strings - (char *)str_table;
725 		str_table->strings = (uintptr_t)obj_hdr->file_buff +
726 					chunk_hdr->offset + hdr_size;
727 		return str_table;
728 	}
729 	return NULL;
730 }
731 
732 static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj * encap_uof_obj,struct icp_qat_uclo_init_mem_table * init_mem_tab)733 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
734 			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
735 {
736 	struct icp_qat_uof_chunkhdr *chunk_hdr;
737 
738 	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
739 					ICP_QAT_UOF_IMEM, NULL);
740 	if (chunk_hdr) {
741 		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
742 			chunk_hdr->offset, sizeof(unsigned int));
743 		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
744 		(encap_uof_obj->beg_uof + chunk_hdr->offset +
745 		sizeof(unsigned int));
746 	}
747 }
748 
749 static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle * handle)750 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
751 {
752 	switch (handle->pci_dev->device) {
753 	case ADF_DH895XCC_PCI_DEVICE_ID:
754 		return ICP_QAT_AC_895XCC_DEV_TYPE;
755 	case ADF_C62X_PCI_DEVICE_ID:
756 		return ICP_QAT_AC_C62X_DEV_TYPE;
757 	case ADF_C3XXX_PCI_DEVICE_ID:
758 		return ICP_QAT_AC_C3XXX_DEV_TYPE;
759 	default:
760 		pr_err("QAT: unsupported device 0x%x\n",
761 		       handle->pci_dev->device);
762 		return 0;
763 	}
764 }
765 
qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle * obj_handle)766 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
767 {
768 	unsigned int maj_ver, prod_type = obj_handle->prod_type;
769 
770 	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
771 		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
772 		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
773 		       prod_type);
774 		return -EINVAL;
775 	}
776 	maj_ver = obj_handle->prod_rev & 0xff;
777 	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
778 	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
779 		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
780 		return -EINVAL;
781 	}
782 	return 0;
783 }
784 
/*
 * Initialize one AE register of the given type to @value via the
 * matching HAL helper.  Absolute register types clear the context mask
 * before falling through to their relative counterparts, since absolute
 * registers are not per-context.  Returns the HAL result, or -EFAULT
 * for an unsupported register type.
 */
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		/* general purpose registers */
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		/* SRAM/DRAM read transfer registers */
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		/* SRAM/DRAM write transfer registers */
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		/* neighbour-ring registers */
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}
827 
/*
 * Apply every init-regsym record of an image to AE @ae.  The default
 * context mask depends on the image's context mode: all eight contexts
 * (0xff) in 8-context mode, the even contexts (0x55) otherwise.
 * Returns 0 on success or -EINVAL for an invalid context number or an
 * unsupported init type.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff; /* 8-context mode: all contexts */
	else
		ctx_mask = 0x55; /* 4-context mode: even contexts only */

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			/* write to all contexts in the mode's mask */
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			/* write to the single named context */
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			/* unknown init types are silently ignored */
			break;
		}
	}
	return 0;
}
882 
qat_uclo_init_globals(struct icp_qat_fw_loader_handle * handle)883 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
884 {
885 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
886 	unsigned int s, ae;
887 
888 	if (obj_handle->global_inited)
889 		return 0;
890 	if (obj_handle->init_mem_tab.entry_num) {
891 		if (qat_uclo_init_memory(handle)) {
892 			pr_err("QAT: initialize memory failed\n");
893 			return -EINVAL;
894 		}
895 	}
896 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
897 		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
898 			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
899 				continue;
900 			if (qat_uclo_init_reg_sym(handle, ae,
901 						  obj_handle->ae_data[ae].
902 						  ae_slices[s].encap_image))
903 				return -EINVAL;
904 		}
905 	}
906 	obj_handle->global_inited = 1;
907 	return 0;
908 }
909 
qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle * handle)910 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
911 {
912 	unsigned char ae, nn_mode, s;
913 	struct icp_qat_uof_image *uof_image;
914 	struct icp_qat_uclo_aedata *ae_data;
915 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
916 
917 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
918 		if (!test_bit(ae,
919 			      (unsigned long *)&handle->hal_handle->ae_mask))
920 			continue;
921 		ae_data = &obj_handle->ae_data[ae];
922 		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
923 				      ICP_QAT_UCLO_MAX_CTX); s++) {
924 			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
925 				continue;
926 			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
927 			if (qat_hal_set_ae_ctx_mode(handle, ae,
928 						    (char)ICP_QAT_CTX_MODE
929 						    (uof_image->ae_mode))) {
930 				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
931 				return -EFAULT;
932 			}
933 			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
934 			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
935 				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
936 				return -EFAULT;
937 			}
938 			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
939 						   (char)ICP_QAT_LOC_MEM0_MODE
940 						   (uof_image->ae_mode))) {
941 				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
942 				return -EFAULT;
943 			}
944 			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
945 						   (char)ICP_QAT_LOC_MEM1_MODE
946 						   (uof_image->ae_mode))) {
947 				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
948 				return -EFAULT;
949 			}
950 		}
951 	}
952 	return 0;
953 }
954 
qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle * handle)955 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
956 {
957 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
958 	struct icp_qat_uclo_encapme *image;
959 	int a;
960 
961 	for (a = 0; a < obj_handle->uimage_num; a++) {
962 		image = &obj_handle->ae_uimage[a];
963 		image->uwords_num = image->page->beg_addr_p +
964 					image->page->micro_words_num;
965 	}
966 }
967 
qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle * handle)968 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
969 {
970 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
971 	unsigned int ae;
972 
973 	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
974 	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
975 					     obj_handle->obj_hdr->file_buff;
976 	obj_handle->uword_in_bytes = 6;
977 	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
978 	obj_handle->prod_rev = PID_MAJOR_REV |
979 			(PID_MINOR_REV & handle->hal_handle->revision_id);
980 	if (qat_uclo_check_uof_compat(obj_handle)) {
981 		pr_err("QAT: UOF incompatible\n");
982 		return -EINVAL;
983 	}
984 	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
985 					GFP_KERNEL);
986 	if (!obj_handle->uword_buf)
987 		return -ENOMEM;
988 	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
989 	if (!obj_handle->obj_hdr->file_buff ||
990 	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
991 				    &obj_handle->str_table)) {
992 		pr_err("QAT: UOF doesn't have effective images\n");
993 		goto out_err;
994 	}
995 	obj_handle->uimage_num =
996 		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
997 				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
998 	if (!obj_handle->uimage_num)
999 		goto out_err;
1000 	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1001 		pr_err("QAT: Bad object\n");
1002 		goto out_check_uof_aemask_err;
1003 	}
1004 	qat_uclo_init_uword_num(handle);
1005 	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1006 				   &obj_handle->init_mem_tab);
1007 	if (qat_uclo_set_ae_mode(handle))
1008 		goto out_check_uof_aemask_err;
1009 	return 0;
1010 out_check_uof_aemask_err:
1011 	for (ae = 0; ae < obj_handle->uimage_num; ae++)
1012 		kfree(obj_handle->ae_uimage[ae].page);
1013 out_err:
1014 	kfree(obj_handle->uword_buf);
1015 	return -EFAULT;
1016 }
1017 
qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle * handle,struct icp_qat_suof_filehdr * suof_ptr,int suof_size)1018 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1019 				      struct icp_qat_suof_filehdr *suof_ptr,
1020 				      int suof_size)
1021 {
1022 	unsigned int check_sum = 0;
1023 	unsigned int min_ver_offset = 0;
1024 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1025 
1026 	suof_handle->file_id = ICP_QAT_SUOF_FID;
1027 	suof_handle->suof_buf = (char *)suof_ptr;
1028 	suof_handle->suof_size = suof_size;
1029 	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
1030 					      min_ver);
1031 	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
1032 					       min_ver_offset);
1033 	if (check_sum != suof_ptr->check_sum) {
1034 		pr_err("QAT: incorrect SUOF checksum\n");
1035 		return -EINVAL;
1036 	}
1037 	suof_handle->check_sum = suof_ptr->check_sum;
1038 	suof_handle->min_ver = suof_ptr->min_ver;
1039 	suof_handle->maj_ver = suof_ptr->maj_ver;
1040 	suof_handle->fw_type = suof_ptr->fw_type;
1041 	return 0;
1042 }
1043 
qat_uclo_map_simg(struct icp_qat_suof_handle * suof_handle,struct icp_qat_suof_img_hdr * suof_img_hdr,struct icp_qat_suof_chunk_hdr * suof_chunk_hdr)1044 static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
1045 			      struct icp_qat_suof_img_hdr *suof_img_hdr,
1046 			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1047 {
1048 	struct icp_qat_simg_ae_mode *ae_mode;
1049 	struct icp_qat_suof_objhdr *suof_objhdr;
1050 
1051 	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
1052 				   suof_chunk_hdr->offset +
1053 				   sizeof(*suof_objhdr));
1054 	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
1055 				  (suof_handle->suof_buf +
1056 				   suof_chunk_hdr->offset))->img_length;
1057 
1058 	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1059 	suof_img_hdr->css_key = (suof_img_hdr->css_header +
1060 				 sizeof(struct icp_qat_css_hdr));
1061 	suof_img_hdr->css_signature = suof_img_hdr->css_key +
1062 				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
1063 				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
1064 	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
1065 				 ICP_QAT_CSS_SIGNATURE_LEN;
1066 
1067 	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1068 	suof_img_hdr->ae_mask = ae_mode->ae_mask;
1069 	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1070 	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1071 	suof_img_hdr->fw_type = ae_mode->fw_type;
1072 }
1073 
1074 static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle * suof_handle,struct icp_qat_suof_chunk_hdr * suof_chunk_hdr)1075 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1076 			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1077 {
1078 	char **sym_str = (char **)&suof_handle->sym_str;
1079 	unsigned int *sym_size = &suof_handle->sym_size;
1080 	struct icp_qat_suof_strtable *str_table_obj;
1081 
1082 	*sym_size = *(unsigned int *)(uintptr_t)
1083 		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
1084 	*sym_str = (char *)(uintptr_t)
1085 		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
1086 		   sizeof(str_table_obj->tab_length));
1087 }
1088 
qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle * handle,struct icp_qat_suof_img_hdr * img_hdr)1089 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1090 				      struct icp_qat_suof_img_hdr *img_hdr)
1091 {
1092 	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1093 	unsigned int prod_rev, maj_ver, prod_type;
1094 
1095 	prod_type = qat_uclo_get_dev_type(handle);
1096 	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1097 	prod_rev = PID_MAJOR_REV |
1098 			 (PID_MINOR_REV & handle->hal_handle->revision_id);
1099 	if (img_ae_mode->dev_type != prod_type) {
1100 		pr_err("QAT: incompatible product type %x\n",
1101 		       img_ae_mode->dev_type);
1102 		return -EINVAL;
1103 	}
1104 	maj_ver = prod_rev & 0xff;
1105 	if ((maj_ver > img_ae_mode->devmax_ver) ||
1106 	    (maj_ver < img_ae_mode->devmin_ver)) {
1107 		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1108 		return -EINVAL;
1109 	}
1110 	return 0;
1111 }
1112 
qat_uclo_del_suof(struct icp_qat_fw_loader_handle * handle)1113 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1114 {
1115 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1116 
1117 	kfree(sobj_handle->img_table.simg_hdr);
1118 	sobj_handle->img_table.simg_hdr = NULL;
1119 	kfree(handle->sobj_handle);
1120 	handle->sobj_handle = NULL;
1121 }
1122 
qat_uclo_tail_img(struct icp_qat_suof_img_hdr * suof_img_hdr,unsigned int img_id,unsigned int num_simgs)1123 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1124 			      unsigned int img_id, unsigned int num_simgs)
1125 {
1126 	struct icp_qat_suof_img_hdr img_header;
1127 
1128 	if (img_id != num_simgs - 1) {
1129 		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1130 		       sizeof(*suof_img_hdr));
1131 		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1132 		       sizeof(*suof_img_hdr));
1133 		memcpy(&suof_img_hdr[img_id], &img_header,
1134 		       sizeof(*suof_img_hdr));
1135 	}
1136 }
1137 
qat_uclo_map_suof(struct icp_qat_fw_loader_handle * handle,struct icp_qat_suof_filehdr * suof_ptr,int suof_size)1138 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1139 			     struct icp_qat_suof_filehdr *suof_ptr,
1140 			     int suof_size)
1141 {
1142 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1143 	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1144 	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1145 	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1146 	unsigned int i = 0;
1147 	struct icp_qat_suof_img_hdr img_header;
1148 
1149 	if (!suof_ptr || (suof_size == 0)) {
1150 		pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
1151 		return -EINVAL;
1152 	}
1153 	if (qat_uclo_check_suof_format(suof_ptr))
1154 		return -EINVAL;
1155 	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1156 	if (ret)
1157 		return ret;
1158 	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1159 			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1160 
1161 	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1162 	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1163 
1164 	if (suof_handle->img_table.num_simgs != 0) {
1165 		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
1166 				       sizeof(img_header),
1167 				       GFP_KERNEL);
1168 		if (!suof_img_hdr)
1169 			return -ENOMEM;
1170 		suof_handle->img_table.simg_hdr = suof_img_hdr;
1171 	}
1172 
1173 	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1174 		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
1175 				  &suof_chunk_hdr[1 + i]);
1176 		ret = qat_uclo_check_simg_compat(handle,
1177 						 &suof_img_hdr[i]);
1178 		if (ret)
1179 			return ret;
1180 		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1181 			ae0_img = i;
1182 	}
1183 	qat_uclo_tail_img(suof_img_hdr, ae0_img,
1184 			  suof_handle->img_table.num_simgs);
1185 	return 0;
1186 }
1187 
/* Combine two 32-bit CSR halves into a single 64-bit bus address. */
#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
/* Width of one CSR dword, used when splitting 64-bit addresses. */
#define BITS_IN_DWORD 32
1190 
/*
 * qat_uclo_auth_fw() - ask the FCU to authenticate a mapped firmware image.
 * @handle: loader handle
 * @desc:   authentication descriptor built by qat_uclo_map_auth_fw()
 *
 * Programs the descriptor's DMA address into the FCU CSRs, issues the
 * AUTH command and polls FCU_STATUS (sleeping FW_AUTH_WAIT_PERIOD between
 * reads, up to FW_AUTH_MAX_RETRY attempts). Returns 0 once verification
 * completes, -EINVAL on verification failure or timeout.
 */
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, retry = 0;
	u64 bus_addr;

	/* the auth chunk precedes the descriptor in the DMA buffer */
	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

	/* do-while guarantees fcu_sts is read before any goto below */
	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}
1217 
qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle * handle,struct icp_firml_dram_desc * dram_desc,unsigned int size)1218 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1219 			       struct icp_firml_dram_desc *dram_desc,
1220 			       unsigned int size)
1221 {
1222 	void *vptr;
1223 	dma_addr_t ptr;
1224 
1225 	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
1226 				  size, &ptr, GFP_KERNEL);
1227 	if (!vptr)
1228 		return -ENOMEM;
1229 	dram_desc->dram_base_addr_v = vptr;
1230 	dram_desc->dram_bus_addr = ptr;
1231 	dram_desc->dram_size = size;
1232 	return 0;
1233 }
1234 
qat_uclo_simg_free(struct icp_qat_fw_loader_handle * handle,struct icp_firml_dram_desc * dram_desc)1235 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1236 			       struct icp_firml_dram_desc *dram_desc)
1237 {
1238 	dma_free_coherent(&handle->pci_dev->dev,
1239 			  (size_t)(dram_desc->dram_size),
1240 			  (dram_desc->dram_base_addr_v),
1241 			  dram_desc->dram_bus_addr);
1242 	memset(dram_desc, 0, sizeof(*dram_desc));
1243 }
1244 
qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle * handle,struct icp_qat_fw_auth_desc ** desc)1245 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1246 				   struct icp_qat_fw_auth_desc **desc)
1247 {
1248 	struct icp_firml_dram_desc dram_desc;
1249 
1250 	dram_desc.dram_base_addr_v = *desc;
1251 	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1252 				   (*desc))->chunk_bus_addr;
1253 	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1254 			       (*desc))->chunk_size;
1255 	qat_uclo_simg_free(handle, &dram_desc);
1256 }
1257 
/*
 * qat_uclo_map_auth_fw() - stage a signed image into DMA memory for the FCU.
 * @handle: loader handle
 * @image:  raw signed image (CSS header + FWSK key + signature + body)
 * @size:   raw image size in bytes
 * @desc:   out: authentication descriptor inside the DMA buffer
 *
 * Allocates one DMA-coherent region laid out as:
 *   auth chunk | CSS header | FWSK modulus+pad+exponent | signature | body
 * copies each section from @image into it, and fills the descriptor with
 * the bus addresses of every section. Returns 0, -EINVAL on oversized
 * input, or -ENOMEM on allocation failure. On success the caller must
 * release the region via qat_uclo_ummap_auth_fw().
 */
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr,  bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	/* AE firmware has a fixed staged size; MMP images are padded */
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	/* auth chunk at offset 0 records the region for later freeing */
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr  = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	/* image body follows the signature */
	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		/* split the body into mode data, init sequence, instructions */
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						 (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					     (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		/* MMP images: the whole body is "instructions" */
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}
1363 
/*
 * qat_uclo_load_fw() - command the FCU to load authenticated firmware
 * into every AE named in the image's ae_mask.
 * @handle: loader handle
 * @desc:   descriptor previously authenticated by qat_uclo_auth_fw()
 *
 * For each masked AE, refuses to load while the AE is active, issues a
 * LOAD command, and polls FCU_STATUS for the per-AE "loaded" bit.
 * Returns 0 on success, -EINVAL if an AE is active or loading times out.
 */
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	/* locate the AE-mode record inside the staged DMA buffer */
	virt_addr = (void *)((uintptr_t)desc +
		     sizeof(struct icp_qat_auth_chunk) +
		     sizeof(struct icp_qat_css_hdr) +
		     ICP_QAT_CSS_FWSK_PUB_LEN +
		     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		/* poll until load completes and this AE's loaded bit is set */
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}
1404 
qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle * handle,void * addr_ptr,int mem_size)1405 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1406 				 void *addr_ptr, int mem_size)
1407 {
1408 	struct icp_qat_suof_handle *suof_handle;
1409 
1410 	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1411 	if (!suof_handle)
1412 		return -ENOMEM;
1413 	handle->sobj_handle = suof_handle;
1414 	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1415 		qat_uclo_del_suof(handle);
1416 		pr_err("QAT: map SUOF failed\n");
1417 		return -EINVAL;
1418 	}
1419 	return 0;
1420 }
1421 
qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle * handle,void * addr_ptr,int mem_size)1422 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1423 		       void *addr_ptr, int mem_size)
1424 {
1425 	struct icp_qat_fw_auth_desc *desc = NULL;
1426 	int status = 0;
1427 
1428 	if (handle->fw_auth) {
1429 		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
1430 			status = qat_uclo_auth_fw(handle, desc);
1431 		qat_uclo_ummap_auth_fw(handle, &desc);
1432 	} else {
1433 		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
1434 			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
1435 			return -EINVAL;
1436 		}
1437 		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1438 	}
1439 	return status;
1440 }
1441 
qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle * handle,void * addr_ptr,int mem_size)1442 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1443 				void *addr_ptr, int mem_size)
1444 {
1445 	struct icp_qat_uof_filehdr *filehdr;
1446 	struct icp_qat_uclo_objhandle *objhdl;
1447 
1448 	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1449 	if (!objhdl)
1450 		return -ENOMEM;
1451 	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1452 	if (!objhdl->obj_buf)
1453 		goto out_objbuf_err;
1454 	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1455 	if (qat_uclo_check_uof_format(filehdr))
1456 		goto out_objhdr_err;
1457 	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1458 					     ICP_QAT_UOF_OBJS);
1459 	if (!objhdl->obj_hdr) {
1460 		pr_err("QAT: object file chunk is null\n");
1461 		goto out_objhdr_err;
1462 	}
1463 	handle->obj_handle = objhdl;
1464 	if (qat_uclo_parse_uof_obj(handle))
1465 		goto out_overlay_obj_err;
1466 	return 0;
1467 
1468 out_overlay_obj_err:
1469 	handle->obj_handle = NULL;
1470 	kfree(objhdl->obj_hdr);
1471 out_objhdr_err:
1472 	kfree(objhdl->obj_buf);
1473 out_objbuf_err:
1474 	kfree(objhdl);
1475 	return -ENOMEM;
1476 }
1477 
qat_uclo_map_obj(struct icp_qat_fw_loader_handle * handle,void * addr_ptr,int mem_size)1478 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1479 		     void *addr_ptr, int mem_size)
1480 {
1481 	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1482 		     (sizeof(handle->hal_handle->ae_mask) * 8));
1483 
1484 	if (!handle || !addr_ptr || mem_size < 24)
1485 		return -EINVAL;
1486 
1487 	return (handle->fw_auth) ?
1488 			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
1489 			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
1490 }
1491 
qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle * handle)1492 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1493 {
1494 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1495 	unsigned int a;
1496 
1497 	if (handle->sobj_handle)
1498 		qat_uclo_del_suof(handle);
1499 	if (!obj_handle)
1500 		return;
1501 
1502 	kfree(obj_handle->uword_buf);
1503 	for (a = 0; a < obj_handle->uimage_num; a++)
1504 		kfree(obj_handle->ae_uimage[a].page);
1505 
1506 	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1507 		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1508 
1509 	kfree(obj_handle->obj_hdr);
1510 	kfree(obj_handle->obj_buf);
1511 	kfree(obj_handle);
1512 	handle->obj_handle = NULL;
1513 }
1514 
/*
 * qat_uclo_fill_uwords() - fetch one microword for a ustore address.
 * @obj_handle: object handle (supplies uword_in_bytes)
 * @encap_page: page to read from, or NULL to emit the fill pattern
 * @uword:      out: the resolved microword
 * @addr_p:     physical ustore address (unused in the lookup itself)
 * @raddr:      page-relative address to look up
 * @fill:       fill pattern for unmapped or invalid words
 *
 * Scans the page's uword blocks for one containing @raddr and copies the
 * uword_in_bytes-sized word out, masking it to 43 significant bits.
 * Unmatched or INVLD_UWORD results are replaced with @fill.
 *
 * NOTE(review): on a match @raddr is rewritten to a byte offset but the
 * loop keeps scanning the remaining blocks with that rewritten value
 * instead of breaking; presumably block address ranges never overlap the
 * rescaled offset — confirm before restructuring.
 */
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			/* convert to a byte offset within the block */
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}
1543 
qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle * handle,struct icp_qat_uclo_encap_page * encap_page,unsigned int ae)1544 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1545 					struct icp_qat_uclo_encap_page
1546 					*encap_page, unsigned int ae)
1547 {
1548 	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1549 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1550 	uint64_t fill_pat;
1551 
1552 	/* load the page starting at appropriate ustore address */
1553 	/* get fill-pattern from an image -- they are all the same */
1554 	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1555 	       sizeof(uint64_t));
1556 	uw_physical_addr = encap_page->beg_addr_p;
1557 	uw_relative_addr = 0;
1558 	words_num = encap_page->micro_words_num;
1559 	while (words_num) {
1560 		if (words_num < UWORD_CPYBUF_SIZE)
1561 			cpylen = words_num;
1562 		else
1563 			cpylen = UWORD_CPYBUF_SIZE;
1564 
1565 		/* load the buffer */
1566 		for (i = 0; i < cpylen; i++)
1567 			qat_uclo_fill_uwords(obj_handle, encap_page,
1568 					     &obj_handle->uword_buf[i],
1569 					     uw_physical_addr + i,
1570 					     uw_relative_addr + i, fill_pat);
1571 
1572 		/* copy the buffer to ustore */
1573 		qat_hal_wr_uwords(handle, (unsigned char)ae,
1574 				  uw_physical_addr, cpylen,
1575 				  obj_handle->uword_buf);
1576 
1577 		uw_physical_addr += cpylen;
1578 		uw_relative_addr += cpylen;
1579 		words_num -= cpylen;
1580 	}
1581 }
1582 
/*
 * qat_uclo_wr_uimage_page() - write an image's default page to every AE
 * it is assigned to and start the assigned contexts at the entry point.
 * @handle: loader handle
 * @image:  UOF image to write
 *
 * For each assigned AE, finds the slice carrying this image, writes its
 * default page into ustore, records the current page per context, enables
 * the assigned contexts and sets their PC to the image entry address.
 */
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	/* 8-context mode uses every context, otherwise only even ones */
	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		/* record the resident page for each context in the mask */
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}
1624 
qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle * handle)1625 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
1626 {
1627 	unsigned int i;
1628 	struct icp_qat_fw_auth_desc *desc = NULL;
1629 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1630 	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
1631 
1632 	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
1633 		if (qat_uclo_map_auth_fw(handle,
1634 					 (char *)simg_hdr[i].simg_buf,
1635 					 (unsigned int)
1636 					 (simg_hdr[i].simg_len),
1637 					 &desc))
1638 			goto wr_err;
1639 		if (qat_uclo_auth_fw(handle, desc))
1640 			goto wr_err;
1641 		if (qat_uclo_load_fw(handle, desc))
1642 			goto wr_err;
1643 		qat_uclo_ummap_auth_fw(handle, &desc);
1644 	}
1645 	return 0;
1646 wr_err:
1647 	qat_uclo_ummap_auth_fw(handle, &desc);
1648 	return -EINVAL;
1649 }
1650 
qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle * handle)1651 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
1652 {
1653 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1654 	unsigned int i;
1655 
1656 	if (qat_uclo_init_globals(handle))
1657 		return -EINVAL;
1658 	for (i = 0; i < obj_handle->uimage_num; i++) {
1659 		if (!obj_handle->ae_uimage[i].img_ptr)
1660 			return -EINVAL;
1661 		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1662 			return -EINVAL;
1663 		qat_uclo_wr_uimage_page(handle,
1664 					obj_handle->ae_uimage[i].img_ptr);
1665 	}
1666 	return 0;
1667 }
1668 
qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle * handle)1669 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1670 {
1671 	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
1672 				   qat_uclo_wr_uof_img(handle);
1673 }
1674