// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/**
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */

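/* advance a command queue index, wrapping around at the ring size */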
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}

/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: Request structure
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;

out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}

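/* release all DMA mappings and free the software request */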
static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped sg entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 *   Returns 0 on success, or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct nitrox_sglist *sglist;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;
	sgtbl->nr_sgcomp = nr_sgcomp;

	sglist = sgtbl->sglist;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4; j++) {
			sgcomp->len[j] = cpu_to_be16(sglist->len);
			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
			sglist++;
		}
		sgcomp++;
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->dma = dma;
	sgtbl->len = sz_comp;

	return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/* create gather list for IV and src entries */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		ret = -EINVAL;
		goto iv_map_err;
	}

	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto src_map_err;
	}
	sr->in.buf = req->src;

	/* store the mappings */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* roundup map count to align with entries in sg component */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
src_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}

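/**
 * dma_map_outbufs - DMA map the ORH, completion header and output
 *                   sglist, and create the scatter component for
 *                   N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */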
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list ORH, IV, dst entries and Completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
		ret = -EINVAL;
		goto orh_map_err;
	}

	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
		ret = -EINVAL;
		goto compl_map_err;
	}

	sr->inplace = (req->src == req->dst) ? true : false;
	/* out of place */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}

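/* map the input and output buffers needed for the SE instruction */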
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

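/*
 * Reserve a slot in the command queue by bumping pending_count;
 * undo the reservation and report full if the queue limit is hit.
 */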
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->head + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmdq_lock);
}

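/* post backlogged requests to the command queue while space is available */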
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}

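/*
 * Post the request to the command queue; if the queue is full and the
 * caller allows backlogging, park the request on the backlog list.
 */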
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EBUSY;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: skcipher request
 *
 * Returns -EINPROGRESS if the request is queued, -EBUSY if it is
 * backlogged, or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |    SLC_INFO        | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No conversion for front data,
	 * It goes into payload
	 * put GP Header in front data
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

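/* true if a posted request has been pending longer than the device timeout */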
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * @data: pointer to the bottom half data (struct bh_data)
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}