// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE	256

/* command queue alignments */
#define PKT_IN_ALIGN	16

static int cmdq_common_init(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	u32 qsize;

	qsize = (ndev->qlen) * cmdq->instr_size;
	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
						   (qsize + PKT_IN_ALIGN),
						   &cmdq->dma_unaligned,
						   GFP_KERNEL);
	if (!cmdq->head_unaligned)
		return -ENOMEM;

	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = (qsize + PKT_IN_ALIGN);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
	spin_lock_init(&cmdq->backlog_lock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
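
/*
 * Illustration only, not part of the driver: given the ring set up by
 * cmdq_common_init() above, instruction slot @idx starts at
 * head + idx * instr_size within the PKT_IN_ALIGN-aligned buffer.
 * cmdq_slot_sketch is a hypothetical name; the real enqueue path lives
 * in the request manager, not in this file.
 */
static inline u8 *cmdq_slot_sketch(struct nitrox_cmdq *cmdq, u32 idx)
{
	/* slots are instr_size bytes apart, starting at the aligned head */
	return (u8 *)cmdq->head + (u64)idx * cmdq->instr_size;
}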

static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->head_unaligned, cmdq->dma_unaligned);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);

	cmdq->dbell_csr_addr = NULL;
	cmdq->head = NULL;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];

		cmdq_common_cleanup(cmdq);
	}
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
}

static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i, err, size;

	size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
	ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
	if (!ndev->pkt_cmdqs)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_cmdqs[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		/* SE ring doorbell address for this queue */
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = cmdq_common_init(cmdq);
		if (err)
			goto pkt_cmdq_fail;
	}
	return 0;

pkt_cmdq_fail:
	nitrox_cleanup_pkt_cmdqs(ndev);
	return err;
}
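
/*
 * Sketch only, not the driver's submit path: this assumes the
 * NPS_PKT_IN_INSTR_BAOFF_DBELLX doorbell mapped above is written with
 * the count of newly posted instructions. cmdq_ring_doorbell_sketch is
 * a hypothetical name for illustration.
 */
static inline void cmdq_ring_doorbell_sketch(struct nitrox_cmdq *cmdq,
					     u64 nr_instr)
{
	/* tell the SE ring how many new instructions were queued */
	writeq(nr_instr, cmdq->dbell_csr_addr);
}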

static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* crypto context pool, 16-byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}
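
/*
 * Pool entry layout, for reference (derived from the sizes above):
 *
 *   | struct ctx_hdr | CRYPTO_CTX_SIZE bytes of crypto context |
 *   ^                ^
 *   ctx->dma         ctx->ctx_dma = dma + sizeof(struct ctx_hdr)
 *
 * crypto_alloc_context() below hands out the second part and hides the
 * header just in front of it.
 */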

static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - allocate crypto context from pool
 * @ndev: NITROX device
 *
 * Return: pointer to the usable context area on success,
 * NULL on allocation failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
	if (!vaddr)
		return NULL;

	/* fill in the metadata header preceding the context */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}

/**
 * crypto_free_context - free crypto context back to its pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
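
/*
 * Minimal usage sketch (hypothetical caller, illustration only): every
 * pointer returned by crypto_alloc_context() must go back through
 * crypto_free_context(), which relies on the ctx_hdr hidden in front
 * of the returned area to find the owning pool.
 */
static inline int nitrox_ctx_roundtrip_sketch(struct nitrox_device *ndev)
{
	void *ctx = crypto_alloc_context(ndev);

	if (!ctx)
		return -ENOMEM;
	/* ... program up to CRYPTO_CTX_SIZE bytes of context here ... */
	crypto_free_context(ctx);
	return 0;
}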

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the per-device crypto context pool and the packet
 * command queues.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err = 0;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_init_pkt_cmdqs(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}
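
/*
 * Sketch only (hypothetical probe fragment): nitrox_common_sw_init()
 * pairs with nitrox_common_sw_cleanup() below, typically bracketing
 * hardware bring-up in the PCI probe/remove path.
 */
static inline int nitrox_sw_bringup_sketch(struct nitrox_device *ndev)
{
	int err = nitrox_common_sw_init(ndev);

	if (err)
		return err;

	/*
	 * hardware bring-up would go here; any failure after this point
	 * must unwind with nitrox_common_sw_cleanup(ndev)
	 */
	return 0;
}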

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_cleanup_pkt_cmdqs(ndev);
	destroy_crypto_dma_pool(ndev);
}