/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_DEV_H
#define __NITROX_DEV_H

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/if.h>

#define VERSION_LEN 32
/* Maximum queues in PF mode */
#define MAX_PF_QUEUES 64
/* Maximum device queues */
#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
/* Maximum UCD Blocks */
#define CNN55XX_MAX_UCD_BLOCKS 8

/**
 * struct nitrox_cmdq - NITROX command queue
 * @cmd_qlock: command queue lock
 * @resp_qlock: response queue lock
 * @backlog_qlock: backlog queue lock
 * @ndev: NITROX device
 * @response_head: submitted request list
 * @backlog_head: backlog queue
 * @dbell_csr_addr: doorbell register address for this queue
 * @compl_cnt_csr_addr: completion count register address of the SLC port
 * @base: command queue base address
 * @dma: DMA address of the base
 * @backlog_qflush: backlog queue flush work
 * @pending_count: number of requests pending at the device
 * @backlog_count: number of requests in the backlog queue
 * @write_idx: next write index for the command
 * @instr_size: command size
 * @qno: command queue number
 * @qsize: command queue size
 * @unalign_base: unaligned base address
 * @unalign_dma: unaligned DMA address
 */
struct nitrox_cmdq {
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	atomic_t pending_count;
	atomic_t backlog_count;

	int write_idx;
	u8 instr_size;
	u8 qno;
	u32 qsize;

	u8 *unalign_base;
	dma_addr_t unalign_dma;
};

/**
 * struct nitrox_hw - NITROX hardware information
 * @partname: part name, e.g. CNN55xxx-xxx
 * @fw_name: firmware version string for each UCD block
 * @freq: NITROX frequency
 * @vendor_id: vendor ID
 * @device_id: device ID
 * @revision_id: revision ID
 * @se_cores: number of symmetric cores
 * @ae_cores: number of asymmetric cores
 * @zip_cores: number of zip cores
 */
struct nitrox_hw {
	char partname[IFNAMSIZ * 2];
	char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};

/**
 * struct nitrox_stats - request statistics
 * @posted: number of requests posted to the device
 * @completed: number of requests completed
 * @dropped: number of requests dropped
 */
struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};

#define IRQ_NAMESZ 32

/**
 * struct nitrox_q_vector - MSI-X queue vector information
 * @name: irq name
 * @valid: flag to indicate this vector is valid
 * @ring: ring number
 * @resp_tasklet: response handling tasklet
 * @cmdq: command queue assigned to this vector
 * @ndev: NITROX device
 */
struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};

/**
 * union mbox_msg - Mailbox message data
 * @value: message as a raw 64-bit word
 * @type: message type
 * @opcode: message opcode
 * @data: message data
 */
union mbox_msg {
	u64 value;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 data: 58;
	};
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 chipid: 8;
		u64 vfid: 8;
	} id;
};
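
/*
 * Illustrative sketch only (not taken from the driver): the bitfields
 * above alias the single 64-bit @value, so a message can be built field
 * by field and then handled as one register-sized word. The type/opcode
 * values below are placeholders, not real NITROX mailbox opcodes, and
 * the write helper is hypothetical.
 *
 *	union mbox_msg msg = { .value = 0 };
 *
 *	msg.id.type = 1;			// placeholder message type
 *	msg.id.opcode = 2;			// placeholder opcode
 *	msg.id.chipid = 0;
 *	msg.id.vfid = vfdev->vfno;		// vfdev as in struct nitrox_vfdev below
 *	some_mbox_write(ndev, msg.value);	// hypothetical helper
 */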

/**
 * struct nitrox_vfdev - NITROX VF device instance in PF
 * @state: VF device state
 * @vfno: VF number
 * @nr_queues: number of queues enabled in VF
 * @ring: ring to communicate with VF
 * @msg: Mailbox message data from VF
 * @mbx_resp: Mailbox counters
 */
struct nitrox_vfdev {
	atomic_t state;
	int vfno;
	int nr_queues;
	int ring;
	union mbox_msg msg;
	atomic64_t mbx_resp;
};

/**
 * struct nitrox_iov - SR-IOV information
 * @num_vfs: number of VF(s) enabled
 * @max_vf_queues: maximum number of queues allowed per VF
 * @vfdev: VF(s) devices
 * @pf2vf_wq: workqueue for PF2VF communication
 * @msix: MSI-X entry for PF in SR-IOV case
 */
struct nitrox_iov {
	int num_vfs;
	int max_vf_queues;
	struct nitrox_vfdev *vfdev;
	struct workqueue_struct *pf2vf_wq;
	struct msix_entry msix;
};

/*
 * NITROX Device states
 */
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};

/* NITROX support modes for VF(s) */
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};

#define __NDEV_SRIOV_BIT 0

/* command queue size */
#define DEFAULT_CMD_QLEN 2048
/* command timeout in milliseconds */
#define CMD_TIMEOUT 2000
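
/*
 * Note: CMD_TIMEOUT is in milliseconds, while the per-device @timeout
 * field in struct nitrox_device below is kept in jiffies. An illustrative
 * (not driver-verified) conversion would be:
 *
 *	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
 */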

#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))

#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))
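
/*
 * Usage sketch (illustrative, with a placeholder register offset): DEV()
 * yields the struct device for generic kernel APIs, and NITROX_CSR_ADDR()
 * yields an __iomem address suitable for readq()/writeq(), as used by the
 * accessors further below.
 *
 *	dev_info(DEV(ndev), "queue length %u\n", ndev->qlen);
 *	val = readq(NITROX_CSR_ADDR(ndev, SOME_CSR_OFFSET));
 */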

/**
 * struct nitrox_device - NITROX Device Information.
 * @list: pointer to linked list of devices
 * @bar_addr: iomap address
 * @pdev: PCI device information
 * @state: NITROX device state
 * @flags: flags to indicate device features
 * @timeout: Request timeout in jiffies
 * @refcnt: Device usage count
 * @idx: device index (0..N)
 * @node: NUMA node id attached
 * @qlen: Command queue length
 * @nr_queues: Number of command queues
 * @mode: Device mode PF/VF
 * @ctx_pool: DMA pool for crypto context
 * @pkt_inq: Packet input rings
 * @aqmq: AQM command queues
 * @qvec: MSI-X queue vectors information
 * @iov: SR-IOV information
 * @num_vecs: number of MSI-X vectors
 * @stats: request statistics
 * @hw: hardware information
 * @debugfs_dir: debugfs directory
 */
struct nitrox_device {
	struct list_head list;

	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

	u8 idx;
	int node;
	u16 qlen;
	u16 nr_queues;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_inq;
	struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;

	struct nitrox_q_vector *qvec;
	struct nitrox_iov iov;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
#endif
};

/**
 * nitrox_read_csr - Read from device register
 * @ndev: NITROX device
 * @offset: offset of the register to read
 *
 * Returns: value read
 */
static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
{
	return readq(ndev->bar_addr + offset);
}

/**
 * nitrox_write_csr - Write to device register
 * @ndev: NITROX device
 * @offset: offset of the register to write
 * @value: value to write
 */
static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
				    u64 value)
{
	writeq(value, (ndev->bar_addr + offset));
}
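
/*
 * Read-modify-write sketch using the accessors above; the register offset
 * and bit are placeholders, not real NITROX CSR definitions.
 *
 *	u64 val;
 *
 *	val = nitrox_read_csr(ndev, SOME_CSR_OFFSET);
 *	val |= BIT_ULL(0);
 *	nitrox_write_csr(ndev, SOME_CSR_OFFSET, val);
 */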

static inline bool nitrox_ready(struct nitrox_device *ndev)
{
	return atomic_read(&ndev->state) == __NDEV_READY;
}

static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
{
	return atomic_read(&vfdev->state) == __NDEV_READY;
}

#endif /* __NITROX_DEV_H */