/*
 * Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 *
 * Derived from the FreeBSD original driver written by Jim Harris,
 * with contributions from Alexander Motin and Wojciech Macek
 */

#ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_
#define ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_

#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/byteorder.h>

struct nvme_command {
	/* dword 0 */
	struct _cdw0 {
		uint8_t opc;		/* opcode */
		uint8_t fuse : 2;	/* fused operation */
		uint8_t rsvd : 4;	/* reserved */
		uint8_t psdt : 2;	/* PRP or SGL for data transfer */
		uint16_t cid;		/* command identifier */
	} cdw0;

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dwords 2-3 */
	uint32_t cdw2;
	uint32_t cdw3;

	/* dwords 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dwords 6-7 and 8-9 */
	struct _dptr {
		uint64_t prp1;	/* PRP entry 1 */
		uint64_t prp2;	/* PRP entry 2 */
	} dptr;			/* data pointer */

	/* dword 10 */
	union {
		uint32_t cdw10;	/* command-specific */
		uint32_t ndt;	/* number of dwords in data transfer */
	};

	/* dword 11 */
	union {
		uint32_t cdw11;	/* command-specific */
		uint32_t ndm;	/* number of dwords in metadata transfer */
	};

	/* dwords 12-15 */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
};

struct nvme_completion {
	/* dword 0 */
	uint32_t	cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t	rsvd;

	/* dword 2 */
	uint16_t	sqhd;	/* submission queue head pointer */
	uint16_t	sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t	cid;	/* command identifier */
	uint16_t	status;	/* phase tag (bit 0) and status field */
} __aligned(8);

struct nvme_completion_poll_status {
	int status;
	struct nvme_completion cpl;
	struct k_sem sem;
};

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	NVME_SCT_PATH_RELATED		= 0x3,
	/* 0x4-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,
	NVME_SC_INVALID_SGL_SEGMENT_DESCR	= 0x0d,
	NVME_SC_INVALID_NUMBER_OF_SGL_DESCR	= 0x0e,
	NVME_SC_DATA_SGL_LENGTH_INVALID		= 0x0f,
	NVME_SC_METADATA_SGL_LENGTH_INVALID	= 0x10,
	NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID	= 0x11,
	NVME_SC_INVALID_USE_OF_CMB		= 0x12,
	NVME_SC_PRP_OFFSET_INVALID		= 0x13,
	NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED	= 0x14,
	NVME_SC_OPERATION_DENIED		= 0x15,
	NVME_SC_SGL_OFFSET_INVALID		= 0x16,
	/* 0x17 - reserved */
	NVME_SC_HOST_ID_INCONSISTENT_FORMAT	= 0x18,
	NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED	= 0x19,
	NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID	= 0x1a,
	NVME_SC_ABORTED_DUE_TO_PREEMPT		= 0x1b,
	NVME_SC_SANITIZE_FAILED			= 0x1c,
	NVME_SC_SANITIZE_IN_PROGRESS		= 0x1d,
	NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID	= 0x1e,
	NVME_SC_NOT_SUPPORTED_IN_CMB		= 0x1f,
	NVME_SC_NAMESPACE_IS_WRITE_PROTECTED	= 0x20,
	NVME_SC_COMMAND_INTERRUPTED		= 0x21,
	NVME_SC_TRANSIENT_TRANSPORT_ERROR	= 0x22,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT		= 0x83,
	NVME_SC_FORMAT_IN_PROGRESS		= 0x84,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,
	NVME_SC_INVALID_QUEUE_DELETION		= 0x0c,
	NVME_SC_FEATURE_NOT_SAVEABLE		= 0x0d,
	NVME_SC_FEATURE_NOT_CHANGEABLE		= 0x0e,
	NVME_SC_FEATURE_NOT_NS_SPECIFIC		= 0x0f,
	NVME_SC_FW_ACT_REQUIRES_NVMS_RESET	= 0x10,
	NVME_SC_FW_ACT_REQUIRES_RESET		= 0x11,
	NVME_SC_FW_ACT_REQUIRES_TIME		= 0x12,
	NVME_SC_FW_ACT_PROHIBITED		= 0x13,
	NVME_SC_OVERLAPPING_RANGE		= 0x14,
	NVME_SC_NS_INSUFFICIENT_CAPACITY	= 0x15,
	NVME_SC_NS_ID_UNAVAILABLE		= 0x16,
	/* 0x17 - reserved */
	NVME_SC_NS_ALREADY_ATTACHED		= 0x18,
	NVME_SC_NS_IS_PRIVATE			= 0x19,
	NVME_SC_NS_NOT_ATTACHED			= 0x1a,
	NVME_SC_THIN_PROV_NOT_SUPPORTED		= 0x1b,
	NVME_SC_CTRLR_LIST_INVALID		= 0x1c,
	NVME_SC_SELF_TEST_IN_PROGRESS		= 0x1d,
	NVME_SC_BOOT_PART_WRITE_PROHIB		= 0x1e,
	NVME_SC_INVALID_CTRLR_ID		= 0x1f,
	NVME_SC_INVALID_SEC_CTRLR_STATE		= 0x20,
	NVME_SC_INVALID_NUM_OF_CTRLR_RESRC	= 0x21,
	NVME_SC_INVALID_RESOURCE_ID		= 0x22,
	NVME_SC_SANITIZE_PROHIBITED_WPMRE	= 0x23,
	NVME_SC_ANA_GROUP_ID_INVALID		= 0x24,
	NVME_SC_ANA_ATTACH_FAILED		= 0x25,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
	NVME_SC_DEALLOCATED_OR_UNWRITTEN	= 0x87,
};

/* path related status codes */
enum nvme_path_related_status_code {
	NVME_SC_INTERNAL_PATH_ERROR		= 0x00,
	NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS = 0x01,
	NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE	= 0x02,
	NVME_SC_ASYMMETRIC_ACCESS_TRANSITION	= 0x03,
	NVME_SC_CONTROLLER_PATHING_ERROR	= 0x60,
	NVME_SC_HOST_PATHING_ERROR		= 0x70,
	NVME_SC_COMMAND_ABORTED_BY_HOST		= 0x71,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
	/* 0x0e-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
	/* 0x12-0x13 - reserved */
	NVME_OPC_DEVICE_SELF_TEST		= 0x14,
	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,
	/* 0x16-0x17 - reserved */
	NVME_OPC_KEEP_ALIVE			= 0x18,
	NVME_OPC_DIRECTIVE_SEND			= 0x19,
	NVME_OPC_DIRECTIVE_RECEIVE		= 0x1a,
	/* 0x1b - reserved */
	NVME_OPC_VIRTUALIZATION_MANAGEMENT	= 0x1c,
	NVME_OPC_NVME_MI_SEND			= 0x1d,
	NVME_OPC_NVME_MI_RECEIVE		= 0x1e,
	/* 0x1f-0x7b - reserved */
	NVME_OPC_DOORBELL_BUFFER_CONFIG		= 0x7c,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
	/* 0x83 - reserved */
	NVME_OPC_SANITIZE			= 0x84,
	/* 0x85 - reserved */
	NVME_OPC_GET_LBA_STATUS			= 0x86,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_WRITE_ZEROES			= 0x08,
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
	/* 0x0a-0x0b - reserved */
	NVME_OPC_VERIFY				= 0x0c,
	NVME_OPC_RESERVATION_REGISTER		= 0x0d,
	NVME_OPC_RESERVATION_REPORT		= 0x0e,
	/* 0x0f-0x10 - reserved */
	NVME_OPC_RESERVATION_ACQUIRE		= 0x11,
	/* 0x12-0x14 - reserved */
	NVME_OPC_RESERVATION_RELEASE		= 0x15,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C,
	NVME_FEAT_HOST_MEMORY_BUFFER		= 0x0D,
	NVME_FEAT_TIMESTAMP			= 0x0E,
	NVME_FEAT_KEEP_ALIVE_TIMER		= 0x0F,
	NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT	= 0x10,
	NVME_FEAT_NON_OP_POWER_STATE_CONFIG	= 0x11,
	NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG	= 0x12,
	NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG = 0x13,
	NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW = 0x14,
	NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES = 0x15,
	NVME_FEAT_HOST_BEHAVIOR_SUPPORT		= 0x16,
	NVME_FEAT_SANITIZE_CONFIG		= 0x17,
	NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION = 0x18,
	/* 0x19-0x77 - reserved */
	/* 0x78-0x7f - NVMe Management Interface */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	NVME_FEAT_HOST_IDENTIFIER		= 0x81,
	NVME_FEAT_RESERVATION_NOTIFICATION_MASK	= 0x82,
	NVME_FEAT_RESERVATION_PERSISTENCE	= 0x83,
	NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG = 0x84,
	/* 0x85-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

#if !defined(CONFIG_DCACHE_LINE_SIZE) || (CONFIG_DCACHE_LINE_SIZE == 0)
#define CACHE_LINE_SIZE				(64)
#else
#define CACHE_LINE_SIZE				CONFIG_DCACHE_LINE_SIZE
#endif

/* Offset mask within a physical page */
#define NVME_PBAO_MASK (CONFIG_MMU_PAGE_SIZE - 1)

#define NVME_PRP_NEXT_PAGE(_addr)				\
	(((_addr) & ~NVME_PBAO_MASK) + CONFIG_MMU_PAGE_SIZE)
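
/*
 * Worked example (assuming a 4 KiB CONFIG_MMU_PAGE_SIZE, so NVME_PBAO_MASK
 * is 0xfff): NVME_PRP_NEXT_PAGE(0x12345678) masks the address down to its
 * page base, 0x12345000, then adds the page size, yielding the start of
 * the next page, 0x12346000.
 */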

struct nvme_prp_list {
	uintptr_t prp[CONFIG_MMU_PAGE_SIZE / sizeof(uintptr_t)]
						__aligned(CONFIG_MMU_PAGE_SIZE);
	sys_dnode_t node;
};
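
/*
 * Sizing note: on a 64-bit build with 4 KiB pages, the prp array holds
 * 4096 / 8 = 512 entries; with each PRP entry mapping one 4 KiB page, a
 * single list can thus describe up to 2 MiB of payload.
 */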

struct nvme_cmd_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;

	uint32_t		num_entries;

	/* Submission tail and completion head doorbell register offsets */
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	/* Current completion queue phase tag and ring positions */
	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	/* Statistics */
	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;

	/* Submission and completion queue memory */
	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	uintptr_t		cmd_bus_addr;
	uintptr_t		cpl_bus_addr;

	uint16_t		vector;	/* interrupt vector index */
} __aligned(CACHE_LINE_SIZE);

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

enum nvme_request_type {
	NVME_REQUEST_NULL	= 1,
	NVME_REQUEST_VADDR	= 2,
};

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_cmd_qpair		*qpair;

	uint32_t			type;
	uint32_t			req_start;
	int32_t				retries;

	void				*payload;
	uint32_t			payload_size;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;

	struct nvme_prp_list		*prp_list;

	sys_dnode_t			node;
};

void nvme_cmd_init(void);

void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

#ifdef CONFIG_NVME_LOG_LEVEL_DBG
void nvme_completion_print(const struct nvme_completion *cpl);
#else
#define nvme_completion_print(...)
#endif /* CONFIG_NVME_LOG_LEVEL_DBG */

void nvme_cmd_request_free(struct nvme_request *request);

struct nvme_request *nvme_cmd_request_alloc(void);

int nvme_cmd_qpair_setup(struct nvme_cmd_qpair *qpair,
			 struct nvme_controller *ctrlr,
			 uint32_t id);

void nvme_cmd_qpair_reset(struct nvme_cmd_qpair *qpair);

int nvme_cmd_qpair_submit_request(struct nvme_cmd_qpair *qpair,
				  struct nvme_request *request);

int nvme_cmd_identify_controller(struct nvme_controller *ctrlr,
				 void *payload,
				 nvme_cb_fn_t cb_fn,
				 void *cb_arg);

int nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
				       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
				      uint32_t nsid, void *payload,
				      nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       uint32_t cdw12, uint32_t cdw13,
			       uint32_t cdw14, uint32_t cdw15,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				  uint32_t num_queues,
				  nvme_cb_fn_t cb_fn, void *cb_arg);

static inline
struct nvme_request *nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_cmd_request_alloc();
	if (request != NULL) {
		request->cb_fn = cb_fn;
		request->cb_arg = cb_arg;
	}

	return request;
}

static inline
struct nvme_request *nvme_allocate_request_vaddr(void *payload,
						 uint32_t payload_size,
						 nvme_cb_fn_t cb_fn,
						 void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_allocate_request(cb_fn, cb_arg);
	if (request != NULL) {
		request->type = NVME_REQUEST_VADDR;
		request->payload = payload;
		request->payload_size = payload_size;
	}

	return request;
}

static inline
struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn,
						void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_allocate_request(cb_fn, cb_arg);
	if (request != NULL) {
		request->type = NVME_REQUEST_NULL;
	}

	return request;
}
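
/*
 * Usage sketch for the helpers above (illustrative only: buf, on_done and
 * the 4096-byte size are assumptions of this example, not part of the API):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(buf, 4096, on_done, NULL);
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *
 * nvme_allocate_request_null() serves commands that transfer no data,
 * such as flush.
 */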

/*
 * Command building helper functions
 * These functions assume the allocator zeroes out the cmd structure.
 */
static inline
void nvme_namespace_flush_cmd(struct nvme_command *cmd, uint32_t nsid)
{
	cmd->cdw0.opc = NVME_OPC_FLUSH;
	cmd->nsid = sys_cpu_to_le32(nsid);
}

static inline
void nvme_namespace_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd,
			   uint32_t nsid, uint64_t lba, uint32_t count)
{
	cmd->cdw0.opc = rwcmd;
	cmd->nsid = sys_cpu_to_le32(nsid);
	cmd->cdw10 = sys_cpu_to_le32(lba & 0xffffffffu);
	cmd->cdw11 = sys_cpu_to_le32(lba >> 32);
	cmd->cdw12 = sys_cpu_to_le32(count - 1);
}

static inline
void nvme_namespace_write_cmd(struct nvme_command *cmd, uint32_t nsid,
			      uint64_t lba, uint32_t count)
{
	nvme_namespace_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
}

static inline
void nvme_namespace_read_cmd(struct nvme_command *cmd, uint32_t nsid,
			     uint64_t lba, uint32_t count)
{
	nvme_namespace_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
}
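
/*
 * Example (a sketch, assuming req is a freshly allocated request whose
 * cmd is zeroed): read 8 blocks starting at LBA 0 of namespace 1:
 *
 *	nvme_namespace_read_cmd(&req->cmd, 1, 0, 8);
 *
 * cdw12 is written as count - 1 because NLB is a 0's based field in the
 * NVMe specification.
 */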

static inline void nvme_completion_swapbytes(struct nvme_completion *cpl)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
	cpl->cdw0 = sys_le32_to_cpu(cpl->cdw0);
	/* omit rsvd */
	cpl->sqhd = sys_le16_to_cpu(cpl->sqhd);
	cpl->sqid = sys_le16_to_cpu(cpl->sqid);
	/* omit cid */
	cpl->status = sys_le16_to_cpu(cpl->status);
#else
	ARG_UNUSED(cpl);
#endif
}

static inline
void nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	k_sem_take(&status->sem, K_FOREVER);
}

#define NVME_CPL_STATUS_POLL_INIT(cpl_status)			\
	{							\
		.status = 0,					\
		.sem = Z_SEM_INITIALIZER(cpl_status.sem, 0, 1),	\
	}

static inline
void nvme_cpl_status_poll_init(struct nvme_completion_poll_status *status)
{
	status->status = 0;
	k_sem_init(&status->sem, 0, 1);
}

#define nvme_completion_is_error(cpl)			\
	((NVME_STATUS_GET_SC((cpl)->status) != 0) ||	\
	 (NVME_STATUS_GET_SCT((cpl)->status) != 0))

static inline
bool nvme_cpl_status_is_error(struct nvme_completion_poll_status *status)
{
	return ((status->status != 0) ||
		nvme_completion_is_error(&status->cpl));
}
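
/*
 * Typical synchronous command flow (a sketch: qpair and nsid are assumed
 * to be provided by the caller, everything else is declared in this
 * header):
 *
 *	struct nvme_completion_poll_status status;
 *	struct nvme_request *req;
 *
 *	nvme_cpl_status_poll_init(&status);
 *
 *	req = nvme_allocate_request_null(nvme_completion_poll_cb, &status);
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	nvme_namespace_flush_cmd(&req->cmd, nsid);
 *
 *	if (nvme_cmd_qpair_submit_request(qpair, req) != 0) {
 *		return -EIO;
 *	}
 *
 *	nvme_completion_poll(&status);
 *	if (nvme_cpl_status_is_error(&status)) {
 *		nvme_completion_print(&status.cpl);
 *		return -EIO;
 *	}
 */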

#endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ */