/*
 * Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>

#include "nvme.h"

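/*
 * Disk status callback: once registered, the NVMe namespace is always
 * considered ready, so report DISK_STATUS_OK (0) unconditionally.
 */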
static int nvme_disk_status(struct disk_info *disk)
{
	return 0;
}

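/*
 * Read num_sector sectors, starting at start_sector, into data_buf.
 * The buffer must be 4-byte aligned. The request is submitted to the
 * controller's first I/O queue pair and completion is polled synchronously.
 */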
static int nvme_disk_read(struct disk_info *disk,
			  uint8_t *data_buf,
			  uint32_t start_sector,
			  uint32_t num_sector)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) {
		LOG_WRN("Data buffer pointer needs to be 4-byte aligned");
		return -EINVAL;
	}

	nvme_lock(disk->dev);

	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_read_cmd(&request->cmd, ns->id,
				start_sector, num_sector);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU id to select the I/O queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_WRN("Reading at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		nvme_completion_print(&status.cpl);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}

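/*
 * Write num_sector sectors, starting at start_sector, from data_buf.
 * Same constraints as nvme_disk_read(): 4-byte aligned buffer, first
 * I/O queue pair only, synchronous completion polling.
 */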
static int nvme_disk_write(struct disk_info *disk,
			   const uint8_t *data_buf,
			   uint32_t start_sector,
			   uint32_t num_sector)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) {
		LOG_WRN("Data buffer pointer needs to be 4-byte aligned");
		return -EINVAL;
	}

	nvme_lock(disk->dev);

	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_write_cmd(&request->cmd, ns->id,
				 start_sector, num_sector);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU id to select the I/O queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_WRN("Writing at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		nvme_completion_print(&status.cpl);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}

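/*
 * Issue an NVMe flush command for the namespace and poll for completion,
 * so that any data held in the controller's volatile write cache reaches
 * non-volatile media.
 */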
static int nvme_disk_flush(struct nvme_namespace *ns)
{
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;

	request = nvme_allocate_request_null(nvme_completion_poll_cb, &status);
	if (request == NULL) {
		return -ENOMEM;
	}

	nvme_namespace_flush_cmd(&request->cmd, ns->id);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU id to select the I/O queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_ERR("Flushing disk %s failed", ns->name);
		nvme_completion_print(&status.cpl);
		return -EIO;
	}

	return 0;
}

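/*
 * Handle the generic disk IOCTLs: report the sector count and sector size,
 * report the erase block size as one sector, map CTRL_DEINIT and CTRL_SYNC
 * to a namespace flush, and treat CTRL_INIT as a no-op.
 */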
static int nvme_disk_ioctl(struct disk_info *disk, uint8_t cmd, void *buff)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	int ret = 0;

	nvme_lock(disk->dev);

	switch (cmd) {
	case DISK_IOCTL_GET_SECTOR_COUNT:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_num_sectors(ns);

		break;
	case DISK_IOCTL_GET_SECTOR_SIZE:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);

		break;
	case DISK_IOCTL_GET_ERASE_BLOCK_SZ:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);

		break;
	case DISK_IOCTL_CTRL_DEINIT:
	case DISK_IOCTL_CTRL_SYNC:
		ret = nvme_disk_flush(ns);
		break;
	case DISK_IOCTL_CTRL_INIT:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	nvme_unlock(disk->dev);
	return ret;
}

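/* Disk init callback, routed through the CTRL_INIT case of the IOCTL handler. */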
static int nvme_disk_init(struct disk_info *disk)
{
	return nvme_disk_ioctl(disk, DISK_IOCTL_CTRL_INIT, NULL);
}

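/* Disk access API operations backed by the NVMe namespace. */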
static const struct disk_operations nvme_disk_ops = {
	.init = nvme_disk_init,
	.status = nvme_disk_status,
	.read = nvme_disk_read,
	.write = nvme_disk_write,
	.ioctl = nvme_disk_ioctl,
};

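/*
 * Bind a disk_info structure to an NVMe namespace and register it with
 * the disk access subsystem. The disk name aliases the namespace name,
 * which lets the callbacks above recover the namespace via CONTAINER_OF.
 */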
int nvme_namespace_disk_setup(struct nvme_namespace *ns,
			      struct disk_info *disk)
{
	disk->name = ns->name;
	disk->ops = &nvme_disk_ops;
	disk->dev = ns->ctrlr->dev;

	return disk_access_register(disk);
}