/*
 * SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2022 Intel Corp.
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>

#include "nvme.h"

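/* Nothing to do here: the controller and its namespaces are brought up
 * by the NVMe controller driver before any disk is registered.
 */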
static int nvme_disk_init(struct disk_info *disk)
{
	return 0;
}

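/* Once registered, the disk is unconditionally reported as ready by
 * returning 0 (DISK_STATUS_OK).
 */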
static int nvme_disk_status(struct disk_info *disk)
{
	return 0;
}

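/* Reads num_sector sectors starting at start_sector into data_buf:
 * builds an NVMe read command, submits it to the controller's I/O
 * queue pair and polls synchronously for its completion.
 */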
static int nvme_disk_read(struct disk_info *disk,
			  uint8_t *data_buf,
			  uint32_t start_sector,
			  uint32_t num_sector)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) {
		LOG_WRN("Data buffer pointer needs to be 4-byte aligned");
		return -EINVAL;
	}

	nvme_lock(disk->dev);

	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_read_cmd(&request->cmd, ns->id,
				start_sector, num_sector);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU ID to select the queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_WRN("Reading at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		nvme_completion_print(&status.cpl);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}

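/* Writes num_sector sectors from data_buf starting at start_sector,
 * using the same submit-and-poll scheme as nvme_disk_read() but with
 * an NVMe write command.
 */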
static int nvme_disk_write(struct disk_info *disk,
			   const uint8_t *data_buf,
			   uint32_t start_sector,
			   uint32_t num_sector)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	if (!NVME_IS_BUFFER_DWORD_ALIGNED(data_buf)) {
		LOG_WRN("Data buffer pointer needs to be 4-byte aligned");
		return -EINVAL;
	}

	nvme_lock(disk->dev);

	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

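	/* Casting away const is safe here: this is a write, so the
	 * controller only reads from the buffer.
	 */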
	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_write_cmd(&request->cmd, ns->id,
				 start_sector, num_sector);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU ID to select the queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_WRN("Writing at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		nvme_completion_print(&status.cpl);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}

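/* Issues an NVMe flush command for the namespace and polls for its
 * completion, committing data held in the controller's volatile write
 * cache to non-volatile media.
 */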
static int nvme_disk_flush(struct nvme_namespace *ns)
{
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;

	request = nvme_allocate_request_null(nvme_completion_poll_cb, &status);
	if (request == NULL) {
		return -ENOMEM;
	}

	nvme_namespace_flush_cmd(&request->cmd, ns->id);

	/* Only the first I/O queue pair is used for now.
	 * TODO: use the SMP CPU ID to select the queue pair.
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_ERR("Flushing disk %s failed", ns->name);
		nvme_completion_print(&status.cpl);
		return -EIO;
	}

	return 0;
}

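/* Handles the generic disk access queries (sector count, sector size,
 * erase block size) and maps DISK_IOCTL_CTRL_SYNC to an NVMe flush.
 */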
static int nvme_disk_ioctl(struct disk_info *disk, uint8_t cmd, void *buff)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name[0]);
	int ret = 0;

	nvme_lock(disk->dev);

	switch (cmd) {
	case DISK_IOCTL_GET_SECTOR_COUNT:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_num_sectors(ns);

		break;
	case DISK_IOCTL_GET_SECTOR_SIZE:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);

		break;
	case DISK_IOCTL_GET_ERASE_BLOCK_SZ:
		if (!buff) {
			ret = -EINVAL;
			break;
		}

		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);

		break;
	case DISK_IOCTL_CTRL_SYNC:
		ret = nvme_disk_flush(ns);
		break;
	default:
		ret = -EINVAL;
	}

	nvme_unlock(disk->dev);
	return ret;
}

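/* Disk access operations exposed to the Zephyr disk subsystem. */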
static const struct disk_operations nvme_disk_ops = {
	.init = nvme_disk_init,
	.status = nvme_disk_status,
	.read = nvme_disk_read,
	.write = nvme_disk_write,
	.ioctl = nvme_disk_ioctl,
};

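/* Binds a disk_info structure to an NVMe namespace and registers it with
 * the disk access subsystem. The disk name aliases the namespace name,
 * which is what lets the operations above recover the namespace through
 * CONTAINER_OF() on disk->name.
 */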
int nvme_namespace_disk_setup(struct nvme_namespace *ns,
			      struct disk_info *disk)
{
	disk->name = ns->name;
	disk->ops = &nvme_disk_ops;
	disk->dev = ns->ctrlr->dev;

	return disk_access_register(disk);
}