/*
 * SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2022 Intel Corp.
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>

#include "nvme.h"
/* Disk access API init callback.
 *
 * The NVMe controller and namespace are already brought up by the
 * NVMe driver before the disk is registered (see
 * nvme_namespace_disk_setup()), so there is nothing to do here.
 * Always returns 0 (success).
 */
static int nvme_disk_init(struct disk_info *disk)
{
	return 0;
}
18
/* Disk access API status callback.
 *
 * Always reports 0 (DISK_STATUS_OK): once registered, the namespace
 * is assumed usable and no runtime health query is performed here.
 */
static int nvme_disk_status(struct disk_info *disk)
{
	return 0;
}
23
/* Disk access API read callback.
 *
 * Synchronously reads @num_sector sectors, starting at @start_sector,
 * from the NVMe namespace backing @disk into @data_buf: the command is
 * submitted on the controller's first I/O queue pair and its completion
 * is polled for before returning.
 *
 * Returns 0 on success, -ENOMEM if no request could be allocated, or
 * -EIO if the device reported an error completion.
 */
static int nvme_disk_read(struct disk_info *disk,
			  uint8_t *data_buf,
			  uint32_t start_sector,
			  uint32_t num_sector)
{
	/* disk->name points at the name[] field inside the registering
	 * nvme_namespace (see nvme_namespace_disk_setup()), so the
	 * namespace can be recovered from it.
	 */
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	nvme_lock(disk->dev);

	/* NOTE(review): assumes num_sector * sector_size fits in 32 bits —
	 * TODO confirm against the maximum transfer the disk layer issues.
	 */
	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_read_cmd(&request->cmd, ns->id,
				start_sector, num_sector);

	/* We use only the first ioq atm
	 * ToDo: use smp cpu id and use it to select ioq
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		/* num_sector is uint32_t: log with %u, not %d */
		LOG_WRN("Reading at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}
66
/* Disk access API write callback.
 *
 * Synchronously writes @num_sector sectors from @data_buf to the NVMe
 * namespace backing @disk, starting at @start_sector: the command is
 * submitted on the controller's first I/O queue pair and its completion
 * is polled for before returning.
 *
 * Returns 0 on success, -ENOMEM if no request could be allocated, or
 * -EIO if the device reported an error completion.
 */
static int nvme_disk_write(struct disk_info *disk,
			   const uint8_t *data_buf,
			   uint32_t start_sector,
			   uint32_t num_sector)
{
	/* disk->name points at the name[] field inside the registering
	 * nvme_namespace (see nvme_namespace_disk_setup()), so the
	 * namespace can be recovered from it.
	 */
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name);
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);
	struct nvme_request *request;
	uint32_t payload_size;
	int ret = 0;

	nvme_lock(disk->dev);

	/* NOTE(review): assumes num_sector * sector_size fits in 32 bits —
	 * TODO confirm against the maximum transfer the disk layer issues.
	 */
	payload_size = num_sector * nvme_namespace_get_sector_size(ns);

	/* The cast drops const: the request API takes a non-const payload
	 * pointer, but a write command only reads from the buffer.
	 */
	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
					      nvme_completion_poll_cb, &status);
	if (request == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	nvme_namespace_write_cmd(&request->cmd, ns->id,
				 start_sector, num_sector);

	/* We use only the first ioq atm
	 * ToDo: use smp cpu id and use it to select ioq
	 */
	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		/* num_sector is uint32_t: log with %u, not %d */
		LOG_WRN("Writing at sector %u (count %u) on disk %s failed",
			start_sector, num_sector, ns->name);
		ret = -EIO;
	}
out:
	nvme_unlock(disk->dev);
	return ret;
}
109
nvme_disk_flush(struct nvme_namespace * ns)110 static int nvme_disk_flush(struct nvme_namespace *ns)
111 {
112 struct nvme_completion_poll_status status =
113 NVME_CPL_STATUS_POLL_INIT(status);
114 struct nvme_request *request;
115
116 request = nvme_allocate_request_null(nvme_completion_poll_cb, &status);
117 if (request == NULL) {
118 return -ENOMEM;
119 }
120
121 nvme_namespace_flush_cmd(&request->cmd, ns->id);
122
123 /* We use only the first ioq
124 * ToDo: use smp cpu id and use it to select ioq
125 */
126 nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);
127
128 nvme_completion_poll(&status);
129 if (nvme_cpl_status_is_error(&status)) {
130 LOG_ERR("Flushing disk %s failed", ns->name);
131 return -EIO;
132 }
133
134 return 0;
135 }
136
/* Disk access API ioctl callback.
 *
 * Serves the geometry queries (sector count, sector size, erase block
 * size — the latter reported as one sector) and DISK_IOCTL_CTRL_SYNC
 * (flush). Any other command, or a NULL output buffer for a geometry
 * query, yields -EINVAL.
 */
static int nvme_disk_ioctl(struct disk_info *disk, uint8_t cmd, void *buff)
{
	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
						 struct nvme_namespace, name);
	int ret = 0;

	nvme_lock(disk->dev);

	switch (cmd) {
	case DISK_IOCTL_GET_SECTOR_COUNT:
	case DISK_IOCTL_GET_SECTOR_SIZE:
	case DISK_IOCTL_GET_ERASE_BLOCK_SZ:
		/* All three geometry queries share the NULL-buffer guard. */
		if (buff == NULL) {
			ret = -EINVAL;
			break;
		}

		if (cmd == DISK_IOCTL_GET_SECTOR_COUNT) {
			*(uint32_t *)buff = nvme_namespace_get_num_sectors(ns);
		} else {
			/* Erase block size is reported as one sector. */
			*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);
		}

		break;
	case DISK_IOCTL_CTRL_SYNC:
		ret = nvme_disk_flush(ns);
		break;
	default:
		ret = -EINVAL;
	}

	nvme_unlock(disk->dev);

	return ret;
}
183
/* Callback table handed to the disk access subsystem for every
 * registered NVMe namespace disk.
 */
static const struct disk_operations nvme_disk_ops = {
	.init = nvme_disk_init,
	.status = nvme_disk_status,
	.read = nvme_disk_read,
	.write = nvme_disk_write,
	.ioctl = nvme_disk_ioctl,
};
191
/* Register namespace @ns with the disk access subsystem via @disk.
 *
 * disk->name is pointed at ns->name; the read/write/ioctl callbacks
 * rely on exactly this so they can recover the namespace with
 * CONTAINER_OF(disk->name, struct nvme_namespace, name).
 *
 * Returns the result of disk_access_register() (0 on success,
 * negative errno otherwise).
 */
int nvme_namespace_disk_setup(struct nvme_namespace *ns,
			      struct disk_info *disk)
{
	disk->name = ns->name;
	disk->ops = &nvme_disk_ops;
	disk->dev = ns->ctrlr->dev;

	return disk_access_register(disk);
}
201