/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/usb/usb_ch9.h>
#include "uhc_common.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uhc, CONFIG_UHC_DRIVER_LOG_LEVEL);

K_MEM_SLAB_DEFINE_STATIC(uhc_xfer_pool, sizeof(struct uhc_transfer),
			 CONFIG_UHC_XFER_COUNT, sizeof(void *));

NET_BUF_POOL_VAR_DEFINE(uhc_ep_pool,
			CONFIG_UHC_BUF_COUNT, CONFIG_UHC_BUF_POOL_SIZE,
			0, NULL);

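/*
 * Helper for UHC drivers to submit an event to the host stack
 * through the registered event callback.
 */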
int uhc_submit_event(const struct device *dev,
		     const enum uhc_event_type type,
		     const int status)
{
	struct uhc_data *data = dev->data;
	struct uhc_event drv_evt = {
		.type = type,
		.status = status,
		.dev = dev,
	};

	if (!uhc_is_initialized(dev)) {
		return -EPERM;
	}

	return data->event_cb(dev, &drv_evt);
}

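/*
 * Remove a transfer from the pending list and return it to the host
 * stack through an UHC_EVT_EP_REQUEST event, with the result in xfer->err.
 */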
void uhc_xfer_return(const struct device *dev,
		     struct uhc_transfer *const xfer,
		     const int err)
{
	struct uhc_data *data = dev->data;
	struct uhc_event drv_evt = {
		.type = UHC_EVT_EP_REQUEST,
		.xfer = xfer,
		.dev = dev,
	};

	sys_dlist_remove(&xfer->node);
	xfer->queued = 0;
	xfer->err = err;

	data->event_cb(dev, &drv_evt);
}

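/*
 * Get the next transfer to process; control transfers are served
 * before bulk transfers.
 */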
struct uhc_transfer *uhc_xfer_get_next(const struct device *dev)
{
	struct uhc_data *data = dev->data;
	struct uhc_transfer *xfer;
	sys_dnode_t *node;

	/* Draft scheduling, WIP: peek the control list first, then bulk. */
	node = sys_dlist_peek_head(&data->ctrl_xfers);
	if (node == NULL) {
		node = sys_dlist_peek_head(&data->bulk_xfers);
	}

	return (node == NULL) ? NULL : SYS_DLIST_CONTAINER(node, xfer, node);
}

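/*
 * Append a transfer to the list of pending transfers. For now every
 * transfer goes on the control list; see the WIP scheduling note above.
 */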
int uhc_xfer_append(const struct device *dev,
		    struct uhc_transfer *const xfer)
{
	struct uhc_data *data = dev->data;

	sys_dlist_append(&data->ctrl_xfers, &xfer->node);

	return 0;
}

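/*
 * Allocate a transfer data buffer from the variable-size net_buf pool.
 * Returns NULL if no buffer is available.
 */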
struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
				   const size_t size)
{
	return net_buf_alloc_len(&uhc_ep_pool, size, K_NO_WAIT);
}

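/* Release a transfer data buffer back to the pool. */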
void uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf)
{
	net_buf_unref(buf);
}

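/*
 * Allocate and initialize a transfer from the transfer slab.
 * Returns NULL if the controller is not initialized or the pool
 * is exhausted.
 */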
struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
				    const uint8_t addr,
				    const uint8_t ep,
				    const uint8_t attrib,
				    const uint16_t mps,
				    const uint16_t timeout,
				    void *const udev,
				    void *const cb)
{
	const struct uhc_api *api = dev->api;
	struct uhc_transfer *xfer = NULL;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		goto xfer_alloc_error;
	}

	LOG_DBG("Allocate xfer, ep 0x%02x attrib 0x%02x cb %p",
		ep, attrib, cb);

	if (k_mem_slab_alloc(&uhc_xfer_pool, (void **)&xfer, K_NO_WAIT)) {
		LOG_ERR("Failed to allocate transfer");
		goto xfer_alloc_error;
	}

	memset(xfer, 0, sizeof(struct uhc_transfer));
	xfer->addr = addr;
	xfer->ep = ep;
	xfer->attrib = attrib;
	xfer->mps = mps;
	xfer->timeout = timeout;
	xfer->udev = udev;
	xfer->cb = cb;

xfer_alloc_error:
	api->unlock(dev);

	return xfer;
}

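/*
 * Allocate a transfer together with a data buffer of the given size.
 * A minimal usage sketch (addr, ep, and the other arguments are
 * caller-supplied; the result is delivered through the callback):
 *
 *	xfer = uhc_xfer_alloc_with_buf(dev, addr, ep, attrib, mps,
 *				       timeout, udev, cb, size);
 *	if (xfer != NULL) {
 *		uhc_ep_enqueue(dev, xfer);
 *	}
 */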
struct uhc_transfer *uhc_xfer_alloc_with_buf(const struct device *dev,
					     const uint8_t addr,
					     const uint8_t ep,
					     const uint8_t attrib,
					     const uint16_t mps,
					     const uint16_t timeout,
					     void *const udev,
					     void *const cb,
					     size_t size)
{
	struct uhc_transfer *xfer;
	struct net_buf *buf;

	buf = uhc_xfer_buf_alloc(dev, size);
	if (buf == NULL) {
		return NULL;
	}

	xfer = uhc_xfer_alloc(dev, addr, ep, attrib, mps, timeout, udev, cb);
	if (xfer == NULL) {
		net_buf_unref(buf);
		return NULL;
	}

	xfer->buf = buf;

	return xfer;
}

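/*
 * Free a transfer back to the slab. Fails with -EBUSY if the
 * transfer is still queued; dequeue it first.
 */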
int uhc_xfer_free(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret = 0;

	api->lock(dev);

	if (xfer->queued) {
		ret = -EBUSY;
		LOG_ERR("Transfer is still queued");
		goto xfer_free_error;
	}

	k_mem_slab_free(&uhc_xfer_pool, (void *)xfer);

xfer_free_error:
	api->unlock(dev);

	return ret;
}

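/*
 * Attach a data buffer to a transfer. Fails with -EBUSY if the
 * transfer has already been queued.
 */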
int uhc_xfer_buf_add(const struct device *dev,
		     struct uhc_transfer *const xfer,
		     struct net_buf *buf)
{
	const struct uhc_api *api = dev->api;
	int ret = 0;

	api->lock(dev);
	if (xfer->queued) {
		ret = -EBUSY;
	} else {
		xfer->buf = buf;
	}

	api->unlock(dev);

	return ret;
}

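/*
 * Hand a transfer over to the controller driver for processing.
 * The transfer is marked queued; on driver error the mark is undone.
 */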
int uhc_ep_enqueue(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	xfer->queued = 1;
	ret = api->ep_enqueue(dev, xfer);
	if (ret) {
		xfer->queued = 0;
	}

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}

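/*
 * Cancel a queued transfer. The driver drops it from its schedule
 * and the queued mark is cleared.
 */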
int uhc_ep_dequeue(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	ret = api->ep_dequeue(dev, xfer);
	xfer->queued = 0;

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}

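/*
 * Enable the host controller. Requires prior uhc_init(); returns
 * -EALREADY if the controller is already enabled.
 */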
int uhc_enable(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto uhc_enable_error;
	}

	if (uhc_is_enabled(dev)) {
		ret = -EALREADY;
		goto uhc_enable_error;
	}

	ret = api->enable(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UHC_STATUS_ENABLED);
	}

uhc_enable_error:
	api->unlock(dev);

	return ret;
}

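/* Disable the host controller; returns -EALREADY if it is not enabled. */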
int uhc_disable(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!uhc_is_enabled(dev)) {
		ret = -EALREADY;
		goto uhc_disable_error;
	}

	ret = api->disable(dev);
	atomic_clear_bit(&data->status, UHC_STATUS_ENABLED);

uhc_disable_error:
	api->unlock(dev);

	return ret;
}

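/*
 * Initialize the host controller and register the event callback.
 * Must be called before any other UHC API function.
 */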
int uhc_init(const struct device *dev, uhc_event_cb_t event_cb)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	if (event_cb == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (uhc_is_initialized(dev)) {
		ret = -EALREADY;
		goto uhc_init_error;
	}

	data->event_cb = event_cb;
	sys_dlist_init(&data->ctrl_xfers);
	sys_dlist_init(&data->bulk_xfers);

	ret = api->init(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UHC_STATUS_INITIALIZED);
	}

uhc_init_error:
	api->unlock(dev);

	return ret;
}

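/*
 * Shut the host controller down. The controller must be disabled
 * first; returns -EBUSY while it is still enabled.
 */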
int uhc_shutdown(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (uhc_is_enabled(dev)) {
		ret = -EBUSY;
		goto uhc_shutdown_error;
	}

	if (!uhc_is_initialized(dev)) {
		ret = -EALREADY;
		goto uhc_shutdown_error;
	}

	ret = api->shutdown(dev);
	atomic_clear_bit(&data->status, UHC_STATUS_INITIALIZED);

uhc_shutdown_error:
	api->unlock(dev);

	return ret;
}