/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/usb/usb_ch9.h>
#include "uhc_common.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uhc, CONFIG_UHC_DRIVER_LOG_LEVEL);

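/* Pool of transfer descriptors, shared by all host controller instances */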
K_MEM_SLAB_DEFINE_STATIC(uhc_xfer_pool, sizeof(struct uhc_transfer),
			 CONFIG_UHC_XFER_COUNT, sizeof(void *));

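/* Variable-size buffer pool for transfer payload data */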
NET_BUF_POOL_VAR_DEFINE(uhc_ep_pool,
			CONFIG_UHC_BUF_COUNT, CONFIG_UHC_BUF_POOL_SIZE,
			0, NULL);

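/*
 * Called by host controller drivers to submit an event to the higher layer
 * through the registered event callback.
 */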
int uhc_submit_event(const struct device *dev,
		     const enum uhc_event_type type,
		     const int status)
{
	struct uhc_data *data = dev->data;
	struct uhc_event drv_evt = {
		.type = type,
		.status = status,
		.dev = dev,
	};

	if (!uhc_is_initialized(dev)) {
		return -EPERM;
	}

	return data->event_cb(dev, &drv_evt);
}

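/*
 * Called by host controller drivers to return a finished transfer to the
 * higher layer as an UHC_EVT_EP_REQUEST event.
 */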
void uhc_xfer_return(const struct device *dev,
		     struct uhc_transfer *const xfer,
		     const int err)
{
	struct uhc_data *data = dev->data;
	struct uhc_event drv_evt = {
		.type = UHC_EVT_EP_REQUEST,
		.xfer = xfer,
		.dev = dev,
	};

	sys_dlist_remove(&xfer->node);
	xfer->queued = 0;
	xfer->err = err;

	data->event_cb(dev, &drv_evt);
}

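/* Peek the next transfer to be processed; control transfers have priority. */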
struct uhc_transfer *uhc_xfer_get_next(const struct device *dev)
{
	struct uhc_data *data = dev->data;
	struct uhc_transfer *xfer;
	sys_dnode_t *node;

	/* Draft scheduling, work in progress: serve pending control
	 * transfers first, then bulk transfers.
	 */
	node = sys_dlist_peek_head(&data->ctrl_xfers);
	if (node == NULL) {
		node = sys_dlist_peek_head(&data->bulk_xfers);
	}

	return (node == NULL) ? NULL : SYS_DLIST_CONTAINER(node, xfer, node);
}

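/* Append a transfer to the pending list (currently always the control list). */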
int uhc_xfer_append(const struct device *dev,
		    struct uhc_transfer *const xfer)
{
	struct uhc_data *data = dev->data;

	sys_dlist_append(&data->ctrl_xfers, &xfer->node);

	return 0;
}

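/* Allocate a transfer buffer of the given size from the UHC buffer pool. */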
struct net_buf *uhc_xfer_buf_alloc(const struct device *dev,
				   const size_t size)
{
	return net_buf_alloc_len(&uhc_ep_pool, size, K_NO_WAIT);
}

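/* Drop the reference to a buffer obtained with uhc_xfer_buf_alloc(). */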
void uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf)
{
	net_buf_unref(buf);
}

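/*
 * Allocate and zero-initialize a transfer descriptor. The maximum packet
 * size is taken from the device descriptor for endpoint 0, otherwise from
 * the matching endpoint descriptor.
 */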
struct uhc_transfer *uhc_xfer_alloc(const struct device *dev,
				    const uint8_t ep,
				    struct usb_device *const udev,
				    void *const cb,
				    void *const cb_priv)
{
	uint8_t ep_idx = USB_EP_GET_IDX(ep) & 0xF;
	const struct uhc_api *api = dev->api;
	struct uhc_transfer *xfer = NULL;
	uint16_t mps;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		goto xfer_alloc_error;
	}

	if (ep_idx == 0) {
		mps = udev->dev_desc.bMaxPacketSize0;
	} else {
		struct usb_ep_descriptor *ep_desc;

		if (USB_EP_DIR_IS_IN(ep)) {
			ep_desc = udev->ep_in[ep_idx].desc;
		} else {
			ep_desc = udev->ep_out[ep_idx].desc;
		}

		if (ep_desc == NULL) {
			LOG_ERR("Endpoint 0x%02x is not configured", ep);
			goto xfer_alloc_error;
		}

		mps = ep_desc->wMaxPacketSize;
	}

	LOG_DBG("Allocate xfer, ep 0x%02x mps %u cb %p", ep, mps, cb);

	if (k_mem_slab_alloc(&uhc_xfer_pool, (void **)&xfer, K_NO_WAIT)) {
		LOG_ERR("Failed to allocate transfer");
		goto xfer_alloc_error;
	}

	memset(xfer, 0, sizeof(struct uhc_transfer));
	xfer->ep = ep;
	xfer->mps = mps;
	xfer->udev = udev;
	xfer->cb = cb;
	xfer->priv = cb_priv;

xfer_alloc_error:
	api->unlock(dev);

	return xfer;
}

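/*
 * Allocate a transfer descriptor together with a buffer of the given size.
 * A usage sketch, assuming an IN endpoint 0x81 and application-specific
 * cb/priv arguments (error handling omitted):
 *
 *	xfer = uhc_xfer_alloc_with_buf(dev, 0x81, udev, cb, priv, 64);
 *	ret = uhc_ep_enqueue(dev, xfer);
 */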
struct uhc_transfer *uhc_xfer_alloc_with_buf(const struct device *dev,
					     const uint8_t ep,
					     struct usb_device *const udev,
					     void *const cb,
					     void *const cb_priv,
					     size_t size)
{
	struct uhc_transfer *xfer;
	struct net_buf *buf;

	buf = uhc_xfer_buf_alloc(dev, size);
	if (buf == NULL) {
		return NULL;
	}

	xfer = uhc_xfer_alloc(dev, ep, udev, cb, cb_priv);
	if (xfer == NULL) {
		net_buf_unref(buf);
		return NULL;
	}

	xfer->buf = buf;

	return xfer;
}

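/* Free a transfer descriptor; fails with -EBUSY while it is still queued. */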
int uhc_xfer_free(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret = 0;

	api->lock(dev);

	if (xfer->queued) {
		ret = -EBUSY;
		LOG_ERR("Transfer is still queued");
		goto xfer_free_error;
	}

	k_mem_slab_free(&uhc_xfer_pool, (void *)xfer);

xfer_free_error:
	api->unlock(dev);

	return ret;
}

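/* Attach a buffer to a transfer that is not yet queued. */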
int uhc_xfer_buf_add(const struct device *dev,
		     struct uhc_transfer *const xfer,
		     struct net_buf *buf)
{
	const struct uhc_api *api = dev->api;
	int ret = 0;

	api->lock(dev);
	if (xfer->queued) {
		ret = -EBUSY;
	} else {
		xfer->buf = buf;
	}

	api->unlock(dev);

	return ret;
}

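/*
 * Queue a transfer for processing; on success it is marked as queued and
 * handed over to the host controller driver.
 */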
int uhc_ep_enqueue(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	xfer->queued = 1;
	ret = api->ep_enqueue(dev, xfer);
	if (ret) {
		xfer->queued = 0;
	}

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}

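/* Remove a transfer from the host controller driver and clear its queued flag. */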
int uhc_ep_dequeue(const struct device *dev, struct uhc_transfer *const xfer)
{
	const struct uhc_api *api = dev->api;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	ret = api->ep_dequeue(dev, xfer);
	xfer->queued = 0;

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}

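/* Enable an initialized host controller and set its ENABLED status bit. */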
int uhc_enable(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!uhc_is_initialized(dev)) {
		ret = -EPERM;
		goto uhc_enable_error;
	}

	if (uhc_is_enabled(dev)) {
		ret = -EALREADY;
		goto uhc_enable_error;
	}

	ret = api->enable(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UHC_STATUS_ENABLED);
	}

uhc_enable_error:
	api->unlock(dev);

	return ret;
}

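/* Disable the host controller and clear its ENABLED status bit. */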
int uhc_disable(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!uhc_is_enabled(dev)) {
		ret = -EALREADY;
		goto uhc_disable_error;
	}

	ret = api->disable(dev);
	atomic_clear_bit(&data->status, UHC_STATUS_ENABLED);

uhc_disable_error:
	api->unlock(dev);

	return ret;
}

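/*
 * Initialize the host controller and register the mandatory event callback.
 * A minimal callback sketch, where uhc_event_handler and
 * handle_completed_xfer are hypothetical application names:
 *
 *	static int uhc_event_handler(const struct device *dev,
 *				     const struct uhc_event *const event)
 *	{
 *		if (event->type == UHC_EVT_EP_REQUEST) {
 *			handle_completed_xfer(event->xfer);
 *		}
 *
 *		return 0;
 *	}
 */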
int uhc_init(const struct device *dev,
	     uhc_event_cb_t event_cb, const void *const event_ctx)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	if (event_cb == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (uhc_is_initialized(dev)) {
		ret = -EALREADY;
		goto uhc_init_error;
	}

	data->event_cb = event_cb;
	data->event_ctx = event_ctx;
	sys_dlist_init(&data->ctrl_xfers);
	sys_dlist_init(&data->bulk_xfers);

	ret = api->init(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UHC_STATUS_INITIALIZED);
	}

uhc_init_error:
	api->unlock(dev);

	return ret;
}

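/* Shut down the host controller; it must be disabled first. */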
int uhc_shutdown(const struct device *dev)
{
	const struct uhc_api *api = dev->api;
	struct uhc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (uhc_is_enabled(dev)) {
		ret = -EBUSY;
		goto uhc_shutdown_error;
	}

	if (!uhc_is_initialized(dev)) {
		ret = -EALREADY;
		goto uhc_shutdown_error;
	}

	ret = api->shutdown(dev);
	atomic_clear_bit(&data->status, UHC_STATUS_INITIALIZED);

uhc_shutdown_error:
	api->unlock(dev);

	return ret;
}