/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/storage/disk_access.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/usb/usbd.h>
#include <zephyr/usb/usb_ch9.h>
#include <zephyr/usb/class/usbd_msc.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/drivers/usb/udc.h>

#include "usbd_msc_scsi.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(usbd_msc, CONFIG_USBD_MSC_LOG_LEVEL);

/* Subclass and Protocol codes */
#define SCSI_TRANSPARENT_COMMAND_SET	0x06
#define BULK_ONLY_TRANSPORT		0x50

/* Control requests */
#define GET_MAX_LUN			0xFE
#define BULK_ONLY_MASS_STORAGE_RESET	0xFF

/* Command wrapper */
#define CBW_SIGNATURE			0x43425355

#define CBW_FLAGS_DIRECTION_IN		0x80
#define CBW_FLAGS_RESERVED_MASK		0x3F

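/* 31-byte Command Block Wrapper, sent by the host at the start of every
 * Bulk-Only Transport transfer. The signature spells "USBC" and all
 * multi-byte fields are little-endian on the wire.
 */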
struct CBW {
	uint32_t dCBWSignature;
	uint32_t dCBWTag;
	uint32_t dCBWDataTransferLength;
	uint8_t bmCBWFlags;
	uint8_t bCBWLUN;
	uint8_t bCBWCBLength;
	uint8_t CBWCB[16];
} __packed;

/* Status wrapper */
#define CSW_SIGNATURE			0x53425355

#define CSW_STATUS_COMMAND_PASSED	0x00
#define CSW_STATUS_COMMAND_FAILED	0x01
#define CSW_STATUS_PHASE_ERROR		0x02

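/* 13-byte Command Status Wrapper returned to the host after the data phase
 * (or directly after the CBW when there is no data phase). dCSWDataResidue
 * reports how much of dCBWDataTransferLength was not actually processed.
 */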
struct CSW {
	uint32_t dCSWSignature;
	uint32_t dCSWTag;
	uint32_t dCSWDataResidue;
	uint8_t bCSWStatus;
} __packed;

/* A single instance is likely enough because it can support multiple LUNs */
#define MSC_NUM_INSTANCES CONFIG_USBD_MSC_INSTANCES_COUNT

/* Can be 64 if the device is not High-Speed capable */
#define MSC_BUF_SIZE 512

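/* One OUT and one IN buffer per instance; the udc_buf_info metadata stores
 * the endpoint each buffer is queued on.
 */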
UDC_BUF_POOL_DEFINE(msc_ep_pool,
		    MSC_NUM_INSTANCES * 2, MSC_BUF_SIZE,
		    sizeof(struct udc_buf_info), NULL);

struct msc_event {
	struct usbd_class_data *c_data;
	/* NULL to request Bulk-Only Mass Storage Reset,
	 * otherwise it must point to a previously enqueued endpoint buffer
	 */
	struct net_buf *buf;
	int err;
};

/* Each instance has 2 endpoints and can receive a Bulk-Only Mass Storage Reset */
K_MSGQ_DEFINE(msc_msgq, sizeof(struct msc_event), MSC_NUM_INSTANCES * 3, 4);

/* Make supported vendor request visible for the device stack */
static const struct usbd_cctx_vendor_req msc_bot_vregs =
	USBD_VENDOR_REQ(GET_MAX_LUN, BULK_ONLY_MASS_STORAGE_RESET);

struct msc_bot_desc {
	struct usb_if_descriptor if0;
	struct usb_ep_descriptor if0_in_ep;
	struct usb_ep_descriptor if0_out_ep;
	struct usb_ep_descriptor if0_hs_in_ep;
	struct usb_ep_descriptor if0_hs_out_ep;
	struct usb_desc_header nil_desc;
};

enum {
	MSC_CLASS_ENABLED,
	MSC_BULK_OUT_QUEUED,
	MSC_BULK_IN_QUEUED,
	MSC_BULK_IN_WEDGED,
	MSC_BULK_OUT_WEDGED,
};

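/* Bulk-Only Transport is a lockstep protocol: every transfer starts with a
 * CBW, optionally moves data IN or OUT, and always ends with a CSW. The
 * states below track that sequence. WAIT_FOR_RESET_RECOVERY is entered on an
 * invalid CBW and keeps both bulk endpoints halted until the host performs
 * Reset Recovery (Bulk-Only Mass Storage Reset followed by clearing the
 * endpoint halts).
 */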
enum msc_bot_state {
	MSC_BBB_EXPECT_CBW,
	MSC_BBB_PROCESS_CBW,
	MSC_BBB_PROCESS_READ,
	MSC_BBB_PROCESS_WRITE,
	MSC_BBB_SEND_CSW,
	MSC_BBB_WAIT_FOR_CSW_SENT,
	MSC_BBB_WAIT_FOR_RESET_RECOVERY,
};

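/* Per-instance class state. scsi_buf stages data between the SCSI layer and
 * the USB endpoint buffers; scsi_bytes is the amount of valid data in it and
 * scsi_offset tracks how much of that has been queued to the host on the
 * Data-IN path.
 */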
struct msc_bot_ctx {
	struct usbd_class_data *class_node;
	struct msc_bot_desc *const desc;
	const struct usb_desc_header **const fs_desc;
	const struct usb_desc_header **const hs_desc;
	atomic_t bits;
	enum msc_bot_state state;
	uint8_t registered_luns;
	struct scsi_ctx luns[CONFIG_USBD_MSC_LUNS_PER_INSTANCE];
	struct CBW cbw;
	struct CSW csw;
	uint8_t scsi_buf[CONFIG_USBD_MSC_SCSI_BUFFER_SIZE];
	uint32_t transferred_data;
	size_t scsi_offset;
	size_t scsi_bytes;
};

static struct net_buf *msc_buf_alloc(const uint8_t ep)
{
	struct net_buf *buf = NULL;
	struct udc_buf_info *bi;

	buf = net_buf_alloc(&msc_ep_pool, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	bi = udc_get_buf_info(buf);
	bi->ep = ep;

	return buf;
}

static uint8_t msc_get_bulk_in(struct usbd_class_data *const c_data)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct msc_bot_desc *desc = ctx->desc;

	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
		return desc->if0_hs_in_ep.bEndpointAddress;
	}

	return desc->if0_in_ep.bEndpointAddress;
}

static uint8_t msc_get_bulk_out(struct usbd_class_data *const c_data)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct msc_bot_desc *desc = ctx->desc;

	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
		return desc->if0_hs_out_ep.bEndpointAddress;
	}

	return desc->if0_out_ep.bEndpointAddress;
}

static void msc_queue_bulk_out_ep(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct net_buf *buf;
	uint8_t ep;
	int ret;

	if (atomic_test_and_set_bit(&ctx->bits, MSC_BULK_OUT_QUEUED)) {
		/* Already queued */
		return;
	}

	LOG_DBG("Queuing OUT");
	ep = msc_get_bulk_out(c_data);
	buf = msc_buf_alloc(ep);
	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	ret = usbd_ep_enqueue(c_data, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		net_buf_unref(buf);
		atomic_clear_bit(&ctx->bits, MSC_BULK_OUT_QUEUED);
	}
}

static void msc_stall_bulk_out_ep(struct usbd_class_data *const c_data)
{
	uint8_t ep;

	ep = msc_get_bulk_out(c_data);
	usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
}

static void msc_stall_bulk_in_ep(struct usbd_class_data *const c_data)
{
	uint8_t ep;

	ep = msc_get_bulk_in(c_data);
	usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
}

static void msc_reset_handler(struct usbd_class_data *c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	int i;

	LOG_INF("Bulk-Only Mass Storage Reset");
	ctx->state = MSC_BBB_EXPECT_CBW;
	for (i = 0; i < ctx->registered_luns; i++) {
		scsi_reset(&ctx->luns[i]);
	}

	atomic_clear_bit(&ctx->bits, MSC_BULK_IN_WEDGED);
	atomic_clear_bit(&ctx->bits, MSC_BULK_OUT_WEDGED);
}

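/* The Bulk-Only Transport specification distinguishes a valid CBW (correct
 * length and signature, checked where the packet is received) from a
 * meaningful one (sane contents, checked here).
 */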
static bool is_cbw_meaningful(struct msc_bot_ctx *const ctx)
{
	if (ctx->cbw.bmCBWFlags & CBW_FLAGS_RESERVED_MASK) {
		/* Reserved bits are set = not meaningful */
		return false;
	}

	if (ctx->cbw.bCBWLUN >= ctx->registered_luns) {
		/* Either an unregistered LUN or an invalid one (> 0x0F) */
		return false;
	}

	if (ctx->cbw.bCBWCBLength < 1 || ctx->cbw.bCBWCBLength > 16) {
		/* The only legal values are 1 to 16, others are reserved */
		return false;
	}

	return true;
}

static void msc_process_read(struct msc_bot_ctx *ctx)
{
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];
	int bytes_queued = 0;
	struct net_buf *buf;
	uint8_t ep;
	size_t len;
	int ret;

	/* Fill SCSI Data IN buffer if there is no data available */
	if (ctx->scsi_bytes == 0) {
		ctx->scsi_bytes = scsi_read_data(lun, ctx->scsi_buf);
		ctx->scsi_offset = 0;
	}

	if (atomic_test_and_set_bit(&ctx->bits, MSC_BULK_IN_QUEUED)) {
		__ASSERT_NO_MSG(false);
		LOG_ERR("IN already queued");
		return;
	}

	ep = msc_get_bulk_in(ctx->class_node);
	buf = msc_buf_alloc(ep);
	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	while (ctx->scsi_bytes - ctx->scsi_offset > 0) {
		len = MIN(ctx->scsi_bytes - ctx->scsi_offset,
			  MSC_BUF_SIZE - bytes_queued);
		if (len == 0) {
			/* Either queued as much as possible or there is no more
			 * SCSI IN data available
			 */
			break;
		}

		net_buf_add_mem(buf, &ctx->scsi_buf[ctx->scsi_offset], len);
		bytes_queued += len;
		ctx->scsi_offset += len;

		if (ctx->scsi_bytes == ctx->scsi_offset) {
			/* SCSI buffer can be reused now */
			ctx->scsi_bytes = scsi_read_data(lun, ctx->scsi_buf);
			ctx->scsi_offset = 0;
		}
	}

	/* Either the net buf is full or there is no more SCSI data */
	ctx->csw.dCSWDataResidue -= bytes_queued;
	ret = usbd_ep_enqueue(ctx->class_node, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		net_buf_unref(buf);
		atomic_clear_bit(&ctx->bits, MSC_BULK_IN_QUEUED);
	}
}

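/* Decide the data phase and the CSW outcome by comparing what the host
 * announced in the CBW (dCBWDataTransferLength and the direction flag)
 * against what the SCSI layer intends to do, following the thirteen cases
 * of the Bulk-Only Transport specification (6.7).
 */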
static void msc_process_cbw(struct msc_bot_ctx *ctx)
{
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];
	bool cmd_is_data_read, cmd_is_data_write;
	size_t data_len;
	int cb_len;

	cb_len = scsi_usb_boot_cmd_len(ctx->cbw.CBWCB, ctx->cbw.bCBWCBLength);
	data_len = scsi_cmd(lun, ctx->cbw.CBWCB, cb_len, ctx->scsi_buf);
	ctx->scsi_bytes = data_len;
	ctx->scsi_offset = 0;
	cmd_is_data_read = scsi_cmd_is_data_read(lun);
	cmd_is_data_write = scsi_cmd_is_data_write(lun);
	data_len += scsi_cmd_remaining_data_len(lun);

	/* Write commands must not return any data to the initiator (host) */
	__ASSERT_NO_MSG(cmd_is_data_read || ctx->scsi_bytes == 0);

	if (ctx->cbw.dCBWDataTransferLength == 0) {
		/* 6.7.1 Hn - Host expects no data transfers */
		if (data_len == 0) {
			/* Case (1) Hn = Dn */
			if (scsi_cmd_get_status(lun) == GOOD) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
			} else {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
			}
		} else {
			/* Case (2) Hn < Di or (3) Hn < Do */
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	} else if (data_len == 0) {
		/* The SCSI target does not want any data, but the host wants
		 * to either send or receive data. Note that the SCSI target
		 * data direction is irrelevant, because the opcode can simply
		 * be unsupported. Even if the host maliciously issues a
		 * 0-sector read and wants to write data as indicated in the CB
		 * it is still Case (9) Ho > Dn.
		 */
		if (ctx->cbw.bmCBWFlags & CBW_FLAGS_DIRECTION_IN) {
			/* Case (4) Hi > Dn */
			msc_stall_bulk_in_ep(ctx->class_node);
		} else {
			/* Case (9) Ho > Dn */
			msc_stall_bulk_out_ep(ctx->class_node);
		}

		if (scsi_cmd_get_status(lun) == GOOD) {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
		} else {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	} else if (ctx->cbw.bmCBWFlags & CBW_FLAGS_DIRECTION_IN) {
		/* 6.7.2 Hi - Host expects to receive data from device */
		if ((data_len > ctx->cbw.dCBWDataTransferLength) ||
		    !cmd_is_data_read) {
			/* Case (7) Hi < Di or (8) Hi <> Do */
			msc_stall_bulk_in_ep(ctx->class_node);
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
			ctx->state = MSC_BBB_SEND_CSW;
		} else {
			/* Case (5) Hi > Di or (6) Hi = Di */
			ctx->state = MSC_BBB_PROCESS_READ;
		}
	} else {
		/* 6.7.3 Ho - Host expects to send data to the device */
		if ((data_len > ctx->cbw.dCBWDataTransferLength) ||
		    !cmd_is_data_write) {
			/* Case (10) Ho <> Di or (13) Ho < Do */
			msc_stall_bulk_out_ep(ctx->class_node);
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
			ctx->state = MSC_BBB_SEND_CSW;
		} else {
			/* Case (11) Ho > Do or (12) Ho = Do */
			ctx->state = MSC_BBB_PROCESS_WRITE;
		}
	}
}

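/* Data-OUT path: accumulate received bytes in scsi_buf and hand them to the
 * SCSI layer once a full buffer (typically a sector) or the last bytes of
 * the transfer have arrived.
 */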
static void msc_process_write(struct msc_bot_ctx *ctx,
			      uint8_t *buf, size_t len)
{
	size_t tmp;
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];

	ctx->transferred_data += len;

	while ((len > 0) && (scsi_cmd_remaining_data_len(lun) > 0)) {
		/* Copy received data to the end of SCSI buffer */
		tmp = MIN(len, sizeof(ctx->scsi_buf) - ctx->scsi_bytes);
		memcpy(&ctx->scsi_buf[ctx->scsi_bytes], buf, tmp);
		ctx->scsi_bytes += tmp;
		buf += tmp;
		len -= tmp;

		/* Pass data to the SCSI layer when either all transfer data
		 * bytes have been received or the SCSI buffer is full.
		 */
		while ((ctx->scsi_bytes >= scsi_cmd_remaining_data_len(lun)) ||
		       (ctx->scsi_bytes == sizeof(ctx->scsi_buf))) {
			tmp = scsi_write_data(lun, ctx->scsi_buf, ctx->scsi_bytes);
			__ASSERT(tmp <= ctx->scsi_bytes,
				 "Processed more data than requested");
			if (tmp == 0) {
				LOG_WRN("SCSI handler didn't process %d bytes",
					ctx->scsi_bytes);
				ctx->scsi_bytes = 0;
			} else {
				LOG_DBG("SCSI processed %d out of %d bytes",
					tmp, ctx->scsi_bytes);
			}

			ctx->csw.dCSWDataResidue -= tmp;
			if (scsi_cmd_remaining_data_len(lun) == 0) {
				/* Abandon any leftover data */
				ctx->scsi_bytes = 0;
				break;
			}

			/* Move the remaining data to the start of the SCSI
			 * buffer. Note that the copied length here is zero (and
			 * thus no copy happens) when the underlying sector size
			 * is equal to the SCSI buffer size.
			 */
			memmove(ctx->scsi_buf, &ctx->scsi_buf[tmp], ctx->scsi_bytes - tmp);
			ctx->scsi_bytes -= tmp;
		}
	}

	if ((ctx->transferred_data >= ctx->cbw.dCBWDataTransferLength) ||
	    (scsi_cmd_remaining_data_len(lun) == 0)) {
		if (ctx->transferred_data < ctx->cbw.dCBWDataTransferLength) {
			/* Case (11) Ho > Do and the transfer is still in
			 * progress. We do not intend to process more data, so
			 * stall the Bulk-Out pipe.
			 */
			msc_stall_bulk_out_ep(ctx->class_node);
		}

		if (scsi_cmd_get_status(lun) == GOOD) {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
		} else {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	}
}

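/* Bulk-OUT completions are either a new CBW (when idle) or Data-OUT payload
 * for a write in progress. Anything that does not parse as a valid CBW
 * wedges both endpoints until Reset Recovery (6.6.1).
 */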
static void msc_handle_bulk_out(struct msc_bot_ctx *ctx,
				uint8_t *buf, size_t len)
{
	if (ctx->state == MSC_BBB_EXPECT_CBW) {
		if (len == sizeof(struct CBW) && sys_get_le32(buf) == CBW_SIGNATURE) {
			memcpy(&ctx->cbw, buf, sizeof(struct CBW));
			/* Convert dCBWDataTransferLength endianness, other
			 * fields are either single byte or not relevant.
			 */
			ctx->cbw.dCBWDataTransferLength =
				sys_le32_to_cpu(ctx->cbw.dCBWDataTransferLength);
			/* Fill CSW with relevant information */
			ctx->csw.dCSWSignature = sys_cpu_to_le32(CSW_SIGNATURE);
			ctx->csw.dCSWTag = ctx->cbw.dCBWTag;
			ctx->csw.dCSWDataResidue = ctx->cbw.dCBWDataTransferLength;
			ctx->transferred_data = 0;
			if (is_cbw_meaningful(ctx)) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
				ctx->state = MSC_BBB_PROCESS_CBW;
			} else {
				LOG_INF("Not meaningful CBW");
				/* Mass Storage Class - Bulk-Only Transport does
				 * not specify the response to a not meaningful
				 * CBW. Stall Bulk IN and report Phase Error.
				 */
				msc_stall_bulk_in_ep(ctx->class_node);
				ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
				ctx->state = MSC_BBB_SEND_CSW;
			}
		} else {
			/* 6.6.1 CBW Not Valid */
			LOG_INF("Invalid CBW");
			atomic_set_bit(&ctx->bits, MSC_BULK_IN_WEDGED);
			atomic_set_bit(&ctx->bits, MSC_BULK_OUT_WEDGED);
			msc_stall_bulk_in_ep(ctx->class_node);
			msc_stall_bulk_out_ep(ctx->class_node);
			ctx->state = MSC_BBB_WAIT_FOR_RESET_RECOVERY;
		}
	} else if (ctx->state == MSC_BBB_PROCESS_WRITE) {
		msc_process_write(ctx, buf, len);
	}
}

static void msc_handle_bulk_in(struct msc_bot_ctx *ctx,
			       uint8_t *buf, size_t len)
{
	if (ctx->state == MSC_BBB_WAIT_FOR_CSW_SENT) {
		LOG_DBG("CSW sent");
		ctx->state = MSC_BBB_EXPECT_CBW;
	} else if (ctx->state == MSC_BBB_PROCESS_READ) {
		struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];

		ctx->transferred_data += len;
		if (ctx->scsi_bytes == 0) {
			if (ctx->csw.dCSWDataResidue > 0) {
				/* Case (5) Hi > Di
				 * While we may have sent a short packet, the
				 * device shall STALL the Bulk-In pipe (if it
				 * does not send padding data).
				 */
				msc_stall_bulk_in_ep(ctx->class_node);
			}
			if (scsi_cmd_get_status(lun) == GOOD) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
			} else {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
			}
			ctx->state = MSC_BBB_SEND_CSW;
		}
	}
}

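/* Queue the Command Status Wrapper on the Bulk-IN endpoint; the state
 * machine then waits for its completion before accepting the next CBW.
 */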
static void msc_send_csw(struct msc_bot_ctx *ctx)
{
	struct net_buf *buf;
	uint8_t ep;
	int ret;

	if (atomic_test_and_set_bit(&ctx->bits, MSC_BULK_IN_QUEUED)) {
		__ASSERT_NO_MSG(false);
		LOG_ERR("IN already queued");
		return;
	}

	/* Convert dCSWDataResidue to LE, other fields are already set */
	ctx->csw.dCSWDataResidue = sys_cpu_to_le32(ctx->csw.dCSWDataResidue);
	ep = msc_get_bulk_in(ctx->class_node);
	buf = msc_buf_alloc(ep);
	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	net_buf_add_mem(buf, &ctx->csw, sizeof(ctx->csw));
	ret = usbd_ep_enqueue(ctx->class_node, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		net_buf_unref(buf);
		atomic_clear_bit(&ctx->bits, MSC_BULK_IN_QUEUED);
	}
	ctx->state = MSC_BBB_WAIT_FOR_CSW_SENT;
}

static void usbd_msc_handle_request(struct usbd_class_data *c_data,
				    struct net_buf *buf, int err)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct udc_buf_info *bi;

	bi = udc_get_buf_info(buf);
	if (err) {
		if (err == -ECONNABORTED) {
			LOG_WRN("request ep 0x%02x, len %u cancelled",
				bi->ep, buf->len);
		} else {
			LOG_ERR("request ep 0x%02x, len %u failed",
				bi->ep, buf->len);
		}

		goto ep_request_error;
	}

	if (bi->ep == msc_get_bulk_out(c_data)) {
		msc_handle_bulk_out(ctx, buf->data, buf->len);
	} else if (bi->ep == msc_get_bulk_in(c_data)) {
		msc_handle_bulk_in(ctx, buf->data, buf->len);
	}

ep_request_error:
	if (bi->ep == msc_get_bulk_out(c_data)) {
		atomic_clear_bit(&ctx->bits, MSC_BULK_OUT_QUEUED);
	} else if (bi->ep == msc_get_bulk_in(c_data)) {
		atomic_clear_bit(&ctx->bits, MSC_BULK_IN_QUEUED);
	}
	usbd_ep_buf_free(uds_ctx, buf);
}

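/* All BOT processing happens in this single thread; endpoint completions and
 * reset requests are delivered through msc_msgq, so disk and SCSI handling
 * never runs directly in the USB stack's callback context.
 */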
static void usbd_msc_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	struct msc_event evt;
	struct msc_bot_ctx *ctx;

	while (1) {
		k_msgq_get(&msc_msgq, &evt, K_FOREVER);

		ctx = usbd_class_get_private(evt.c_data);
		if (evt.buf == NULL) {
			msc_reset_handler(evt.c_data);
		} else {
			usbd_msc_handle_request(evt.c_data, evt.buf, evt.err);
		}

		if (!atomic_test_bit(&ctx->bits, MSC_CLASS_ENABLED)) {
			continue;
		}

		switch (ctx->state) {
		case MSC_BBB_EXPECT_CBW:
		case MSC_BBB_PROCESS_WRITE:
			/* Ensure we can accept the next OUT packet */
			msc_queue_bulk_out_ep(evt.c_data);
			break;
		default:
			break;
		}

		/* Skip the (potentially) response-generating code if there is
		 * already IN data available for the host to pick up.
		 */
		if (atomic_test_bit(&ctx->bits, MSC_BULK_IN_QUEUED)) {
			continue;
		}

		if (ctx->state == MSC_BBB_PROCESS_CBW) {
			msc_process_cbw(ctx);
		}

		if (ctx->state == MSC_BBB_PROCESS_READ) {
			msc_process_read(ctx);
		} else if (ctx->state == MSC_BBB_PROCESS_WRITE) {
			msc_queue_bulk_out_ep(evt.c_data);
		} else if (ctx->state == MSC_BBB_SEND_CSW) {
			msc_send_csw(ctx);
		}
	}
}

static void msc_bot_schedule_reset(struct usbd_class_data *c_data)
{
	struct msc_event request = {
		.c_data = c_data,
		.buf = NULL, /* Bulk-Only Mass Storage Reset */
	};

	k_msgq_put(&msc_msgq, &request, K_FOREVER);
}

/* Feature endpoint halt state handler */
static void msc_bot_feature_halt(struct usbd_class_data *const c_data,
				 const uint8_t ep, const bool halted)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	if (ep == msc_get_bulk_in(c_data) && !halted &&
	    atomic_test_bit(&ctx->bits, MSC_BULK_IN_WEDGED)) {
		/* Endpoint shall remain halted until Reset Recovery */
		usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
	} else if (ep == msc_get_bulk_out(c_data) && !halted &&
	    atomic_test_bit(&ctx->bits, MSC_BULK_OUT_WEDGED)) {
		/* Endpoint shall remain halted until Reset Recovery */
		usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
	}
}

/* USB control request handler to device */
static int msc_bot_control_to_dev(struct usbd_class_data *const c_data,
				  const struct usb_setup_packet *const setup,
				  const struct net_buf *const buf)
{
	if (setup->bRequest == BULK_ONLY_MASS_STORAGE_RESET &&
	    setup->wValue == 0 && setup->wLength == 0) {
		msc_bot_schedule_reset(c_data);
	} else {
		errno = -ENOTSUP;
	}

	return 0;
}

/* USB control request handler to host */
static int msc_bot_control_to_host(struct usbd_class_data *const c_data,
				   const struct usb_setup_packet *const setup,
				   struct net_buf *const buf)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	uint8_t max_lun;

	if (setup->bRequest == GET_MAX_LUN &&
	    setup->wValue == 0 && setup->wLength >= 1) {
		/* If there is no LUN registered we cannot really do anything,
		 * because STALLing this request means that the device does not
		 * support multiple LUNs and the host should only address LUN 0.
		 */
		max_lun = ctx->registered_luns ? ctx->registered_luns - 1 : 0;
		net_buf_add_mem(buf, &max_lun, 1);
	} else {
		errno = -ENOTSUP;
	}

	return 0;
}

/* Endpoint request completion event handler */
static int msc_bot_request_handler(struct usbd_class_data *const c_data,
				   struct net_buf *buf, int err)
{
	struct msc_event request = {
		.c_data = c_data,
		.buf = buf,
		.err = err,
	};

	/* Defer request handling to mass storage thread */
	k_msgq_put(&msc_msgq, &request, K_FOREVER);

	return 0;
}

/* Class associated configuration is selected */
static void msc_bot_enable(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	LOG_INF("Enable");
	atomic_set_bit(&ctx->bits, MSC_CLASS_ENABLED);
	msc_bot_schedule_reset(c_data);
}

/* Class associated configuration is disabled */
static void msc_bot_disable(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	LOG_INF("Disable");
	atomic_clear_bit(&ctx->bits, MSC_CLASS_ENABLED);
}

static void *msc_bot_get_desc(struct usbd_class_data *const c_data,
			      const enum usbd_speed speed)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	if (speed == USBD_SPEED_HS) {
		return ctx->hs_desc;
	}

	return ctx->fs_desc;
}

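/* LUNs come from the usbd_msc_lun iterable section, typically defined
 * elsewhere with the USBD_DEFINE_MSC_LUN() helper from usbd_msc.h.
 */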
/* Initialization of the class implementation */
static int msc_bot_init(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	ctx->class_node = c_data;
	ctx->state = MSC_BBB_EXPECT_CBW;
	ctx->registered_luns = 0;

	STRUCT_SECTION_FOREACH(usbd_msc_lun, lun) {
		if (ctx->registered_luns >= CONFIG_USBD_MSC_LUNS_PER_INSTANCE) {
			LOG_ERR("Cannot register LUN %s", lun->disk);
			return -ENOMEM;
		}

		scsi_init(&ctx->luns[ctx->registered_luns++], lun->disk,
			  lun->vendor, lun->product, lun->revision);
	}

	return 0;
}

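/* Full-Speed bulk endpoints use 64-byte and High-Speed endpoints 512-byte
 * maximum packet size. The interface number and the 0x01/0x81 endpoint
 * addresses are placeholders that the device stack is expected to assign
 * when the configuration is put together.
 */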
#define DEFINE_MSC_BOT_DESCRIPTOR(n, _)						\
static struct msc_bot_desc msc_bot_desc_##n = {					\
	.if0 = {								\
		.bLength = sizeof(struct usb_if_descriptor),			\
		.bDescriptorType = USB_DESC_INTERFACE,				\
		.bInterfaceNumber = 0,						\
		.bAlternateSetting = 0,						\
		.bNumEndpoints = 2,						\
		.bInterfaceClass = USB_BCC_MASS_STORAGE,			\
		.bInterfaceSubClass = SCSI_TRANSPARENT_COMMAND_SET,		\
		.bInterfaceProtocol = BULK_ONLY_TRANSPORT,			\
		.iInterface = 0,						\
	},									\
	.if0_in_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x81,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
		.bInterval = 0,							\
	},									\
	.if0_out_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x01,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
		.bInterval = 0,							\
	},									\
	.if0_hs_in_ep = {							\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x81,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
		.bInterval = 0,							\
	},									\
	.if0_hs_out_ep = {							\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x01,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
		.bInterval = 0,							\
	},									\
										\
	.nil_desc = {								\
		.bLength = 0,							\
		.bDescriptorType = 0,						\
	},									\
};										\
										\
const static struct usb_desc_header *msc_bot_fs_desc_##n[] = {			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_in_ep,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_out_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.nil_desc,			\
};										\
										\
const static struct usb_desc_header *msc_bot_hs_desc_##n[] = {			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_hs_in_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_hs_out_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.nil_desc,			\
};


struct usbd_class_api msc_bot_api = {
	.feature_halt = msc_bot_feature_halt,
	.control_to_dev = msc_bot_control_to_dev,
	.control_to_host = msc_bot_control_to_host,
	.request = msc_bot_request_handler,
	.enable = msc_bot_enable,
	.disable = msc_bot_disable,
	.get_desc = msc_bot_get_desc,
	.init = msc_bot_init,
};

#define DEFINE_MSC_BOT_CLASS_DATA(x, _)					\
	static struct msc_bot_ctx msc_bot_ctx_##x = {			\
		.desc = &msc_bot_desc_##x,				\
		.fs_desc = msc_bot_fs_desc_##x,				\
		.hs_desc = msc_bot_hs_desc_##x,				\
	};								\
									\
	USBD_DEFINE_CLASS(msc_##x, &msc_bot_api, &msc_bot_ctx_##x,	\
			  &msc_bot_vregs);

LISTIFY(MSC_NUM_INSTANCES, DEFINE_MSC_BOT_DESCRIPTOR, ())
LISTIFY(MSC_NUM_INSTANCES, DEFINE_MSC_BOT_CLASS_DATA, ())

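/* Single worker thread shared by all MSC instances */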
K_THREAD_DEFINE(usbd_msc, CONFIG_USBD_MSC_STACK_SIZE,
		usbd_msc_thread, NULL, NULL, NULL,
		CONFIG_SYSTEM_WORKQUEUE_PRIORITY, 0, 0);