/*
 * Copyright (c) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <stdbool.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/internal/syscall_handler.h>

/**
 * Verify each SQE operation type and its fields, ensuring the iodev
 * (if given) is a valid, accessible k_object and that any buffer
 * pointers reference memory accessible to the calling thread.
 *
 * Each op code that is acceptable from user mode must also be validated.
 */
static inline bool rtio_vrfy_sqe(struct rtio_sqe *sqe)
{
	if (sqe->iodev != NULL && K_SYSCALL_OBJ(sqe->iodev, K_OBJ_RTIO_IODEV)) {
		return false;
	}

	bool valid_sqe = true;

	switch (sqe->op) {
	case RTIO_OP_NOP:
		break;
	case RTIO_OP_TX:
		/* Transmit buffers only need to be readable by the caller */
		valid_sqe &= (K_SYSCALL_MEMORY(sqe->tx.buf, sqe->tx.buf_len, false) == 0);
		break;
	case RTIO_OP_RX:
		if ((sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) == 0) {
			/* Receive buffers must be writable by the caller */
			valid_sqe &= (K_SYSCALL_MEMORY(sqe->rx.buf, sqe->rx.buf_len, true) == 0);
		}
		break;
	case RTIO_OP_TINY_TX:
		break;
	case RTIO_OP_TXRX:
		valid_sqe &= (K_SYSCALL_MEMORY(sqe->txrx.tx_buf, sqe->txrx.buf_len, false) == 0);
		valid_sqe &= (K_SYSCALL_MEMORY(sqe->txrx.rx_buf, sqe->txrx.buf_len, true) == 0);
		break;
	default:
		/* RTIO OP must be known and allowable from user mode
		 * otherwise it is invalid
		 */
		valid_sqe = false;
	}

	return valid_sqe;
}

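/* Verify the RTIO context object before releasing a previously claimed
 * mempool buffer back to it.
 */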
static inline void z_vrfy_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
{
	K_OOPS(K_SYSCALL_OBJ(r, K_OBJ_RTIO));
	z_impl_rtio_release_buffer(r, buff, buff_len);
}
#include <zephyr/syscalls/rtio_release_buffer_mrsh.c>

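/* Verify the RTIO context object, the completion queue entry, and the output
 * pointers for the claimed buffer and its length before fetching a mempool
 * buffer for a completion.
 */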
static inline int z_vrfy_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
						     uint8_t **buff, uint32_t *buff_len)
{
	K_OOPS(K_SYSCALL_OBJ(r, K_OBJ_RTIO));
	K_OOPS(K_SYSCALL_MEMORY_READ(cqe, sizeof(struct rtio_cqe)));
	/* buff and buff_len are output parameters and must be writable */
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buff, sizeof(void *)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buff_len, sizeof(uint32_t)));
	return z_impl_rtio_cqe_get_mempool_buffer(r, cqe, buff, buff_len);
}
#include <zephyr/syscalls/rtio_cqe_get_mempool_buffer_mrsh.c>

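/* Cancellation takes a handle that is expected to have been returned by
 * rtio_sqe_copy_in_get_handles(), pointing back into the context's SQE pool
 * rather than at caller-provided memory, so no additional memory checks are
 * performed here.
 */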
static inline int z_vrfy_rtio_sqe_cancel(struct rtio_sqe *sqe)
{
	return z_impl_rtio_sqe_cancel(sqe);
}
#include <zephyr/syscalls/rtio_sqe_cancel_mrsh.c>

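/* Copy a caller-provided array of SQEs into the kernel-owned submission queue,
 * verifying the RTIO context, the source array, and each individual SQE via
 * rtio_vrfy_sqe(). The whole batch is dropped and the calling thread is oopsed
 * if any single entry fails verification.
 */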
static inline int z_vrfy_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
						      struct rtio_sqe **handle, size_t sqe_count)
{
	K_OOPS(K_SYSCALL_OBJ(r, K_OBJ_RTIO));

	K_OOPS(K_SYSCALL_MEMORY_ARRAY_READ(sqes, sqe_count, sizeof(struct rtio_sqe)));

	/* The first acquired SQE is reported back through *handle, so the
	 * destination must be writable by the caller.
	 */
	if (handle != NULL) {
		K_OOPS(K_SYSCALL_MEMORY_WRITE(handle, sizeof(struct rtio_sqe *)));
	}

	struct rtio_sqe *sqe;
	uint32_t acquirable = rtio_sqe_acquirable(r);

	if (acquirable < sqe_count) {
		return -ENOMEM;
	}

	for (size_t i = 0; i < sqe_count; i++) {
		sqe = rtio_sqe_acquire(r);
		__ASSERT_NO_MSG(sqe != NULL);
		if (handle != NULL && i == 0) {
			*handle = sqe;
		}
		*sqe = sqes[i];

		if (!rtio_vrfy_sqe(sqe)) {
			rtio_sqe_drop_all(r);
			K_OOPS(true);
		}
	}

	/* Already copied *and* verified, no need to redo */
	return z_impl_rtio_sqe_copy_in_get_handles(r, NULL, NULL, 0);
}
#include <zephyr/syscalls/rtio_sqe_copy_in_get_handles_mrsh.c>

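/* Verify the RTIO context and that the destination array for completion queue
 * entries is writable by the caller before copying out up to cqe_count CQEs.
 */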
static inline int z_vrfy_rtio_cqe_copy_out(struct rtio *r,
					   struct rtio_cqe *cqes,
					   size_t cqe_count,
					   k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(r, K_OBJ_RTIO));

	K_OOPS(K_SYSCALL_MEMORY_ARRAY_WRITE(cqes, cqe_count, sizeof(struct rtio_cqe)));

	return z_impl_rtio_cqe_copy_out(r, cqes, cqe_count, timeout);
}
#include <zephyr/syscalls/rtio_cqe_copy_out_mrsh.c>

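/* Verify the RTIO context (and, when CONFIG_RTIO_SUBMIT_SEM is enabled, its
 * submission semaphore) before submitting the queued SQEs for execution.
 */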
static inline int z_vrfy_rtio_submit(struct rtio *r, uint32_t wait_count)
{
	K_OOPS(K_SYSCALL_OBJ(r, K_OBJ_RTIO));

#ifdef CONFIG_RTIO_SUBMIT_SEM
	K_OOPS(K_SYSCALL_OBJ(r->submit_sem, K_OBJ_SEM));
#endif

	return z_impl_rtio_submit(r, wait_count);
}
#include <zephyr/syscalls/rtio_submit_mrsh.c>
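
/*
 * For reference, a minimal sketch of the user-mode call sequence that the
 * handlers above verify; it is illustrative only and not part of this file's
 * implementation. The iodev "my_iodev", the queue depths, and the buffer size
 * are assumptions for the example.
 *
 *	RTIO_DEFINE(ex_rtio, 4, 4);
 *
 *	uint8_t buf[16];
 *	struct rtio_sqe sqe;
 *	struct rtio_cqe cqe;
 *
 *	rtio_sqe_prep_read(&sqe, &my_iodev, RTIO_PRIO_NORM, buf, sizeof(buf), NULL);
 *
 *	(copied into the kernel-owned queue and checked by rtio_vrfy_sqe() above)
 *	rtio_sqe_copy_in_get_handles(&ex_rtio, &sqe, NULL, 1);
 *	rtio_submit(&ex_rtio, 1);
 *	rtio_cqe_copy_out(&ex_rtio, &cqe, 1, K_FOREVER);
 */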