/*
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_
#define ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_

#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/dlist.h>

extern int32_t z_sched_prio_cmp(struct k_thread *thread_1,
	struct k_thread *thread_2);

bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

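/* Exactly one run queue backend is selected at build time via Kconfig
 * (CONFIG_SCHED_DUMB, CONFIG_SCHED_SCALABLE or CONFIG_SCHED_MULTIQ) and is
 * aliased to the generic _priq_run_* names below; the wait queue backend is
 * selected the same way through the _priq_wait_* aliases.
 */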
/* Dumb Scheduling */
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_init		z_priq_dumb_init
#define _priq_run_add		z_priq_dumb_add
#define _priq_run_remove	z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best	z_priq_dumb_mask_best
# else
#  define _priq_run_best	z_priq_dumb_best
# endif /* CONFIG_SCHED_CPU_MASK */
/* Scalable Scheduling */
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_init		z_priq_rb_init
#define _priq_run_add		z_priq_rb_add
#define _priq_run_remove	z_priq_rb_remove
#define _priq_run_best		z_priq_rb_best
/* Multi Queue Scheduling */
#elif defined(CONFIG_SCHED_MULTIQ)

#if defined(CONFIG_64BIT)
#define NBITS 64
#else
#define NBITS 32
#endif /* CONFIG_64BIT */
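/* NBITS is the number of bits in one word of the multiq occupancy bitmask,
 * matching the native word size selected by CONFIG_64BIT.
 */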
#define _priq_run_init		z_priq_mq_init
#define _priq_run_add		z_priq_mq_add
#define _priq_run_remove	z_priq_mq_remove
#define _priq_run_best		z_priq_mq_best
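/* z_priq_mq_add() and z_priq_mq_remove() are forward-declared here because
 * their definitions appear further down in this header, after the helpers
 * they rely on.
 */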
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
#endif

/* Scalable Wait Queue */
#if defined(CONFIG_WAITQ_SCALABLE)
#define _priq_wait_add		z_priq_rb_add
#define _priq_wait_remove	z_priq_rb_remove
#define _priq_wait_best		z_priq_rb_best
/* Dumb Wait Queue */
#elif defined(CONFIG_WAITQ_DUMB)
#define _priq_wait_add		z_priq_dumb_add
#define _priq_wait_remove	z_priq_dumb_remove
#define _priq_wait_best		z_priq_dumb_best
#endif
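/* Illustrative sketch of how the scheduler core is expected to use the
 * aliases above, independent of which backend Kconfig selected (the
 * variable names below are hypothetical):
 *
 *	_priq_run_init(&runq);
 *	_priq_run_add(&runq, thread);
 *	next = _priq_run_best(&runq);
 *	_priq_run_remove(&runq, next);
 */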

static ALWAYS_INLINE void z_priq_dumb_init(sys_dlist_t *pq)
{
	sys_dlist_init(pq);
}

static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	ARG_UNUSED(pq);

	sys_dlist_remove(&thread->base.qnode_dlist);
}

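/* The dumb backend keeps the list sorted by priority at insertion time
 * (see z_priq_dumb_add() below), so the best thread is simply the head.
 */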
static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

static ALWAYS_INLINE void z_priq_rb_init(struct _priq_rb *pq)
{
	*pq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = z_priq_rb_lessthan,
		}
	};
}

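/* Each thread is stamped with a monotonically increasing order_key on
 * insertion; the comparison callback (z_priq_rb_lessthan()) can use it as a
 * stable tiebreaker between equal-priority threads, preserving insertion
 * (FIFO) order among them.
 */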
static ALWAYS_INLINE void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	thread->base.order_key = pq->next_order_key;
	++pq->next_order_key;

	/* Renumber at wraparound.  This is tiny code and in practice it
	 * will almost never be hit on real systems.  But on very
	 * long-running systems where a priq never completely empties and
	 * holds very large numbers of threads, looping over all the
	 * threads like this can cause a latency glitch.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key;
			++pq->next_order_key;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

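/* Removal resets the order_key counter once the tree drains completely, so
 * renumbering is only ever needed while the queue stays continuously
 * populated across a wraparound.
 */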
static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

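/* Scan the occupancy bitmask one word at a time; the lowest set bit in the
 * first non-zero word indexes the highest-priority non-empty queue.
 */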
static ALWAYS_INLINE struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
{
	struct k_thread *thread = NULL;

	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
		if (!pq->bitmask[i]) {
			continue;
		}

#ifdef CONFIG_64BIT
		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
#else
		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
#endif
		sys_dnode_t *n = sys_dlist_peek_head(l);

		if (n != NULL) {
			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
			break;
		}
	}

	return thread;
}

#ifdef CONFIG_SCHED_MULTIQ

struct prio_info {
	uint8_t offset_prio;
	uint8_t idx;
	uint8_t bit;
};

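/* Map a thread priority onto the multiq bookkeeping: offset_prio is the
 * priority rebased so that K_HIGHEST_THREAD_PRIO becomes 0, idx selects the
 * bitmask word and bit the position within it.  For example, with
 * NBITS == 32 an offset_prio of 37 gives idx 1 and bit 5.
 */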
static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
{
	struct prio_info ret;

	ret.offset_prio = old_prio - K_HIGHEST_THREAD_PRIO;
	ret.idx = ret.offset_prio / NBITS;
	ret.bit = ret.offset_prio % NBITS;

	return ret;
}

static ALWAYS_INLINE void z_priq_mq_init(struct _priq_mq *q)
{
	for (int i = 0; i < ARRAY_SIZE(q->queues); i++) {
		sys_dlist_init(&q->queues[i]);
	}
}

static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_append(&pq->queues[pos.offset_prio], &thread->base.qnode_dlist);
	pq->bitmask[pos.idx] |= BIT(pos.bit);
}

static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					   struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[pos.offset_prio])) {
		pq->bitmask[pos.idx] &= ~BIT(pos.bit);
	}
}
#endif /* CONFIG_SCHED_MULTIQ */

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif /* CONFIG_SCHED_CPU_MASK */

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
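/* Insert in priority order: walk the list and place the new thread in front
 * of the first thread it outranks, falling back to appending at the tail.
 * This keeps the list sorted so z_priq_dumb_best() can simply peek the head.
 */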
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
#endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */

#endif /* ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_ */