/*
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_
#define ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_

#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/dlist.h>

extern int32_t z_sched_prio_cmp(struct k_thread *thread_1,
	struct k_thread *thread_2);

bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

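/*
 * The _priq_run_* and _priq_wait_* macros below bind the generic
 * run-queue and wait-queue operations (add/remove/best) to the
 * backend selected by Kconfig.
 */
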
/* Dumb Scheduling */
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		z_priq_dumb_add
#define _priq_run_remove	z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best	z_priq_dumb_mask_best
# else
#  define _priq_run_best	z_priq_dumb_best
# endif /* CONFIG_SCHED_CPU_MASK */
/* Scalable Scheduling */
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		z_priq_rb_add
#define _priq_run_remove	z_priq_rb_remove
#define _priq_run_best		z_priq_rb_best
/* Multi Queue Scheduling */
#elif defined(CONFIG_SCHED_MULTIQ)

#if defined(CONFIG_64BIT)
#define NBITS 64
#else
#define NBITS 32
#endif /* CONFIG_64BIT */

#define _priq_run_add		z_priq_mq_add
#define _priq_run_remove	z_priq_mq_remove
#define _priq_run_best		z_priq_mq_best
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
#endif

/* Scalable Wait Queue */
#if defined(CONFIG_WAITQ_SCALABLE)
#define _priq_wait_add		z_priq_rb_add
#define _priq_wait_remove	z_priq_rb_remove
#define _priq_wait_best		z_priq_rb_best
/* Dumb Wait Queue */
#elif defined(CONFIG_WAITQ_DUMB)
#define _priq_wait_add		z_priq_dumb_add
#define _priq_wait_remove	z_priq_dumb_remove
#define _priq_wait_best		z_priq_dumb_best
#endif

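/* Remove a thread from a "dumb" (priority-ordered dlist) queue. */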
static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	ARG_UNUSED(pq);

	sys_dlist_remove(&thread->base.qnode_dlist);
}

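/*
 * Return the head of a "dumb" queue, or NULL if the queue is empty.
 * z_priq_dumb_add() keeps the list sorted by priority, so the head is
 * the best runnable thread.
 */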
static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

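/*
 * Add a thread to a scalable (red/black tree) queue.  Each thread is
 * stamped with a monotonically increasing order_key, which the tree
 * comparison (z_priq_rb_lessthan()) can use to keep equal-priority
 * threads in FIFO order.
 */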
static ALWAYS_INLINE void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	thread->base.order_key = pq->next_order_key;
	++pq->next_order_key;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key;
			++pq->next_order_key;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

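/*
 * Remove a thread from a scalable queue.  When the tree becomes empty
 * the order_key counter is reset, which avoids the wraparound
 * renumbering in z_priq_rb_add() in the common case.
 */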
static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

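/* Return the leftmost (best) thread of a scalable queue, or NULL if empty. */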
static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

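/*
 * Return the best thread of a multi-queue: scan the bitmask for the
 * first non-empty priority bucket (the lowest set bit corresponds to
 * the highest priority) and return the head of that bucket's list, or
 * NULL if all buckets are empty.
 */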
static ALWAYS_INLINE struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
{
	struct k_thread *thread = NULL;

	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
		if (!pq->bitmask[i]) {
			continue;
		}

#ifdef CONFIG_64BIT
		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
#else
		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
#endif
		sys_dnode_t *n = sys_dlist_peek_head(l);

		if (n != NULL) {
			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
			break;
		}
	}

	return thread;
}

#ifdef CONFIG_SCHED_MULTIQ

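/*
 * Decomposed thread priority used by the multi-queue backend:
 * offset_prio is the zero-based bucket index, and idx/bit locate that
 * bucket's flag inside the bitmask[] words.
 */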
struct prio_info {
	uint8_t offset_prio;
	uint8_t idx;
	uint8_t bit;
};

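/*
 * Map a thread priority onto its multi-queue bucket and the
 * corresponding bitmask word/bit (NBITS bits per word).
 */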
static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
{
	struct prio_info ret;

	ret.offset_prio = old_prio - K_HIGHEST_THREAD_PRIO;
	ret.idx = ret.offset_prio / NBITS;
	ret.bit = ret.offset_prio % NBITS;

	return ret;
}

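/*
 * Append a thread to its priority bucket and mark that bucket as
 * non-empty in the bitmask.
 */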
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_append(&pq->queues[pos.offset_prio], &thread->base.qnode_dlist);
	pq->bitmask[pos.idx] |= BIT(pos.bit);
}

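/*
 * Remove a thread from its priority bucket, clearing the bucket's
 * bitmask bit when the bucket becomes empty.
 */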
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					   struct k_thread *thread)
{
	struct prio_info pos = get_prio_info(thread->base.prio);

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[pos.offset_prio])) {
		pq->bitmask[pos.idx] &= ~BIT(pos.bit);
	}
}
#endif /* CONFIG_SCHED_MULTIQ */

#ifdef CONFIG_SCHED_CPU_MASK
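/*
 * Like z_priq_dumb_best(), but skip threads whose CPU mask excludes
 * the current CPU.
 */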
static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif /* CONFIG_SCHED_CPU_MASK */

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
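/*
 * Insert a thread into a "dumb" queue, keeping the list sorted by
 * priority.  Insertion goes before the first thread that compares
 * lower, so threads of equal priority stay in FIFO order.
 */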
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
#endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */

#endif /* ZEPHYR_KERNEL_INCLUDE_PRIORITY_Q_H_ */