/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <timeout_q.h>
#include "msgdev.h"

/* This file implements a fake device that creates and enqueues
 * "struct msg" messages for handling by the rest of the test. It's
 * based on Zephyr kernel timeouts only.
 */

/* Note: we use the internal timeout API to get tick precision;
 * k_timer limits us to milliseconds.
 */
static struct _timeout timeout;

/* The "proc_cyc" parameter in the message, indicating how many cycles
 * the target thread should delay while "processing" the message, will
 * be a random number between zero and this value.
 */
uint32_t max_duty_cyc;

uint32_t msg_seq;

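/* Queue holding messages generated by the fake ISR until they are
 * retrieved via message_dev_fetch().
 */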
K_MSGQ_DEFINE(hw_msgs, sizeof(struct msg), MAX_EVENTS, sizeof(uint32_t));

static void timeout_reset(void);

/* Use a custom RNG for good statistics; sys_rand32_get() is just a
 * timer counter on some platforms. Note that this is used only
 * inside the ISR and needs no locking for the otherwise non-atomic
 * state.
 */
static uint32_t rand32(void)
{
	static uint64_t state;

	if (!state) {
		state = ((uint64_t)k_cycle_get_32()) << 16;
	}

	/* MMIX LCRNG parameters */
	state = state * 6364136223846793005ULL + 1442695040888963407ULL;
	return (uint32_t)(state >> 32);
}

/* This acts as the "ISR" for our fake device. It "reads from the
 * hardware" a single timestamped message which needs to be dispatched
 * (by the MetaIRQ) to a random thread, with a random argument
 * indicating how long the thread should "process" the message.
 */
static void dev_timer_expired(struct _timeout *t)
{
	__ASSERT_NO_MSG(t == &timeout);
	uint32_t timestamp = k_cycle_get_32();
	struct msg m;

	m.seq = msg_seq++;
	m.timestamp = timestamp;
	m.target = rand32() % NUM_THREADS;
	m.proc_cyc = rand32() % max_duty_cyc;

	int ret = k_msgq_put(&hw_msgs, &m, K_NO_WAIT);

	if (ret != 0) {
		printk("ERROR: Queue full, event dropped!\n");
	}

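	/* Stop re-arming the timer once MAX_EVENTS messages have been
	 * generated, ending the test's event stream.
	 */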
	if (m.seq < MAX_EVENTS) {
		timeout_reset();
	}
}

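/* Schedule the next fake "interrupt" a uniformly random number of
 * ticks (less than MAX_EVENT_DELAY_TICKS) in the future.
 */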
static void timeout_reset(void)
{
	uint32_t ticks = rand32() % MAX_EVENT_DELAY_TICKS;

	z_add_timeout(&timeout, dev_timer_expired, Z_TIMEOUT_TICKS(ticks));
}

void message_dev_init(void)
{
	/* Compute a bound for the proc_cyc message parameter such
	 * that on average we request a known percent of available
	 * CPU. We want the load to sometimes back up and require
	 * queueing, but to be achievable over time.
	 */
	uint64_t cyc_per_tick = k_ticks_to_cyc_near64(1);
	uint64_t avg_ticks_per_event = MAX_EVENT_DELAY_TICKS / 2;
	uint64_t avg_cyc_per_event = cyc_per_tick * avg_ticks_per_event;

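	/* proc_cyc is uniform in [0, max_duty_cyc), so its mean is
	 * max_duty_cyc / 2; the factor of 2 below makes the average
	 * requested processing time per event equal to
	 * AVERAGE_LOAD_TARGET_PCT percent of the average cycle count
	 * between events.
	 */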
	max_duty_cyc = (2 * avg_cyc_per_event * AVERAGE_LOAD_TARGET_PCT) / 100;

	z_add_timeout(&timeout, dev_timer_expired, K_NO_WAIT);
}

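/* Blocks until the fake device has enqueued a message, then copies it
 * out to the caller.
 */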
void message_dev_fetch(struct msg *m)
{
	int ret = k_msgq_get(&hw_msgs, m, K_FOREVER);

	__ASSERT_NO_MSG(ret == 0);
}