/* This file is part of the CivetWeb web server.
 * See https://github.com/civetweb/civetweb/
 * (C) 2014-2018 by the CivetWeb authors, MIT license.
 */

#if !defined(MAX_TIMERS)
#define MAX_TIMERS MAX_WORKER_THREADS
#endif

typedef int (*taction)(void *arg);

struct ttimer {
	double time;
	double period;
	taction action;
	void *arg;
};

struct ttimers {
	pthread_t threadid;      /* Timer thread ID */
	pthread_mutex_t mutex;   /* Protects timer lists */
	struct ttimer *timers;   /* List of timers */
	unsigned timer_count;    /* Current size of timer list */
	unsigned timer_capacity; /* Capacity of timer list */
#if defined(_WIN32)
	DWORD last_tick;
	uint64_t now_tick64;
#endif
};


TIMER_API double
timer_getcurrenttime(struct mg_context *ctx)
{
#if defined(_WIN32)
	/* GetTickCount returns milliseconds since system start as an
	 * unsigned 32 bit value. It will wrap around every 49.7 days.
	 * We need to use a 64 bit counter (which will wrap in about
	 * 500 million years), by adding the 32 bit difference since the
	 * last call to a 64 bit counter. This algorithm only works if
	 * this function is called at least once every 7 weeks. */
	uint64_t now_tick64 = 0;
	DWORD now_tick = GetTickCount();

	if (ctx->timers) {
		pthread_mutex_lock(&ctx->timers->mutex);
		ctx->timers->now_tick64 += now_tick - ctx->timers->last_tick;
		now_tick64 = ctx->timers->now_tick64;
		ctx->timers->last_tick = now_tick;
		pthread_mutex_unlock(&ctx->timers->mutex);
	}
	return (double)now_tick64 * 1.0E-3;
#else
	struct timespec now_ts;

	(void)ctx;
	clock_gettime(CLOCK_MONOTONIC, &now_ts);
	return (double)now_ts.tv_sec + (double)now_ts.tv_nsec * 1.0E-9;
#endif
}


TIMER_API int
timer_add(struct mg_context *ctx,
          double next_time,
          double period,
          int is_relative,
          taction action,
          void *arg)
{
	int error = 0;
	double now;

	if (!ctx->timers) {
		return 1;
	}

	now = timer_getcurrenttime(ctx);

	/* HCP24: if is_relative = 0 and next_time < now,
	 * the action would be called as fast as possible;
	 * if, in addition, period > 0,
	 * the action would be called as fast as possible
	 * n times, until (next_time + (n * period)) > now,
	 * and only then at the requested period.
	 * Solution:
	 * if next_time < now, set next_time = now.
	 * The first callback is executed as soon as possible (now),
	 * but all following callbacks at the requested period. */
	if (is_relative) {
		next_time += now;
	}

	/* Timers cannot be set in the past. */
	if (next_time < now) {
		next_time = now;
	}

	pthread_mutex_lock(&ctx->timers->mutex);
	if (ctx->timers->timer_count == MAX_TIMERS) {
		error = 1;
	} else if (ctx->timers->timer_count == ctx->timers->timer_capacity) {
		/* Grow the timer list: double the capacity, plus one. */
		unsigned capacity = (ctx->timers->timer_capacity * 2) + 1;
		struct ttimer *timers =
		    (struct ttimer *)mg_realloc_ctx(ctx->timers->timers,
		                                    capacity * sizeof(struct ttimer),
		                                    ctx);
		if (timers) {
			ctx->timers->timers = timers;
			ctx->timers->timer_capacity = capacity;
		} else {
			error = 1;
		}
	}
	if (!error) {
		/* Insert the new timer into the sorted list.
		 * A linear list is still the most efficient for a small number
		 * of timers; for many timers, other algorithms would work
		 * better. */
		unsigned u = ctx->timers->timer_count;
		for (; (u > 0) && (ctx->timers->timers[u - 1].time > next_time); u--) {
			ctx->timers->timers[u] = ctx->timers->timers[u - 1];
		}
		ctx->timers->timers[u].time = next_time;
		ctx->timers->timers[u].period = period;
		ctx->timers->timers[u].action = action;
		ctx->timers->timers[u].arg = arg;
		ctx->timers->timer_count++;
	}
	pthread_mutex_unlock(&ctx->timers->mutex);
	return error;
}


static void
timer_thread_run(void *thread_func_param)
{
	struct mg_context *ctx = (struct mg_context *)thread_func_param;
	double d;
	unsigned u;
	int re_schedule;
	struct ttimer t;

	mg_set_thread_name("timer");

	if (ctx->callbacks.init_thread) {
		/* Timer thread */
		ctx->callbacks.init_thread(ctx, 2);
	}

	d = timer_getcurrenttime(ctx);

	while (ctx->stop_flag == 0) {
		pthread_mutex_lock(&ctx->timers->mutex);
		if ((ctx->timers->timer_count > 0)
		    && (d >= ctx->timers->timers[0].time)) {
			/* The first timer is due: remove it from the sorted list
			 * and call its action outside the lock. */
			t = ctx->timers->timers[0];
			for (u = 1; u < ctx->timers->timer_count; u++) {
				ctx->timers->timers[u - 1] = ctx->timers->timers[u];
			}
			ctx->timers->timer_count--;
			pthread_mutex_unlock(&ctx->timers->mutex);
			re_schedule = t.action(t.arg);
			if (re_schedule && (t.period > 0)) {
				timer_add(ctx, t.time + t.period, t.period, 0, t.action, t.arg);
			}
			continue;
		} else {
			pthread_mutex_unlock(&ctx->timers->mutex);
		}

/* 10 ms seems reasonable.
 * A faster loop (smaller sleep value) increases CPU load,
 * a slower loop (higher sleep value) decreases timer accuracy.
 */
#if defined(_WIN32)
		Sleep(10);
#else
		usleep(10000);
#endif

		d = timer_getcurrenttime(ctx);
	}
}


#if defined(_WIN32)
static unsigned __stdcall timer_thread(void *thread_func_param)
{
	timer_thread_run(thread_func_param);
	return 0;
}
#else
static void *
timer_thread(void *thread_func_param)
{
	struct sigaction sa;

	/* Ignore SIGPIPE */
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &sa, NULL);

	timer_thread_run(thread_func_param);
	return NULL;
}
#endif /* _WIN32 */


TIMER_API int
timers_init(struct mg_context *ctx)
{
	/* Initialize timers data structure */
	ctx->timers =
	    (struct ttimers *)mg_calloc_ctx(sizeof(struct ttimers), 1, ctx);

	if (!ctx->timers) {
		return -1;
	}
	ctx->timers->timers = NULL;

	/* Initialize mutex */
	if (0 != pthread_mutex_init(&ctx->timers->mutex, NULL)) {
		mg_free(ctx->timers);
		ctx->timers = NULL;
		return -1;
	}

	/* For some systems timer_getcurrenttime does some initialization
	 * during the first call. Call it once now, ignore the result. */
	(void)timer_getcurrenttime(ctx);

	/* Start timer thread */
	if (mg_start_thread_with_id(timer_thread, ctx, &ctx->timers->threadid)
	    != 0) {
		(void)pthread_mutex_destroy(&ctx->timers->mutex);
		mg_free(ctx->timers);
		ctx->timers = NULL;
		return -1;
	}

	return 0;
}


TIMER_API void
timers_exit(struct mg_context *ctx)
{
	if (ctx->timers) {
		mg_join_thread(ctx->timers->threadid);
		(void)pthread_mutex_destroy(&ctx->timers->mutex);
		mg_free(ctx->timers->timers);
		mg_free(ctx->timers);
		ctx->timers = NULL;
	}
}


/* End of timer.inl */
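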