1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "chre/core/event_loop.h"
18
19 #include "chre/core/event.h"
20 #include "chre/core/event_loop_manager.h"
21 #include "chre/core/nanoapp.h"
22 #include "chre/platform/context.h"
23 #include "chre/platform/fatal_error.h"
24 #include "chre/platform/log.h"
25 #include "chre/platform/system_time.h"
26 #include "chre/util/conditional_lock_guard.h"
27 #include "chre/util/lock_guard.h"
28 #include "chre/util/system/debug_dump.h"
29 #include "chre/util/time.h"
30 #include "chre_api/chre/version.h"
31
32 namespace chre {
33
// Out-of-line definition required for nonintegral static constexpr members
// (pre-C++17 ODR rules): any ODR-use needs a namespace-scope definition.
constexpr Nanoseconds EventLoop::kIntervalWakeupBucket;
36
37 namespace {
38
39 /**
40 * Populates a chreNanoappInfo structure using info from the given Nanoapp
41 * instance.
42 *
43 * @param app A potentially null pointer to the Nanoapp to read from
44 * @param info The structure to populate - should not be null, but this function
45 * will handle that input
46 *
47 * @return true if neither app nor info were null, and info was populated
48 */
populateNanoappInfo(const Nanoapp * app,struct chreNanoappInfo * info)49 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
50 bool success = false;
51
52 if (app != nullptr && info != nullptr) {
53 info->appId = app->getAppId();
54 info->version = app->getAppVersion();
55 info->instanceId = app->getInstanceId();
56 success = true;
57 }
58
59 return success;
60 }
61
62 } // anonymous namespace
63
findNanoappInstanceIdByAppId(uint64_t appId,uint32_t * instanceId) const64 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
65 uint32_t *instanceId) const {
66 CHRE_ASSERT(instanceId != nullptr);
67 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
68
69 bool found = false;
70 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
71 if (app->getAppId() == appId) {
72 *instanceId = app->getInstanceId();
73 found = true;
74 break;
75 }
76 }
77
78 return found;
79 }
80
forEachNanoapp(NanoappCallbackFunction * callback,void * data)81 void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
82 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
83
84 for (const UniquePtr<Nanoapp> &nanoapp : mNanoapps) {
85 callback(nanoapp.get(), data);
86 }
87 }
88
invokeMessageFreeFunction(uint64_t appId,chreMessageFreeFunction * freeFunction,void * message,size_t messageSize)89 void EventLoop::invokeMessageFreeFunction(uint64_t appId,
90 chreMessageFreeFunction *freeFunction,
91 void *message, size_t messageSize) {
92 Nanoapp *nanoapp = lookupAppByAppId(appId);
93 if (nanoapp == nullptr) {
94 LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
95 } else {
96 auto prevCurrentApp = mCurrentApp;
97 mCurrentApp = nanoapp;
98 freeFunction(message, messageSize);
99 mCurrentApp = prevCurrentApp;
100 }
101 }
102
// Main dispatch loop. Pulls events from the inbound queue, fans them out to
// per-nanoapp queues, and gives each nanoapp a chance to process one event
// per iteration, until onStopComplete() clears mRunning. On exit, drains all
// queues and unloads every nanoapp. Must run on the EventLoop's own thread.
void EventLoop::run() {
  LOGI("EventLoop start");

  bool havePendingEvents = false;
  while (mRunning) {
    // Events are delivered in two stages: first they arrive in the inbound
    // event queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to smaller event queues
    // associated with each Nanoapp that should receive the event. Once the
    // event is delivered to all interested Nanoapps, its free callback is
    // invoked.
    if (!havePendingEvents || !mEvents.empty()) {
      // Track the high-water mark of the inbound queue for debug dumps
      if (mEvents.size() > mMaxEventPoolUsage) {
        mMaxEventPoolUsage = mEvents.size();
      }

      // mEvents.pop() will be a blocking call if mEvents.empty()
      Event *event = mEvents.pop();
      // Need size() + 1 since the to-be-processed event has already been
      // removed.
      mPowerControlManager.preEventLoopProcess(mEvents.size() + 1);
      distributeEvent(event);
    }

    // One round-robin pass over nanoapps with queued events; returns true if
    // any still have work pending, which lets the next iteration skip the
    // blocking pop above
    havePendingEvents = deliverEvents();

    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Deliver any events sitting in Nanoapps' own queues (we could drop them to
  // exit faster, but this is less code and should complete quickly under normal
  // conditions), then purge the main queue of events pending distribution. All
  // nanoapps should be prevented from sending events or messages at this point
  // via currentNanoappIsStopping() returning true.
  flushNanoappEventQueues();
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps (back-to-front, so erase never shifts the
  // remaining elements)
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}
149
// Validates and starts a nanoapp: rejects incompatible API versions and
// duplicate app IDs, assigns a fresh instance ID, transfers ownership into
// mNanoapps, then calls the nanoapp's start() entry point with mCurrentApp
// set. On start() failure the nanoapp is popped back off the list; on success
// a CHRE_EVENT_NANOAPP_STARTED notification is posted. Returns true iff the
// nanoapp started successfully. On success, the caller's UniquePtr is left
// null (ownership moved); on failure before the push, it is left untouched.
bool EventLoop::startNanoapp(UniquePtr<Nanoapp> &nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop &eventLoop = eventLoopManager->getEventLoop();
  uint32_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument (guards release builds where CHRE_ASSERT may be
    // compiled out)
  } else if (nanoapp->getTargetApiVersion() <
             CHRE_FIRST_SUPPORTED_API_VERSION) {
    LOGE("Incompatible nanoapp (target ver 0x%" PRIx32
         ", first supported ver 0x%" PRIx32 ")",
         nanoapp->getTargetApiVersion(),
         static_cast<uint32_t>(CHRE_FIRST_SUPPORTED_API_VERSION));
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    LOGE("App with ID 0x%016" PRIx64
         " already exists as instance ID 0x%" PRIx32,
         nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    // Reserve capacity up front so the push under the lock cannot fail
    LOG_OOM();
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    // Invoke the nanoapp's entry point with its context active
    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}
206
// Stops and unloads the nanoapp with the given instance ID, first flushing
// all in-flight messages and events that could reference its memory. System
// nanoapps are only unloaded when allowSystemNanoappUnload is true. Returns
// true if the nanoapp was found and unloaded. Must run on the event loop
// thread, since it drains event queues inline.
bool EventLoop::unloadNanoapp(uint32_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()
            ->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Process any pending events, with the intent of ensuring that we free
        // all events generated by this nanoapp
        flushNanoappEventQueues();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        // TODO: right now we assume that the nanoapp will clean up all of its
        // resource allocations in its nanoappEnd callback (memory, sensor
        // subscriptions, etc.), otherwise we're leaking resources. We should
        // perform resource cleanup automatically here to avoid these types of
        // potential leaks.

        LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId);
        unloaded = true;
      }
      break;
    }
  }

  return unloaded;
}
261
postEventOrDie(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t targetInstanceId,uint16_t targetGroupMask)262 void EventLoop::postEventOrDie(uint16_t eventType, void *eventData,
263 chreEventCompleteFunction *freeCallback,
264 uint32_t targetInstanceId,
265 uint16_t targetGroupMask) {
266 if (mRunning) {
267 if (!allocateAndPostEvent(eventType, eventData, freeCallback,
268 kSystemInstanceId, targetInstanceId,
269 targetGroupMask)) {
270 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
271 }
272 } else if (freeCallback != nullptr) {
273 freeCallback(eventType, eventData);
274 }
275 }
276
postSystemEvent(uint16_t eventType,void * eventData,SystemEventCallbackFunction * callback,void * extraData)277 bool EventLoop::postSystemEvent(uint16_t eventType, void *eventData,
278 SystemEventCallbackFunction *callback,
279 void *extraData) {
280 if (mRunning) {
281 Event *event =
282 mEventPool.allocate(eventType, eventData, callback, extraData);
283
284 if (event == nullptr || !mEvents.push(event)) {
285 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
286 }
287 return true;
288 }
289 return false;
290 }
291
postLowPriorityEventOrFree(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId,uint16_t targetGroupMask)292 bool EventLoop::postLowPriorityEventOrFree(
293 uint16_t eventType, void *eventData,
294 chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
295 uint32_t targetInstanceId, uint16_t targetGroupMask) {
296 bool eventPosted = false;
297
298 if (mRunning) {
299 if (mEventPool.getFreeBlockCount() > kMinReservedHighPriorityEventCount) {
300 eventPosted = allocateAndPostEvent(eventType, eventData, freeCallback,
301 senderInstanceId, targetInstanceId,
302 targetGroupMask);
303 if (!eventPosted) {
304 LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32,
305 eventType, targetInstanceId);
306 }
307 }
308 }
309
310 if (!eventPosted && freeCallback != nullptr) {
311 freeCallback(eventType, eventData);
312 }
313
314 return eventPosted;
315 }
316
stop()317 void EventLoop::stop() {
318 auto callback = [](uint16_t /*type*/, void *data, void * /*extraData*/) {
319 auto *obj = static_cast<EventLoop *>(data);
320 obj->onStopComplete();
321 };
322
323 // Stop accepting new events and tell the main loop to finish
324 postSystemEvent(static_cast<uint16_t>(SystemCallbackType::Shutdown),
325 /*eventData=*/this, callback, /*extraData=*/nullptr);
326 }
327
// Invoked from within the event loop (via the Shutdown event posted by
// stop()), this clears mRunning so run() exits its main loop and begins
// teardown.
void EventLoop::onStopComplete() {
  mRunning = false;
}
331
// Returns the Nanoapp with the given instance ID, or nullptr if not found.
// Takes mNanoappsLock when called from outside the event loop thread; the
// returned pointer is only guaranteed valid while the nanoapp stays loaded.
Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  return lookupAppByInstanceId(instanceId);
}
336
populateNanoappInfoForAppId(uint64_t appId,struct chreNanoappInfo * info) const337 bool EventLoop::populateNanoappInfoForAppId(
338 uint64_t appId, struct chreNanoappInfo *info) const {
339 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
340 Nanoapp *app = lookupAppByAppId(appId);
341 return populateNanoappInfo(app, info);
342 }
343
populateNanoappInfoForInstanceId(uint32_t instanceId,struct chreNanoappInfo * info) const344 bool EventLoop::populateNanoappInfoForInstanceId(
345 uint32_t instanceId, struct chreNanoappInfo *info) const {
346 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
347 Nanoapp *app = lookupAppByInstanceId(instanceId);
348 return populateNanoappInfo(app, info);
349 }
350
currentNanoappIsStopping() const351 bool EventLoop::currentNanoappIsStopping() const {
352 return (mCurrentApp == mStoppingNanoapp || !mRunning);
353 }
354
logStateToBuffer(DebugDumpWrapper & debugDump) const355 void EventLoop::logStateToBuffer(DebugDumpWrapper &debugDump) const {
356 debugDump.print("\nEvent Loop:\n");
357 debugDump.print(" Max event pool usage: %zu/%zu\n", mMaxEventPoolUsage,
358 kMaxEventCount);
359
360 Nanoseconds timeSince =
361 SystemTime::getMonotonicTime() - mTimeLastWakeupBucketCycled;
362 uint64_t timeSinceMins =
363 timeSince.toRawNanoseconds() / kOneMinuteInNanoseconds;
364 uint64_t durationMins =
365 kIntervalWakeupBucket.toRawNanoseconds() / kOneMinuteInNanoseconds;
366 debugDump.print(" Nanoapp host wakeup tracking: cycled %" PRIu64
367 "mins ago, bucketDuration=%" PRIu64 "mins\n",
368 timeSinceMins, durationMins);
369
370 debugDump.print("\nNanoapps:\n");
371 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
372 app->logStateToBuffer(debugDump);
373 }
374 }
375
allocateAndPostEvent(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId,uint16_t targetGroupMask)376 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
377 chreEventCompleteFunction *freeCallback,
378 uint32_t senderInstanceId,
379 uint32_t targetInstanceId,
380 uint16_t targetGroupMask) {
381 bool success = false;
382
383 Event *event =
384 mEventPool.allocate(eventType, eventData, freeCallback, senderInstanceId,
385 targetInstanceId, targetGroupMask);
386 if (event != nullptr) {
387 success = mEvents.push(event);
388 }
389
390 return success;
391 }
392
deliverEvents()393 bool EventLoop::deliverEvents() {
394 bool havePendingEvents = false;
395
396 // Do one loop of round-robin. We might want to have some kind of priority or
397 // time sharing in the future, but this should be good enough for now.
398 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
399 if (app->hasPendingEvent()) {
400 havePendingEvents |= deliverNextEvent(app);
401 }
402 }
403
404 return havePendingEvents;
405 }
406
// Delivers the next queued event to the given nanoapp with its context
// (mCurrentApp) active, frees the event if no other nanoapp queue still
// references it, and reports whether the nanoapp has more events pending.
bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp> &app) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  Event *event = app->processNextEvent();
  mCurrentApp = nullptr;

  // Once the last interested nanoapp has processed the event, release it back
  // to the pool (invoking its free callback)
  if (event->isUnreferenced()) {
    freeEvent(event);
  }

  return app->hasPendingEvent();
}
419
distributeEvent(Event * event)420 void EventLoop::distributeEvent(Event *event) {
421 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
422 if ((event->targetInstanceId == chre::kBroadcastInstanceId &&
423 app->isRegisteredForBroadcastEvent(event->eventType,
424 event->targetAppGroupMask)) ||
425 event->targetInstanceId == app->getInstanceId()) {
426 app->postEvent(event);
427 }
428 }
429
430 if (event->isUnreferenced()) {
431 // Log if an event unicast to a nanoapp isn't delivered, as this is could be
432 // a bug (e.g. something isn't properly keeping track of when nanoapps are
433 // unloaded), though it could just be a harmless transient issue (e.g. race
434 // condition with nanoapp unload, where we post an event to a nanoapp just
435 // after queues are flushed while it's unloading)
436 if (event->targetInstanceId != kBroadcastInstanceId &&
437 event->targetInstanceId != kSystemInstanceId) {
438 LOGW("Dropping event 0x%" PRIx16 " from instanceId %" PRIu32 "->%" PRIu32,
439 event->eventType, event->senderInstanceId, event->targetInstanceId);
440 }
441 freeEvent(event);
442 }
443 }
444
flushInboundEventQueue()445 void EventLoop::flushInboundEventQueue() {
446 while (!mEvents.empty()) {
447 distributeEvent(mEvents.pop());
448 }
449 }
450
flushNanoappEventQueues()451 void EventLoop::flushNanoappEventQueues() {
452 while (deliverEvents())
453 ;
454 }
455
// Invokes the event's free callback (if any) in the context of the event's
// original sender, then returns the Event object to the pool.
void EventLoop::freeEvent(Event *event) {
  if (event->hasFreeCallback()) {
    // TODO: find a better way to set the context to the creator of the event
    // (lookupAppByInstanceId yields nullptr for system-sent events, which is
    // the intended context in that case)
    mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
    event->invokeFreeCallback();
    mCurrentApp = nullptr;
  }

  mEventPool.deallocate(event);
}
466
lookupAppByAppId(uint64_t appId) const467 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
468 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
469 if (app->getAppId() == appId) {
470 return app.get();
471 }
472 }
473
474 return nullptr;
475 }
476
lookupAppByInstanceId(uint32_t instanceId) const477 Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const {
478 // The system instance ID always has nullptr as its Nanoapp pointer, so can
479 // skip iterating through the nanoapp list for that case
480 if (instanceId != kSystemInstanceId) {
481 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
482 if (app->getInstanceId() == instanceId) {
483 return app.get();
484 }
485 }
486 }
487
488 return nullptr;
489 }
490
notifyAppStatusChange(uint16_t eventType,const Nanoapp & nanoapp)491 void EventLoop::notifyAppStatusChange(uint16_t eventType,
492 const Nanoapp &nanoapp) {
493 auto *info = memoryAlloc<chreNanoappInfo>();
494 if (info == nullptr) {
495 LOG_OOM();
496 } else {
497 info->appId = nanoapp.getAppId();
498 info->version = nanoapp.getAppVersion();
499 info->instanceId = nanoapp.getInstanceId();
500
501 postEventOrDie(eventType, info, freeEventDataCallback);
502 }
503 }
504
// Ends and destroys the nanoapp at the given index in mNanoapps: invokes its
// end() entry point with its context active, then erases it from the list.
// Caller is responsible for ensuring no pending events/messages still
// reference the nanoapp (see unloadNanoapp()).
void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp> &nanoapp = mNanoapps[index];

  // Lock here to prevent the nanoapp instance from being accessed between the
  // time it is ended and fully erased
  LockGuard<Mutex> lock(mNanoappsLock);

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();
  nanoapp->end();
  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  mNanoapps.erase(index);
}
520
handleNanoappWakeupBuckets()521 void EventLoop::handleNanoappWakeupBuckets() {
522 Nanoseconds now = SystemTime::getMonotonicTime();
523 Nanoseconds duration = now - mTimeLastWakeupBucketCycled;
524 if (duration > kIntervalWakeupBucket) {
525 size_t numBuckets = static_cast<size_t>(
526 duration.toRawNanoseconds() / kIntervalWakeupBucket.toRawNanoseconds());
527 mTimeLastWakeupBucketCycled = now;
528 for (auto &nanoapp : mNanoapps) {
529 nanoapp->cycleWakeupBuckets(numBuckets);
530 }
531 }
532 }
533
534 } // namespace chre
535