// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
#include "intel_guc_log.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out by reading the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status will print out the firmware loading
 * status and the scratch register values.
 */

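/*
 * Example (illustrative only; the paths are assumptions, with debugfs
 * mounted at /sys/kernel/debug and the GPU being DRM minor 0):
 *
 *	1) boot with i915.guc_log_level=2 on the kernel command line
 *	2) cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */
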
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

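/*
 * Tell GuC to (re)configure its logging: the enable bit, the "default
 * logging" flag and the verbosity are all packed into the single
 * parameter dword of the action.
 */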
static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, as the Producer could be writing onto the
	 * same sub buffer from which the Consumer is reading. This can't be
	 * avoided unless the Consumer is fast enough and can always run in
	 * tandem with the Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly
	 * copy the data, but that would have been a bit convoluted, as we need
	 * to write to only certain locations inside a sub buffer, which cannot
	 * be done without using relay_reserve() along with relay_write(). So
	 * it's better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

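/*
 * Detect whether GuC reported a new buffer-full event since the last
 * sample. buffer_full_cnt is only a 4 bit counter, so account for
 * wraparound when accumulating sampled_overflow.
 */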
static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return ISR_BUFFER_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return DPC_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

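/*
 * Snapshot the shared GuC log buffer into the current relay sub buffer:
 * for each log type, copy out the state header and the newly written
 * data, then advance the shared read pointer so GuC can reuse the space.
 */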
static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_enabled(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use a ratelimited message to avoid a deluge; logs might be
		 * getting consumed by User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to Host; for consistency,
		 * set the write pointer to the same value as sampled_write_ptr
		 * in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}

static int guc_log_map(struct intel_guc_log *log)
{
	void *vaddr;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	log->relay.buf_addr = vaddr;

	return 0;
}

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}

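/* Early, one-time setup of the relay lock and the log-capture worker. */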
void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally the device is expected to be active at this time, so
	 * the get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

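/*
 * Pick the default log level from the guc_log_level modparam: negative
 * values select a build-config dependent default, while values above
 * GUC_LOG_LEVEL_MAX trigger a warning and are overridden.
 */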
static u32 __get_default_log_level(struct intel_guc_log *log)
{
	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915_modparams.guc_log_level;
}

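/*
 * Allocate the shared log buffer object (the state page plus the three
 * log sections, see the layout diagram below) and latch the default
 * log level.
 */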
int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |           DPC logs            |
	 *  +===============================+ + DPC_SIZE
	 *  |           ISR logs            |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
			ISR_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, enableddisabled(log->level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
}

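/*
 * Change the GuC logging level at runtime via the log-control H2G action,
 * serialised against concurrent callers by struct_mutex.
 */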
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels from 0 to max; we use 0 to indicate
	 * that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}

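/*
 * Create the relay channel, map the shared log buffer and start relaying
 * log snapshots to the "guc_log" file in debugfs.
 */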
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_enabled(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

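/*
 * Force an immediate flush: ask GuC to update the shared log buffer and
 * capture the result into relay right away, instead of waiting for the
 * next flush interrupt.
 */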
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

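/*
 * Tear down relaying: stop listening for flush events, drain any capture
 * work in flight, then unmap the log buffer and destroy the relay channel.
 */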
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	guc_log_disable_flush_events(log);
	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

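/*
 * Called when GuC signals that the log buffer needs flushing; the actual
 * copy-out is deferred to the capture worker.
 */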
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(system_highpri_wq, &log->relay.flush_work);
}