// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;

	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending_requests);
	INIT_LIST_HEAD(&ct->incoming_requests);
	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}

static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}
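
	/*
	 * Illustrative offsets, assuming a 4 KiB page:
	 *	desc[CTB_SEND] at blob + 0x000
	 *	desc[CTB_RECV] at blob + 0x400
	 *	cmds[CTB_SEND] at blob + 0x800
	 *	cmds[CTB_RECV] at blob + 0xc00
	 */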

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			ctch->owner, err);
	return err;
}

static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(ctch->enabled);

	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}

static int ctch_enable(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(!ctch->vma);

	GEM_BUG_ON(ctch->enabled);

	/* vma should already be allocated and mapped */
	base = intel_guc_ggtt_offset(guc, ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with the RECV buffer;
	 * descriptors are in the first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ctch->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

static void ctch_disable(struct intel_guc *guc,
			 struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->enabled);

	ctch->enabled = false;

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
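
/*
 * Worked example (illustrative only; action_code and data are placeholders,
 * not taken from a real trace): a request carrying one dword of action data
 * is a 3-dword message. The header len field counts the fence plus the
 * request specific data, so here len = 2:
 *
 *	msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) |
 *		 GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *		 (action_code << GUC_CT_MSG_ACTION_SHIFT);
 *	msg[1] = fence;
 *	msg[2] = data;
 *
 * This is what ctb_write() below emits for an action array of 2 dwords
 * (action code plus one parameter).
 */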

static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;
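
	/*
	 * Worked example (illustrative, assuming 4 KiB pages): each buffer
	 * is PAGE_SIZE/4 = 1024 bytes, i.e. size = 256 dwords. With
	 * head = 250 and tail = 10 the ring has wrapped, so
	 * used = (256 - 250) + 10 = 16 dwords.
	 */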

	/* make sure there is space, including an extra dword for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update the CT buffer descriptor with a new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the command status from the descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request
 * once a response message with the given fence is received. Wait here
 * and check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ctch_send(struct intel_guc_ct *ct,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *response_buf,
		     u32 response_buf_size,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ctch_get_next_fence(ctch);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request.link, &ct->pending_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc_ct *ct = &guc->ct;
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	u32 status = ~0; /* undefined */
	int ret;

	mutex_lock(&guc->send_mutex);

	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
			&status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
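
/*
 * Example usage (illustrative; SOME_GUC_ACTION and param are placeholders,
 * not real definitions): callers put the action code in action[0] followed
 * by any action specific data, and may pass a buffer to receive a longer
 * response:
 *
 *	u32 action[] = { SOME_GUC_ACTION, param };
 *	int ret = intel_guc_send_ct(guc, action, ARRAY_SIZE(action), NULL, 0);
 */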

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
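
/*
 * Worked example (illustrative; exact header contents are up to the GuC
 * firmware): a response to fence 7 carrying one dword of response data
 * has len = 3 (fence + status + data):
 *
 *	msg[0] = (3 << GUC_CT_MSG_LEN_SHIFT) | GUC_CT_MSG_IS_RESPONSE;
 *	msg[1] = 7;        <- fence
 *	msg[2] = status;   <- must satisfy INTEL_GUC_MSG_IS_RESPONSE()
 *	msg[3] = data;
 */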

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->lock);
	list_for_each_entry(req, &ct->pending_requests, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->lock, flags);
	request = list_first_entry_or_null(&ct->incoming_requests,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
	bool done;

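	/*
	 * Process a single request per invocation; if more requests remain,
	 * re-queue ourselves rather than draining the whole list here, so
	 * that a burst of incoming requests does not monopolize the worker.
	 */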
	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
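
/*
 * Illustrative example (header encoding assumed from the ct_header_*()
 * helpers above): a GuC notification using INTEL_GUC_ACTION_DEFAULT with
 * two dwords of data would arrive as:
 *
 *	msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) |
 *		 (INTEL_GUC_ACTION_DEFAULT << GUC_CT_MSG_ACTION_SHIFT);
 *	msg[1] = data0;
 *	msg[2] = data1;
 */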

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

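	/*
	 * We may be running in interrupt context (ct_handle_response() even
	 * asserts in_irq() on this path), so the allocation must not sleep;
	 * hence GFP_ATOMIC.
	 */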
	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request->link, &ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	queue_work(system_unbound_wq, &ct->worker);
	return 0;
}

static void ct_process_host_channel(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (!ctch->enabled)
		return;

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
	struct intel_guc_ct *ct = &guc->ct;

	ct_process_host_channel(ct);
}

/**
 * intel_guc_ct_init - Init CT communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for communication via
 * the CT channel.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	err = ctch_init(guc, ctch);
	if (unlikely(err)) {
		DRM_ERROR("CT: can't open channel %d; err=%d\n",
			  ctch->owner, err);
		return err;
	}

	GEM_BUG_ON(!ctch->vma);
	return 0;
}

/**
 * intel_guc_ct_fini - Fini CT communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for communication via
 * the CT channel.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	ctch_fini(guc, ctch);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	if (ctch->enabled)
		return 0;

	return ctch_enable(guc, ctch);
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	if (!ctch->enabled)
		return;

	ctch_disable(guc, ctch);
}