/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;

	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending_requests);
	INIT_LIST_HEAD(&ct->incoming_requests);
	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}

static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
	return ctch->vma != NULL;
}

static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */
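	/*
	 * Concrete quarter-page offsets (illustrative, assuming 4 KiB pages):
	 * SEND desc at blob + 0, RECV desc at blob + 1024, SEND cmds at
	 * blob + 2048 and RECV cmds at blob + 3072. The pointer setup loop
	 * below derives these as PAGE_SIZE/4 * i (+ PAGE_SIZE/2 for cmds).
	 */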

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			ctch->owner, err);
	return err;
}

static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);

	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}

static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
		GEM_BUG_ON(!ctch->vma);
	}

	/* vma should already be allocated and mapped */
	base = intel_guc_ggtt_offset(guc, ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with the RECV buffer
	 * descriptors are in the first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
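
/*
 * For example, a hypothetical action 0xA0 carrying a single parameter
 * would occupy three dwords: msg[0] = header with len=2 and action=0xA0
 * (encoded as in ctb_write() below), msg[1] = fence, msg[2] = parameter.
 */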

static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;
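	/* e.g. with size=256, head=250, tail=4: used = (256 - 250) + 4 = 10 */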

	/* make sure there is space, including an extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update the CT buffer descriptor with the new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence, then read the status of the command from the
 * descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ctch_send(struct intel_guc_ct *ct,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *response_buf,
		     u32 response_buf_size,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ctch_get_next_fence(ctch);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request.link, &ct->pending_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc_ct *ct = &guc->ct;
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	u32 status = ~0; /* undefined */
	int ret;

	mutex_lock(&guc->send_mutex);

	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
			&status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
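
/*
 * A minimal response thus has len == 2: msg[1] carries the fence that was
 * sent with the request and msg[2] a status dword for which
 * INTEL_GUC_MSG_IS_RESPONSE() holds; any further dwords are copied into
 * the caller's response_buf by ct_handle_response() below.
 */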

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->lock);
	list_for_each_entry(req, &ct->pending_requests, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		if (unlikely(len < 1))
			goto fail_unexpected;
		intel_guc_to_host_process_recv_msg(guc, *payload);
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->lock, flags);
	request = list_first_entry_or_null(&ct->incoming_requests,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
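
/*
 * Unlike a response, a GuC to Host request carries no fence or status;
 * the whole payload is request specific data. ct_handle_request() below
 * therefore queues the entire message, header included, for the worker.
 */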

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request->link, &ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	queue_work(system_unbound_wq, &ct->worker);
	return 0;
}

static void ct_process_host_channel(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (!ctch_is_open(ctch))
		return;

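	/*
	 * Drain the RECV buffer until it is empty: ctb_read() returns
	 * -ENODATA once head catches up with tail, which ends the loop.
	 */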
	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
	struct intel_guc_ct *ct = &guc->ct;

	ct_process_host_channel(ct);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}