1 /*
2  * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "msgqueue.h"
24 #include <engine/falcon.h>
25 #include <subdev/secboot.h>
26 
/* Queues identifiers */
enum {
	/* High Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
	/* Low Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
	/* Message queue for PMU -> Host communication */
	MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
	/* Array size for the queue table; indexes 2-3 are unused here */
	MSGQUEUE_0137C63D_NUM_QUEUES = 5,
};
37 
/* Private state for the 0137c63d PMU message queue implementation */
struct msgqueue_0137c63d {
	struct nvkm_msgqueue base;

	/* Per-queue descriptors, filled in from the firmware INIT message */
	struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
};
/* Upcast a nvkm_msgqueue pointer to its 0137c63d container */
#define msgqueue_0137c63d(q) \
	container_of(q, struct msgqueue_0137c63d, base)
45 
46 struct msgqueue_0137bca5 {
47 	struct msgqueue_0137c63d base;
48 
49 	u64 wpr_addr;
50 };
51 #define msgqueue_0137bca5(q) \
52 	container_of(container_of(q, struct msgqueue_0137c63d, base), \
53 		     struct msgqueue_0137bca5, base);
54 
55 static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue * queue,enum msgqueue_msg_priority priority)56 msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
57 			    enum msgqueue_msg_priority priority)
58 {
59 	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
60 	const struct nvkm_subdev *subdev = priv->base.falcon->owner;
61 
62 	switch (priority) {
63 	case MSGQUEUE_MSG_PRIORITY_HIGH:
64 		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
65 	case MSGQUEUE_MSG_PRIORITY_LOW:
66 		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
67 	default:
68 		nvkm_error(subdev, "invalid command queue!\n");
69 		return ERR_PTR(-EINVAL);
70 	}
71 }
72 
73 static void
msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue * queue)74 msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
75 {
76 	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
77 	struct nvkm_msgqueue_queue *q_queue =
78 		&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
79 
80 	nvkm_msgqueue_process_msgs(&priv->base, q_queue);
81 }
82 
/* Init unit */
#define MSGQUEUE_0137C63D_UNIT_INIT 0x07

/* Message types emitted by the INIT unit */
enum {
	INIT_MSG_INIT = 0x0,
};
89 
/*
 * Fill the PMU boot-time command-line buffer pointed to by 'buf'.
 * The anonymous struct layout must match the firmware's expected
 * command-line structure exactly -- do not reorder or resize fields.
 */
static void
init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	struct {
		u32 reserved;
		u32 freq_hz;
		u32 trace_size;
		u32 trace_dma_base;
		u16 trace_dma_base1;
		u8 trace_dma_offset;
		u32 trace_dma_idx;
		bool secure_mode;
		bool raise_priv_sec;
		struct {
			u32 dma_base;
			u16 dma_base1;
			u8 dma_offset;
			u16 fb_size;
			u8 dma_idx;
		} gc6_ctx;
		u8 pad;
	} *args = buf;

	/* Only secure mode is requested; all other fields stay as-is. */
	args->secure_mode = 1;
}
115 
116 /* forward declaration */
117 static int acr_init_wpr(struct nvkm_msgqueue *queue);
118 
/*
 * Handler for the INIT message the PMU firmware sends after boot.
 * Validates the message origin/type, records each queue's index, offset,
 * size and head/tail register addresses, then kicks off ACR WPR region
 * initialization.  Returns 0 on success or a negative error code.
 */
static int
init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
	/* Layout of the firmware INIT message -- must match the PMU ABI */
	struct {
		struct nvkm_msgqueue_msg base;

		u8 pad;
		u16 os_debug_entry_point;

		struct {
			u16 size;
			u16 offset;
			u8 index;
			u8 pad;
		} queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];

		u16 sw_managed_area_offset;
		u16 sw_managed_area_size;
	} *init = (void *)hdr;
	const struct nvkm_subdev *subdev = _queue->falcon->owner;
	int i;

	if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
		nvkm_error(subdev, "expected message from init unit\n");
		return -EINVAL;
	}

	if (init->base.msg_type != INIT_MSG_INIT) {
		nvkm_error(subdev, "expected PMU init msg\n");
		return -EINVAL;
	}

	for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
		struct nvkm_msgqueue_queue *queue = &priv->queue[i];

		mutex_init(&queue->mutex);

		queue->index = init->queue_info[i].index;
		queue->offset = init->queue_info[i].offset;
		queue->size = init->queue_info[i].size;

		/*
		 * Command queues use per-index head/tail registers; the
		 * message queue has its own dedicated pair.
		 */
		if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
			queue->head_reg = 0x4a0 + (queue->index * 4);
			queue->tail_reg = 0x4b0 + (queue->index * 4);
		} else {
			queue->head_reg = 0x4c8;
			queue->tail_reg = 0x4cc;
		}

		nvkm_debug(subdev,
			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
			   i, queue->index, queue->offset, queue->size);
	}

	/* Complete initialization by initializing WPR region */
	return acr_init_wpr(&priv->base);
}
177 
/* INIT-unit callbacks shared by both queue variants */
static const struct nvkm_msgqueue_init_func
msgqueue_0137c63d_init_func = {
	.gen_cmdline = init_gen_cmdline,
	.init_callback = init_callback,
};
183 
184 
185 
/* ACR unit */
#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a

/* Command types accepted by the ACR unit */
enum {
	ACR_CMD_INIT_WPR_REGION = 0x00,
	ACR_CMD_BOOTSTRAP_FALCON = 0x01,
	ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
194 
195 static void
acr_init_wpr_callback(struct nvkm_msgqueue * queue,struct nvkm_msgqueue_hdr * hdr)196 acr_init_wpr_callback(struct nvkm_msgqueue *queue,
197 		      struct nvkm_msgqueue_hdr *hdr)
198 {
199 	struct {
200 		struct nvkm_msgqueue_msg base;
201 		u32 error_code;
202 	} *msg = (void *)hdr;
203 	const struct nvkm_subdev *subdev = queue->falcon->owner;
204 
205 	if (msg->error_code) {
206 		nvkm_error(subdev, "ACR WPR init failure: %d\n",
207 			   msg->error_code);
208 		return;
209 	}
210 
211 	nvkm_debug(subdev, "ACR WPR init complete\n");
212 	complete_all(&queue->init_done);
213 }
214 
215 static int
acr_init_wpr(struct nvkm_msgqueue * queue)216 acr_init_wpr(struct nvkm_msgqueue *queue)
217 {
218 	/*
219 	 * region_id:	region ID in WPR region
220 	 * wpr_offset:	offset in WPR region
221 	 */
222 	struct {
223 		struct nvkm_msgqueue_hdr hdr;
224 		u8 cmd_type;
225 		u32 region_id;
226 		u32 wpr_offset;
227 	} cmd;
228 	memset(&cmd, 0, sizeof(cmd));
229 
230 	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
231 	cmd.hdr.size = sizeof(cmd);
232 	cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
233 	cmd.region_id = 0x01;
234 	cmd.wpr_offset = 0x00;
235 
236 	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
237 			   acr_init_wpr_callback, NULL, false);
238 
239 	return 0;
240 }
241 
242 
243 static void
acr_boot_falcon_callback(struct nvkm_msgqueue * priv,struct nvkm_msgqueue_hdr * hdr)244 acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
245 			 struct nvkm_msgqueue_hdr *hdr)
246 {
247 	struct acr_bootstrap_falcon_msg {
248 		struct nvkm_msgqueue_msg base;
249 
250 		u32 falcon_id;
251 	} *msg = (void *)hdr;
252 	const struct nvkm_subdev *subdev = priv->falcon->owner;
253 	u32 falcon_id = msg->falcon_id;
254 
255 	if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
256 		nvkm_error(subdev, "in bootstrap falcon callback:\n");
257 		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
258 		return;
259 	}
260 	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
261 }
262 
/* Values for the bootstrap command's 'flags' field */
enum {
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};
267 
268 static int
acr_boot_falcon(struct nvkm_msgqueue * priv,enum nvkm_secboot_falcon falcon)269 acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
270 {
271 	DECLARE_COMPLETION_ONSTACK(completed);
272 	/*
273 	 * flags      - Flag specifying RESET or no RESET.
274 	 * falcon id  - Falcon id specifying falcon to bootstrap.
275 	 */
276 	struct {
277 		struct nvkm_msgqueue_hdr hdr;
278 		u8 cmd_type;
279 		u32 flags;
280 		u32 falcon_id;
281 	} cmd;
282 
283 	memset(&cmd, 0, sizeof(cmd));
284 
285 	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
286 	cmd.hdr.size = sizeof(cmd);
287 	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
288 	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
289 	cmd.falcon_id = falcon;
290 	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
291 			acr_boot_falcon_callback, &completed, true);
292 
293 	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
294 		return -ETIMEDOUT;
295 
296 	return 0;
297 }
298 
299 static void
acr_boot_multiple_falcons_callback(struct nvkm_msgqueue * priv,struct nvkm_msgqueue_hdr * hdr)300 acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
301 				   struct nvkm_msgqueue_hdr *hdr)
302 {
303 	struct acr_bootstrap_falcon_msg {
304 		struct nvkm_msgqueue_msg base;
305 
306 		u32 falcon_mask;
307 	} *msg = (void *)hdr;
308 	const struct nvkm_subdev *subdev = priv->falcon->owner;
309 	unsigned long falcon_mask = msg->falcon_mask;
310 	u32 falcon_id, falcon_treated = 0;
311 
312 	for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
313 		nvkm_debug(subdev, "%s booted\n",
314 			   nvkm_secboot_falcon_name[falcon_id]);
315 		falcon_treated |= BIT(falcon_id);
316 	}
317 
318 	if (falcon_treated != msg->falcon_mask) {
319 		nvkm_error(subdev, "in bootstrap falcon callback:\n");
320 		nvkm_error(subdev, "invalid falcon mask 0x%x\n",
321 			   msg->falcon_mask);
322 		return;
323 	}
324 }
325 
326 static int
acr_boot_multiple_falcons(struct nvkm_msgqueue * priv,unsigned long falcon_mask)327 acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
328 {
329 	DECLARE_COMPLETION_ONSTACK(completed);
330 	/*
331 	 * flags      - Flag specifying RESET or no RESET.
332 	 * falcon id  - Falcon id specifying falcon to bootstrap.
333 	 */
334 	struct {
335 		struct nvkm_msgqueue_hdr hdr;
336 		u8 cmd_type;
337 		u32 flags;
338 		u32 falcon_mask;
339 		u32 use_va_mask;
340 		u32 wpr_lo;
341 		u32 wpr_hi;
342 	} cmd;
343 	struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
344 
345 	memset(&cmd, 0, sizeof(cmd));
346 
347 	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
348 	cmd.hdr.size = sizeof(cmd);
349 	cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
350 	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
351 	cmd.falcon_mask = falcon_mask;
352 	cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
353 	cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
354 	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
355 			acr_boot_multiple_falcons_callback, &completed, true);
356 
357 	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
358 		return -ETIMEDOUT;
359 
360 	return 0;
361 }
362 
/* ACR callbacks for the base 0137c63d variant (single-falcon boot only) */
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
	.boot_falcon = acr_boot_falcon,
};
367 
/* ACR callbacks for the 0137bca5 variant (adds multi-falcon boot) */
static const struct nvkm_msgqueue_acr_func
msgqueue_0137bca5_acr_func = {
	.boot_falcon = acr_boot_falcon,
	.boot_multiple_falcons = acr_boot_multiple_falcons,
};
373 
/* Free the container allocated by the constructor. */
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);

	kfree(priv);
}
379 
/* Top-level msgqueue operations for the 0137c63d variant */
static const struct nvkm_msgqueue_func
msgqueue_0137c63d_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137c63d_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};
388 
389 int
msgqueue_0137c63d_new(struct nvkm_falcon * falcon,const struct nvkm_secboot * sb,struct nvkm_msgqueue ** queue)390 msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
391 		      struct nvkm_msgqueue **queue)
392 {
393 	struct msgqueue_0137c63d *ret;
394 
395 	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
396 	if (!ret)
397 		return -ENOMEM;
398 
399 	*queue = &ret->base;
400 
401 	nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
402 
403 	return 0;
404 }
405 
/* Top-level msgqueue operations for the 0137bca5 variant */
static const struct nvkm_msgqueue_func
msgqueue_0137bca5_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137bca5_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};
414 
/*
 * Allocate and initialize a 0137bca5 message queue (0137c63d plus a WPR
 * address used by the multiple-falcon bootstrap command).
 * Returns 0 on success or -ENOMEM.
 */
int
msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
		      struct nvkm_msgqueue **queue)
{
	struct msgqueue_0137bca5 *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	*queue = &ret->base.base;

	/*
	 * FIXME this must be set to the address of a *GPU* mapping within the
	 * ACR address space!
	 */
	/* ret->wpr_addr = sb->wpr_addr; */
	/*
	 * NOTE(review): until the FIXME above is resolved, wpr_addr stays at
	 * the kzalloc'd zero, so the WPR address sent to the firmware by
	 * acr_boot_multiple_falcons() is 0 -- confirm this is intentional.
	 */

	nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);

	return 0;
}
437