/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mmu.h>

#include <nvfw/flcn.h>

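/*
 * Fill in the version 2 falcon bootloader DMEM descriptor for a
 * high-secure firmware blob, describing where its secure and non-secure
 * code and data sit within the firmware's virtual mapping, then copy the
 * descriptor into the falcon's DMEM for the bootloader to consume.
 */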
void
gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
	struct flcn_bl_dmem_desc_v2 hsdesc = {
		.ctx_dma = FALCON_DMAIDX_VIRT,
		.code_dma_base = hsf->vma->addr,
		.non_sec_code_off = hsf->non_sec_addr,
		.non_sec_code_size = hsf->non_sec_size,
		.sec_code_off = hsf->sec_addr,
		.sec_code_size = hsf->sec_size,
		.code_entry_point = 0,
		.data_dma_base = hsf->vma->addr + hsf->data_addr,
		.data_size = hsf->data_size,
		.argc = 0,
		.argv = 0,
	};

	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);

	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
}

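/*
 * HS "unload" firmware: GM200 load/boot hooks paired with the GP108
 * bootloader descriptor above; the GV100 images are declared here too.
 */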
const struct nvkm_acr_hsf_func
gp108_acr_unload_0 = {
	.load = gm200_acr_unload_load,
	.boot = gm200_acr_unload_boot,
	.bld = gp108_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");

MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
gp108_acr_unload_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
	{}
};

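/*
 * HS "load" firmware: GP102 load hook, GM200 boot hook, GP108
 * bootloader descriptor.
 */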
static const struct nvkm_acr_hsf_func
gp108_acr_load_0 = {
	.load = gp102_acr_load_load,
	.boot = gm200_acr_load_boot,
	.bld = gp108_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");

static const struct nvkm_acr_hsf_fwif
gp108_acr_load_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
	{}
};

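/*
 * ACR implementation: GP102 WPR parsing/layout/allocation/build/patch
 * combined with the GM200 WPR check and init.
 */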
static const struct nvkm_acr_func
gp108_acr = {
	.load = gp108_acr_load_fwif,
	.unload = gp108_acr_unload_fwif,
	.wpr_parse = gp102_acr_wpr_parse,
	.wpr_layout = gp102_acr_wpr_layout,
	.wpr_alloc = gp102_acr_wpr_alloc,
	.wpr_build = gp102_acr_wpr_build,
	.wpr_patch = gp102_acr_wpr_patch,
	.wpr_check = gm200_acr_wpr_check,
	.init = gm200_acr_init,
};

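/*
 * Prefer the GP102-style firmware loading path with the GP108 function
 * table; fall back to the GM200 no-firmware path if none is available.
 */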
static const struct nvkm_acr_fwif
gp108_acr_fwif[] = {
	{  0, gp102_acr_load, &gp108_acr },
	{ -1, gm200_acr_nofw, &gm200_acr },
	{}
};

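/* Create the GP108 ACR subdev from the firmware interface table above. */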
int
gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
}