/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r361.h"

#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>

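/*
 * acr_r361_generate_flcn_bl_desc - write the r361 bootloader descriptor for a
 * generic LS falcon (FECS/GPCCS), using the ucode image's position in WPR.
 */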
static void
acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r361_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = base + pdesc->app_resident_code_offset;
	addr_data = base + pdesc->app_resident_data_offset;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = u64_to_flcn64(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = u64_to_flcn64(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}

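/*
 * acr_r361_generate_hs_bl_desc - write the bootloader descriptor for a
 * high-secure (HS) blob located at @offset, accessed via the virtual DMA
 * context.
 */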
void
acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = u64_to_flcn64(offset);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
	bl_desc->data_size = hdr->data_size;
}

const struct acr_r352_ls_func
acr_r361_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r361_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

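/*
 * Bootloader descriptor layout shared by the PMU and SEC2 LS firmwares.
 */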
struct acr_r361_pmu_bl_desc {
	u32 reserved;
	u32 dma_idx;
	struct flcn_u64 code_dma_base;
	u32 total_code_size;
	u32 code_size_to_load;
	u32 code_entry_point;
	struct flcn_u64 data_dma_base;
	u32 data_size;
	struct flcn_u64 overlay_dma_base;
	u32 argc;
	u32 argv;
};

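/*
 * acr_r361_generate_pmu_bl_desc - write the PMU bootloader descriptor. The
 * command-line arguments are placed at the end of the PMU falcon's DMEM.
 */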
static void
acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
			      const struct ls_ucode_img *img, u64 wpr_addr,
			      void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
	struct acr_r361_pmu_bl_desc *desc = _desc;
	u64 base, addr_code, addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = base + pdesc->app_resident_code_offset;
	addr_data = base + pdesc->app_resident_data_offset;
	addr_args = pmu->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = u64_to_flcn64(addr_code);
	desc->total_code_size = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = u64_to_flcn64(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = u64_to_flcn64(addr_code);
	desc->argc = 1;
	desc->argv = addr_args;
}

const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
	.load = acr_ls_ucode_load_pmu,
	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
	.post_run = acr_ls_pmu_post_run,
};

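/*
 * acr_r361_generate_sec2_bl_desc - write the SEC2 bootloader descriptor. It
 * reuses the PMU descriptor layout, but the arguments live in EMEM.
 */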
static void
acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
	struct acr_r361_pmu_bl_desc *desc = _desc;
	u64 base, addr_code, addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	/* For some reason we should not add app_resident_code_offset here */
	addr_code = base;
	addr_data = base + pdesc->app_resident_data_offset;
	addr_args = sec->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
	desc->code_dma_base = u64_to_flcn64(addr_code);
	desc->total_code_size = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = u64_to_flcn64(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = u64_to_flcn64(addr_code);
	desc->argc = 1;
	/* args are stored at the beginning of EMEM */
	desc->argv = 0x01000000;
}

const struct acr_r352_ls_func
acr_r361_ls_sec2_func = {
	.load = acr_ls_ucode_load_sec2,
	.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
	.post_run = acr_ls_sec2_post_run,
};


const struct acr_r352_func
acr_r361_func = {
	.fixup_hs_desc = acr_r352_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
	.ls_fill_headers = acr_r352_ls_fill_headers,
	.ls_write_wpr = acr_r352_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
	},
};

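/*
 * acr_r361_new - create an r361 ACR instance, using the PMU as boot falcon.
 */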
struct nvkm_acr *
acr_r361_new(unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
			     managed_falcons);
}