/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r367.h"
#include "acr_r361.h"

#include <core/gpuobj.h>

/*
 * r367 ACR: the new LS signature format requires a rewrite of the LS firmware
 * and blob creation functions. The hsflcn_desc layout has also changed
 * slightly.
 */

#define LSF_LSB_DEPMAP_SIZE 11

/**
 * struct acr_r367_lsf_lsb_header - LS firmware header
 *
 * See also struct acr_r352_lsf_lsb_header for documentation.
 */
struct acr_r367_lsf_lsb_header {
	/**
	 * LS falcon signatures
	 * @prd_keys: signature to use in production mode
	 * @dbg_keys: signature to use in debug mode
	 * @b_prd_present: whether the production key is present
	 * @b_dbg_present: whether the debug key is present
	 * @falcon_id: ID of the falcon the ucode applies to
	 */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
		u32 supports_versioning;
		u32 version;
		u32 depmap_count;
		u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
		u8 kdf[16];
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};
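
/*
 * With LSF_LSB_DEPMAP_SIZE == 11, the signature block above packs to
 * 192 bytes: two 2x16-byte key arrays, six u32 fields, an 88-byte depmap
 * (seemingly 11 pairs of u32s, hence the "* 2 * 4"), and a 16-byte kdf.
 * acr_r367_ls_ucode_img_load() below rejects any LS firmware whose
 * signature blob does not match this size exactly.
 */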

/**
 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
 *
 * See also struct acr_r352_lsf_wpr_header for documentation.
 */
struct acr_r367_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 bin_version;
	u32 status;
#define LSF_IMAGE_STATUS_NONE				0
#define LSF_IMAGE_STATUS_COPY				1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED	7
};

/**
 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
 */
struct ls_ucode_img_r367 {
	struct ls_ucode_img base;

	struct acr_r367_lsf_wpr_header wpr_header;
	struct acr_r367_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
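
/*
 * The cast macro above recovers the containing r367 image from an embedded
 * base image, e.g. (as done in acr_r367_ls_write_wpr() below):
 *
 *	struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
 *
 * This is only safe for images allocated by acr_r367_ls_ucode_img_load().
 */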

struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_img_r367 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
	if (ret) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		/* Free the loaded firmware too, not just the image */
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* not needed? the signature should already have the right value */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096

static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r367 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_ls_func *func =
				      acr->func->ls_func[_img->falcon_id];

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->bin_version = lhdr->signature.version;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by someone other than the ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include the LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
			  lhdr->bl_code_size - lhdr->ucode_size;
	/*
	 * Though the BL is located at offset 0 of the image, its VA
	 * is different, to make sure it does not collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/* Align, save off, and include the additional BL data */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}
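
/*
 * Sketch of the per-falcon layout produced above (offsets relative to the
 * start of the WPR blob; actual sizes depend on the firmware loaded):
 *
 *	offset = ALIGN(offset, 256)	LSB header (struct acr_r367_lsf_lsb_header)
 *	offset = ALIGN(offset, 4096)	ucode image (code + data + appended BL desc)
 *	offset = ALIGN(offset, 256)	BL descriptor data (bl_data_size bytes)
 *
 * The returned offset is where the next falcon's LSB header may start.
 */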

int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r367 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r367_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}
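
/*
 * End to end, the LS blob written by acr_r367_ls_write_wpr() below thus
 * looks like this (illustrative, for two managed falcons):
 *
 *	WPR header[0] | WPR header[1] | terminator (falcon_id = INVALID)
 *	LSB header[0] | ucode[0] | BL data[0]
 *	LSB header[1] | ucode[1] | BL data[1]
 *
 * and the offset returned above is the total size that blob requires.
 */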

int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;
	u32 max_desc_size = 0;
	u8 *gdesc;

	/* Figure out how large gdesc needs to be */
	list_for_each_entry(_img, imgs, node) {
		const struct acr_r352_ls_func *ls_func =
					   acr->func->ls_func[_img->falcon_id];

		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
	}

	gdesc = kmalloc(max_desc_size, GFP_KERNEL);
	if (!gdesc)
		return -ENOMEM;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
		const struct acr_r352_ls_func *ls_func =
					   acr->func->ls_func[_img->falcon_id];

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header, sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	/* Terminate the WPR header array */
	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	kfree(gdesc);

	return 0;
}

struct acr_r367_hsflcn_desc {
	u8 reserved_dmem[0x200];
	u32 signatures[4];
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
			u32 shadow_mem_start_addr;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};

void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct acr_r367_hsflcn_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = ls_blob->addr + ls_blob->size;

		if (acr->func->shadow_blob)
			wpr_start += ls_blob->size / 2;

		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
		if (acr->func->shadow_blob)
			desc->regions.region_props[0].shadow_mem_start_addr =
							    ls_blob->addr >> 8;
		else
			desc->regions.region_props[0].shadow_mem_start_addr = 0;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}
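
/*
 * Region addresses above are stored in 256-byte units (hence the ">> 8").
 * As an illustrative example (addresses made up): with a 0x100000-byte
 * ls_blob at 0x2000000 and a shadow blob in use, the WPR half covers
 * 0x2080000..0x2100000, so start_addr = 0x20800, end_addr = 0x21000 and
 * shadow_mem_start_addr = 0x20000.
 */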

const struct acr_r352_func
acr_r367_func = {
	.fixup_hs_desc = acr_r367_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
	.shadow_blob = true,
	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
	.ls_fill_headers = acr_r367_ls_fill_headers,
	.ls_write_wpr = acr_r367_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
	},
};

struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
	     unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}
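
/*
 * A minimal usage sketch (illustrative only - the real call site lives in
 * the chip-level secboot code): a secboot implementation that boots through
 * SEC2 and lets the ACR manage FECS would create this flavor with:
 *
 *	struct nvkm_acr *acr =
 *		acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
 *			     BIT(NVKM_SECBOOT_FALCON_FECS));
 *	if (IS_ERR(acr))
 *		return PTR_ERR(acr);
 */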