/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r361.h"

#include <core/gpuobj.h>

/*
 * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
 * parameter.
 */

32 struct acr_r364_hsflcn_desc {
33 union {
34 u8 reserved_dmem[0x200];
35 u32 signatures[4];
36 } ucode_reserved_space;
37 u32 wpr_region_id;
38 u32 wpr_offset;
39 u32 mmu_memory_range;
40 struct {
41 u32 no_regions;
42 struct {
43 u32 start_addr;
44 u32 end_addr;
45 u32 region_id;
46 u32 read_mask;
47 u32 write_mask;
48 u32 client_mask;
49 u32 shadow_mem_start_addr;
50 } region_props[2];
51 } regions;
52 u32 ucode_blob_size;
53 u64 ucode_blob_base __aligned(8);
54 struct {
55 u32 vpr_enabled;
56 u32 vpr_start;
57 u32 vpr_end;
58 u32 hdcp_policies;
59 } vpr_desc;
60 };
61
62 static void
acr_r364_fixup_hs_desc(struct acr_r352 * acr,struct nvkm_secboot * sb,void * _desc)63 acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
64 void *_desc)
65 {
66 struct acr_r364_hsflcn_desc *desc = _desc;
67 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
68
69 /* WPR region information if WPR is not fixed */
70 if (sb->wpr_size == 0) {
71 u64 wpr_start = ls_blob->addr;
72 u64 wpr_end = ls_blob->addr + ls_blob->size;
73
74 if (acr->func->shadow_blob)
75 wpr_start += ls_blob->size / 2;
76
77 desc->wpr_region_id = 1;
78 desc->regions.no_regions = 2;
79 desc->regions.region_props[0].start_addr = wpr_start >> 8;
80 desc->regions.region_props[0].end_addr = wpr_end >> 8;
81 desc->regions.region_props[0].region_id = 1;
82 desc->regions.region_props[0].read_mask = 0xf;
83 desc->regions.region_props[0].write_mask = 0xc;
84 desc->regions.region_props[0].client_mask = 0x2;
85 if (acr->func->shadow_blob)
86 desc->regions.region_props[0].shadow_mem_start_addr =
87 ls_blob->addr >> 8;
88 else
89 desc->regions.region_props[0].shadow_mem_start_addr = 0;
90 } else {
91 desc->ucode_blob_base = ls_blob->addr;
92 desc->ucode_blob_size = ls_blob->size;
93 }
94 }
95
96 const struct acr_r352_func
97 acr_r364_func = {
98 .fixup_hs_desc = acr_r364_fixup_hs_desc,
99 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
100 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
101 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
102 .ls_fill_headers = acr_r352_ls_fill_headers,
103 .ls_write_wpr = acr_r352_ls_write_wpr,
104 .ls_func = {
105 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
106 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
107 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
108 },
109 };
110
111
112 struct nvkm_acr *
acr_r364_new(unsigned long managed_falcons)113 acr_r364_new(unsigned long managed_falcons)
114 {
115 return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
116 managed_falcons);
117 }
118