1 /*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24 #include "gf100.h"
25 #include "ctxgf100.h"
26
27 #include <subdev/secboot.h>
28
29 #include <nvif/class.h>
30
31 /*******************************************************************************
32 * PGRAPH engine/subdev functions
33 ******************************************************************************/
34
35 int
gm200_gr_rops(struct gf100_gr * gr)36 gm200_gr_rops(struct gf100_gr *gr)
37 {
38 return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
39 }
40
41 void
gm200_gr_init_ds_hww_esr_2(struct gf100_gr * gr)42 gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
43 {
44 struct nvkm_device *device = gr->base.engine.subdev.device;
45 nvkm_wr32(device, 0x405848, 0xc0000000);
46 nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
47 }
48
49 void
gm200_gr_init_num_active_ltcs(struct gf100_gr * gr)50 gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
51 {
52 struct nvkm_device *device = gr->base.engine.subdev.device;
53 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
54 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
55 }
56
void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	/* Mirror the FB MMU configuration (0x100c80, masked) into the
	 * GPC MMU control register, then clear 0x418890/0x418894.
	 * NOTE(review): exact bit meanings of mask 0xf0001fff are not
	 * visible here — taken on trust from the hardware programming
	 * sequence.
	 */
	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	/* Copy three further FB-side values (0x100cc8/0x100ccc/0x100cc4)
	 * into their GPC-side counterparts; keep this write order.
	 */
	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}
70
71 static void
gm200_gr_init_rop_active_fbps(struct gf100_gr * gr)72 gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
73 {
74 struct nvkm_device *device = gr->base.engine.subdev.device;
75 const u32 fbp_count = nvkm_rd32(device, 0x12006c);
76 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
77 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
78 }
79
/* Hardcoded GPC tile maps for specific GPC/TPC configurations, indexed
 * by TPC slot; used by gm200_gr_oneinit_tiles() when the generic gf100
 * algorithm does not apply.  Array name encodes <gpc_nr>_<tpc_total>.
 */

/* 6 GPCs, 24 TPCs total. */
static u8
gm200_gr_tile_map_6_24[] = {
	0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
};

/* 4 GPCs, 16 TPCs total. */
static u8
gm200_gr_tile_map_4_16[] = {
	0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
};

/* 2 GPCs, 8 TPCs total. */
static u8
gm200_gr_tile_map_2_8[] = {
	0, 1, 1, 0, 0, 1, 1, 0,
};
94
/* Assign SM identifiers.  Delegates entirely to the gf100 algorithm;
 * the hardware-correct GM200 variant has not been reverse-engineered.
 */
void
gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	/*XXX: There's a different algorithm here I've not yet figured out. */
	gf100_gr_oneinit_sm_id(gr);
}
101
102 void
gm200_gr_oneinit_tiles(struct gf100_gr * gr)103 gm200_gr_oneinit_tiles(struct gf100_gr *gr)
104 {
105 /*XXX: Not sure what this is about. The algorithm from NVGPU
106 * seems to work for all boards I tried from earlier (and
107 * later) GPUs except in these specific configurations.
108 *
109 * Let's just hardcode them for now.
110 */
111 if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
112 memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
113 gr->screen_tile_row_offset = 1;
114 } else
115 if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
116 memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
117 gr->screen_tile_row_offset = 4;
118 } else
119 if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
120 memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
121 gr->screen_tile_row_offset = 5;
122 } else {
123 gf100_gr_oneinit_tiles(gr);
124 }
125 }
126
127 int
gm200_gr_new_(const struct gf100_gr_func * func,struct nvkm_device * device,int index,struct nvkm_gr ** pgr)128 gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
129 int index, struct nvkm_gr **pgr)
130 {
131 struct gf100_gr *gr;
132 int ret;
133
134 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
135 return -ENOMEM;
136 *pgr = &gr->base;
137
138 ret = gf100_gr_ctor(func, device, index, gr);
139 if (ret)
140 return ret;
141
142 /* Load firmwares for non-secure falcons */
143 if (!nvkm_secboot_is_managed(device->secboot,
144 NVKM_SECBOOT_FALCON_FECS)) {
145 if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
146 (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
147 return ret;
148 }
149 if (!nvkm_secboot_is_managed(device->secboot,
150 NVKM_SECBOOT_FALCON_GPCCS)) {
151 if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
152 (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
153 return ret;
154 }
155
156 if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
157 (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
158 (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
159 (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
160 return ret;
161
162 return 0;
163 }
164
/* GM200 PGRAPH implementation: gf100_gr_func hooks mixing GM200-specific
 * routines with inherited gf100/gf117/gk104/gm107 ones, plus the object
 * classes this engine exposes.
 */
static const struct gf100_gr_func
gm200_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_bios = gm107_gr_init_bios,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = gf117_gr_init_zcull,
	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_419cc0 = gf100_gr_init_419cc0,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
	.init_504430 = gm107_gr_init_504430,
	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
	.init_400054 = gm107_gr_init_400054,
	.trap_mp = gf100_gr_trap_mp,
	.rops = gm200_gr_rops,
	.tpc_nr = 4,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};
199
/* Public GM200 GR constructor: instantiate with the gm200_gr func table. */
int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gm200_gr_new_(&gm200_gr, device, index, pgr);
}
205