/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

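/* Build the instance mask owned by partition xcp_id when each partition
 * gets num_inst consecutive instances, e.g. XCP_INST_MASK(2, 1) ==
 * GENMASK(1, 0) << 2 == 0xc.
 */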
#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

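/* Program the LAYOUT1 doorbell map: KIQ/MEC rings, user queues, per-XCC
 * doorbell ranges, SDMA engines, IH and VCN all get fixed offsets here.
 */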
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

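/* Bind a ring to the partition (XCP) whose IP instance mask contains the
 * ring's hardware instance; under CPX mode each VCN instance is shared by
 * two partitions, hence the doubled instance stride.
 */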
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
			inst_mask = 1 << (inst_idx * 2);
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			break;
		}
	}
}

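/* Append this ring's scheduler to the selected partition's list for its
 * (ring type, hardware priority) slot.
 */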
static void aqua_vanjaram_xcp_gpu_sched_update(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring,
					       unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

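/* Rebuild every partition's scheduler lists from the rings that are
 * currently ready; VCN rings are registered with two partitions in CPX mode.
 */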
static int aqua_vanjaram_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN is shared by two partitions under CPX MODE */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}


static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

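/* Pick a partition for a file descriptor that has not been pinned to one
 * yet (the least-referenced partition wins), then hand back that partition's
 * scheduler list for the requested IP and priority.
 */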
static int aqua_vanjaram_select_scheds(struct amdgpu_device *adev,
				       u32 hw_ip, u32 hw_prio,
				       struct amdgpu_fpriv *fpriv,
				       unsigned int *num_scheds,
				       struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

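/* Translate a logical instance id to the physical (device) instance id;
 * only GC, SDMA and VCN need the lookup table built in
 * aqua_vanjaram_ip_map_init().
 */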
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Both JPEG and VCN use this case, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs.
		 */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

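/* Record the physical instance id of each set bit in inst_mask, in order,
 * as the device instance for logical instances 0, 1, 2, ...; unused slots
 * are marked -1.
 */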
static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}


void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 * bit[34]: indicates a cross-AID access
 * bit[33:32]: indicate the target AID id
 * AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
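/* e.g. ext_id = 2 encodes to (1ULL << 34) | (2ULL << 32) = 0x600000000 */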
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing, bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}


static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	if (adev->nbio.funcs->get_compute_partition_mode)
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);

	return mode;
}

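/* Number of XCCs each partition owns in a given mode: all of them for SPX,
 * an even split for DPX/TPX/QPX, and exactly one for CPX.
 */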
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

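/* Fill in the instance mask and IP callbacks that partition xcp_id owns for
 * the given IP block, based on the per-partition SDMA/VCN instance counts
 * implied by the current mode.
 */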
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
	int num_sdma, num_vcn;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma;
		num_vcn_xcp = num_vcn;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 2;
		num_vcn_xcp = num_vcn / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 3;
		num_vcn_xcp = num_vcn / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 4;
		num_vcn_xcp = num_vcn / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = 2;
		num_vcn_xcp = num_vcn ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

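/* Derive the partition mode from the number of memory partitions: one
 * memory partition maps to SPX, one per XCC to CPX, and the half-XCC and
 * two-partition cases depend on whether the device is an APU.
 */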
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_QPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

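/* A compute partition mode is valid only if the XCC count divides evenly
 * into it and the current memory partition count is compatible with it.
 */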
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

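/* Switch the compute partition mode: validate (or auto-select) the target
 * mode, lock KFD and tear it down, reprogram the XCCs, reinitialize the
 * XCP bookkeeping for the new partition count, and bring KFD back up.
 */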
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

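/* With the memory/compute mode combination already validated, the memory
 * partition id follows directly from which partition's group of XCCs the
 * given xcc_id belongs to.
 */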
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial mode validation has already been done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}


struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

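/* Derive SDMA/VCN/JPEG instance counts and the AID mask from the discovered
 * instance masks, then bring up the XCP manager and the logical-to-physical
 * IP instance map.
 */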
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 SDMA instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	/* An AID counts as present only if all of its SDMA instances are */
	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		if ((inst_mask & mask) == mask)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for Aqua Vanjaram. VCN and JPEG instances
	 * are addressed by their logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}