/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"

static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size);
static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			enum cache_policy default_policy,
			enum cache_policy alternate_policy,
			void __user *alternate_aperture_base,
			uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
				struct qcm_process_device *qpd);
static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
			struct queue *q,
			struct qcm_process_device *qpd);

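/*
 * Fill in the VI ASIC-specific callbacks consumed by the generic device
 * queue manager.
 */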
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
	asic_ops->update_qpd = update_qpd_vi;
	asic_ops->init_sdma_vm = init_sdma_vm;
}

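/*
 * Tonga/dGPU variant: hooks up the callbacks that skip the ATC bit, use
 * MTYPE_UC for coherent mappings and always assume 64-bit GPUVM
 * aperture addressing.
 */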
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
	asic_ops->update_qpd = update_qpd_vi_tonga;
	asic_ops->init_sdma_vm = init_sdma_vm_tonga;
}

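/*
 * Pack the per-process aperture nybble into an SH_MEM_BASES value: the
 * same nybble is programmed into the top bits of both PRIVATE_BASE
 * (LDS/scratch) and SHARED_BASE (GPUVM), as described below.
 */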
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low-bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return top_address_nybble << 12 |
			(top_address_nybble << 12) <<
			SH_MEM_BASES__SHARED_BASE__SHIFT;
}

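/*
 * Build sh_mem_config from the requested cache policies: coherent
 * selects MTYPE_CC, anything else MTYPE_NC, with the alignment mode set
 * to unaligned and PRIVATE_ATC kept set. The alternate aperture
 * base/size arguments are not consumed here.
 */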
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_CC :
			MTYPE_NC;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_CC :
			MTYPE_NC;

	qpd->sh_mem_config = (qpd->sh_mem_config &
			SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
		SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	return true;
}

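/*
 * Tonga/dGPU variant: coherent policies select MTYPE_UC rather than
 * MTYPE_CC, and the PRIVATE_ATC bit is left clear.
 */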
static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd,
		enum cache_policy default_policy,
		enum cache_policy alternate_policy,
		void __user *alternate_aperture_base,
		uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_UC :
			MTYPE_NC;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_UC :
			MTYPE_NC;

	qpd->sh_mem_config =
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				   SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

	return true;
}

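/*
 * Program the per-process compute state: a default sh_mem_config on
 * first use, then sh_mem_bases and the HSA32/HSA64 address mode
 * depending on whether the process runs in 32-bit user mode.
 */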
static int update_qpd_vi(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
					SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
		qpd->sh_mem_config |= 1  <<
			SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
	}

	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

	return 0;
}

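/*
 * Tonga/dGPU variant of update_qpd: MTYPE_UC defaults, no ATC bit, and
 * the 64-bit aperture nybble is always used for sh_mem_bases.
 */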
static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
				MTYPE_UC <<
					SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
				MTYPE_UC <<
					SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	temp = get_sh_mem_bases_nybble_64(pdd);
	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

	pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
		temp, qpd->sh_mem_bases);

	return 0;
}

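/*
 * Derive the SDMA RLC virtual address configuration for a queue: the ATC
 * bit is set, then either the 32-bit aperture bases or the 64-bit shared
 * base nybble is programmed, mirroring the sh_mem_bases logic above.
 */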
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
				struct qcm_process_device *qpd)
{
	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);

	if (q->process->is_32bit_user_mode)
		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
				get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;

	q->properties.sdma_vm_addr = value;
}

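/*
 * Tonga/dGPU variant: only the 64-bit shared base nybble is programmed;
 * no ATC bit and no 32-bit pointer mode.
 */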
static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
			struct queue *q,
			struct qcm_process_device *qpd)
{
	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	q->properties.sdma_vm_addr =
		((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
		 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
		SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}