/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
	if (r)
		return r;

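	/* The shadow copy, if present, needs its own GART mapping as well. */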
	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;

	if (!resv)
		return 0;

	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	struct dma_fence *f;
	int r;

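	/* Immediate updates go through the VM's immediate entity, all other
	 * updates through the delayed one.
	 */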
	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

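	/* Unlocked updates only track the last submitted fence, locked ones
	 * are added to the root PD reservation object for bookkeeping.
	 */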
	if (p->unlocked) {
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

	if (fence && !p->immediate) {
		/*
		 * Most hw generations now have a separate queue for page table
		 * updates, but when the queue is shared with userspace we need
		 * the extra CPU round trip to correctly flush the TLB.
		 */
		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
		swap(*fence, f);
	}
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

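	/* The PTE values were stashed at the tail of the IB by
	 * amdgpu_vm_sdma_update(), copy them from there.
	 */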
	src += p->num_dw_left * 4;

	pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
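	/* Up to two entries are written directly into the IB, larger runs use
	 * the PTE/PDE generation command.
	 */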
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, setup mapping buffer on demand and write commands to
 * the IB.
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	struct amdgpu_bo *bo = &vmbo->bo;
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		r = amdgpu_sync_fence(&p->job->sync, fence);
		if (r) {
			dma_resv_iter_end(&cursor);
			return r;
		}
	}
	dma_resv_iter_end(&cursor);

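	/* Generate the update commands, splitting the work into multiple
	 * submissions when the IB runs out of space.
	 */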
	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/* estimate how many dw we need */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
						     &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (vmbo->shadow)
				amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(vmbo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

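		/* Each PTE value takes two DWs of IB space, limit the chunk
		 * size accordingly.
		 */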
		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (vmbo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};