/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev)	(1 << (adev)->vm_manager.block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE	32768

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

#define AMDGPU_MTYPE_NC		0
#define AMDGPU_MTYPE_CC		2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
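
/*
 * Illustrative sketch only, not part of the driver interface: one way the
 * flag macros above can be combined into a single PTE value for a snooped,
 * read/write page backed by system memory.  The helper name and the
 * 4KB-aligned address mask are assumptions made for this example.
 */
static inline u64 amdgpu_vm_example_system_pte(u64 sys_addr)
{
	/* keep only the page-aligned address bits */
	u64 pte = sys_addr & 0x0000fffffffff000ULL;

	/* mark the entry valid, backed by snooped system memory, R/W */
	pte |= AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;

	return pte;
}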

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS	2
#define AMDGPU_GFXHUB		0
#define AMDGPU_MMHUB		1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_VA_HOLE_START		0x0000800000000000ULL
#define AMDGPU_VA_HOLE_END		0xffff800000000000ULL

/*
 * Hardware is programmed as if the hole doesn't exist with start and end
 * address values.
 *
 * This mask is used to remove the upper 16 bits of the VA and so come up
 * with the linear addr value.
 */
#define AMDGPU_VA_HOLE_MASK		0x0000ffffffffffffULL

/* max VMIDs dedicated for a process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm	*vm;
	struct amdgpu_bo	*bo;

	/* protected by bo being reserved */
	struct list_head	bo_list;

	/* protected by spinlock */
	struct list_head	vm_status;

	/* protected by the BO being reserved */
	bool			moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;
	bool				huge;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
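
/*
 * Illustrative sketch only, not part of the driver interface: packing a PASID
 * and a page-aligned fault address into a single 64-bit fault key (as used,
 * e.g., for the faults kfifo in struct amdgpu_vm below), then recovering both
 * fields.  The helper name and its parameters are assumptions made for this
 * example; only the macros above are real.
 */
static inline void amdgpu_vm_example_fault_key(unsigned int pasid, u64 addr,
					       unsigned int *out_pasid,
					       u64 *out_addr)
{
	/* PASID lands in bits 63..48, the page address in bits 47..12 */
	u64 key = AMDGPU_VM_FAULT(pasid, addr & 0xfffffffff000ULL);

	*out_pasid = AMDGPU_VM_FAULT_PASID(key);
	*out_addr = AMDGPU_VM_FAULT_ADDR(key);
}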

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;
	spinlock_t		moved_lock;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entity for page table updates */
	struct drm_sched_entity	entity;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Limit non-retry fault storms */
	unsigned int		fault_credit;

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info	task_info;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;

	/* partially resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;
};
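
/*
 * Illustrative sketch only, not part of the driver interface: how the
 * vm_update_mode bits documented above can be decoded to decide whether page
 * table updates for a given VM context go through the CPU or through SDMA.
 * The helper name is an assumption made for this example.
 */
static inline bool amdgpu_vm_example_use_cpu(int vm_update_mode, int vm_context)
{
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		return !!(vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE);

	return !!(vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX);
}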

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

#endif