/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_SVM_H_
#define KFD_SVM_H_

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"

#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
#define SVM_ADEV_PGMAP_OWNER(adev)\
	((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
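
/*
 * Hedged illustration (not driver code): SVM_ADEV_PGMAP_OWNER() yields the
 * owner token that HMM compares against a device-private page's pgmap owner,
 * so pages owned by any GPU in the same XGMI hive can be accessed in place
 * instead of migrated. A sketch of passing it to hmm_range_fault(); the
 * function and variable names here are hypothetical.
 */
#if 0
static int example_snapshot_range(struct amdgpu_device *adev,
				  struct hmm_range *range)
{
	range->dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev);
	return hmm_range_fault(range);
}
#endif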

struct svm_range_bo {
	struct amdgpu_bo *bo;
	struct kref kref;
	struct list_head range_list; /* all svm ranges sharing this bo */
	spinlock_t list_lock;
	struct amdgpu_amdkfd_fence *eviction_fence;
	struct work_struct eviction_work;
	uint32_t evicting;
	struct work_struct release_work;
	struct kfd_node *node;
};
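
/*
 * Lifetime note (inferred from the fields above, not quoted from the
 * driver): one svm_range_bo backs all svm_range entries on @range_list,
 * each of which holds a kref taken via svm_range_bo_ref(). @eviction_fence
 * and @eviction_work let TTM evictions be handled asynchronously, while
 * @evicting flags an eviction in flight.
 */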

enum svm_work_list_ops {
	SVM_OP_NULL,
	SVM_OP_UNMAP_RANGE,
	SVM_OP_UPDATE_RANGE_NOTIFIER,
	SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
	SVM_OP_ADD_RANGE,
	SVM_OP_ADD_RANGE_AND_MAP
};

struct svm_work_list_item {
	enum svm_work_list_ops op;
	struct mm_struct *mm;
};
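
/*
 * Hedged sketch of how a deferred operation might be recorded on a range
 * before it is queued (see svm_range_add_list_work() below); the field use
 * is inferred from the structures, and prange/mm are hypothetical locals.
 */
#if 0
	prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
	prange->work_item.mm = mm;
#endif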

/**
 * struct svm_range - shared virtual memory range
 *
 * @svms: list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start: range start address in pages
 * @last: range last address in pages
 * @it_node: node [start, last] stored in the interval tree; start and last
 *           are page aligned, and the size in pages is (last - start + 1)
 * @list: linked list node, used to scan all ranges of svms
 * @update_list: linked list node used to add to update_list
 * @npages: number of pages
 * @dma_addr: dma mapping address on each GPU for system memory physical page
 * @ttm_res: vram ttm resource map
 * @offset: range start offset within mm_nodes
 * @svm_bo: struct to manage split amdgpu_bo
 * @svm_bo_list: linked list node, to scan all ranges which share same svm_bo
 * @lock: protect prange start, last, child_list, svm_bo_list
 * @saved_flags: save/restore current PF_MEMALLOC flags
 * @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: the actual location, 0 for CPU, or GPU id
 * @granularity: migration granularity, log2 num pages
 * @invalid: not 0 means cpu page table is invalidated
 * @validate_timestamp: system timestamp when range is validated
 * @notifier: registered mmu interval notifier
 * @work_item: deferred work item information
 * @deferred_list: list header used to add range to deferred list
 * @child_list: list header for split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
 * @validated_once: set after the range has been validated successfully at
 *                  least once
 * @mapped_to_gpu: true if the range is currently mapped to GPU page tables
 * @is_error_flag: set when an error was recorded for the range
 *
 * Data structure for virtual memory range shared by CPU and GPUs. It can be
 * allocated from system memory (RAM) or device VRAM, and migrated from RAM
 * to VRAM or from VRAM to RAM.
 */
struct svm_range {
	struct svm_range_list *svms;
	struct mutex migrate_mutex;
	unsigned long start;
	unsigned long last;
	struct interval_tree_node it_node;
	struct list_head list;
	struct list_head update_list;
	uint64_t npages;
	dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
	struct ttm_resource *ttm_res;
	uint64_t offset;
	struct svm_range_bo *svm_bo;
	struct list_head svm_bo_list;
	struct mutex lock;
	unsigned int saved_flags;
	uint32_t flags;
	uint32_t preferred_loc;
	uint32_t prefetch_loc;
	uint32_t actual_loc;
	uint8_t granularity;
	atomic_t invalid;
	ktime_t validate_timestamp;
	struct mmu_interval_notifier notifier;
	struct svm_work_list_item work_item;
	struct list_head deferred_list;
	struct list_head child_list;
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool validated_once;
	bool mapped_to_gpu;
	bool is_error_flag;
};
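
/*
 * Hedged sketch of the interval-tree lookup that @it_node enables, keyed in
 * pages rather than bytes. It assumes the per-process svm_range_list keeps
 * its ranges in a struct rb_root_cached; that field is defined outside this
 * header, so the "objects" parameter here is an assumption.
 */
#if 0
static struct svm_range *example_range_from_page(struct rb_root_cached *objects,
						 unsigned long page_addr)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_first(objects, page_addr, page_addr);
	return node ? container_of(node, struct svm_range, it_node) : NULL;
}
#endif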

/*
 * svm_range_lock/unlock also enter a PF_MEMALLOC (no-reclaim) scope:
 * allocations made while the range lock is held must not enter direct
 * reclaim, which could fire an MMU notifier that tries to take the same
 * lock again.
 */
static inline void svm_range_lock(struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
	memalloc_noreclaim_restore(prange->saved_flags);
	mutex_unlock(&prange->lock);
}

static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_get(&svm_bo->kref);

	return svm_bo;
}
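
/*
 * Hedged usage sketch: a reference taken with svm_range_bo_ref() is dropped
 * through svm_range_bo_unref_async(), declared below; prange is a
 * hypothetical local.
 */
#if 0
	struct svm_range_bo *svm_bo = svm_range_bo_ref(prange->svm_bo);

	/* ... access svm_bo->bo while the reference is held ... */
	svm_range_bo_unref_async(svm_bo);
#endif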

int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	      uint64_t size, uint32_t nattrs,
	      struct kfd_ioctl_svm_attribute *attrs);
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
				      unsigned long addr,
				      struct svm_range **parent);
struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
					  uint32_t gpu_id);
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			    bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
				   unsigned long addr, struct svm_range *parent,
				   struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
			    uint32_t vmid, uint32_t node_id, uint64_t addr,
			    bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
			     struct svm_range *prange, struct mm_struct *mm,
			     enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
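
/*
 * Hedged sketch of the deferred-work pair above: a context that cannot do
 * the update synchronously (e.g. an MMU notifier callback) records the
 * operation on the range and kicks the worker. svms, prange and mm are
 * hypothetical locals.
 */
#if 0
	svm_range_add_list_work(svms, prange, mm, SVM_OP_UPDATE_RANGE_NOTIFIER);
	schedule_deferred_list_work(svms);
#endif
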
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_offset);
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);

/* The SVM API and HMM page migration work together: the device memory type
 * is set to a non-zero value when page migration registers device memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
					(adev)->gmc.is_app_apu)
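
/*
 * Hedged example of gating an SVM path on the macro above; the surrounding
 * context and error code are illustrative only.
 */
#if 0
	if (!KFD_IS_SVM_API_SUPPORTED(adev))
		return -EOPNOTSUPP;
#endif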

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);

void svm_range_set_max_pages(struct amdgpu_device *adev);
int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);

#else

struct kfd_process;

static inline int svm_range_list_init(struct kfd_process *p)
{
	return 0;
}
static inline void svm_range_list_fini(struct kfd_process *p)
{
	/* empty */
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
					  unsigned int pasid,
					  uint32_t vmid, uint32_t node_id,
					  uint64_t addr, bool write_fault)
{
	return -EFAULT;
}

static inline int svm_range_schedule_evict_svm_bo(
		struct amdgpu_amdkfd_fence *fence)
{
	WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
	return -EINVAL;
}

static inline int svm_range_get_info(struct kfd_process *p,
				     uint32_t *num_svm_ranges,
				     uint64_t *svm_priv_data_size)
{
	*num_svm_ranges = 0;
	*svm_priv_data_size = 0;
	return 0;
}

static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
					  uint8_t __user *user_priv_data,
					  uint64_t *priv_offset)
{
	return 0;
}

static inline int kfd_criu_restore_svm(struct kfd_process *p,
				       uint8_t __user *user_priv_ptr,
				       uint64_t *priv_data_offset,
				       uint64_t max_priv_data_size)
{
	return -EINVAL;
}

static inline int kfd_criu_resume_svm(struct kfd_process *p)
{
	return 0;
}

static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
{
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */