// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

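/*
 * hl_encaps_handle_do_release - release callback for an encapsulated signals
 * handle. Removes the handle from the context's signals manager IDR and
 * frees it. Unlike the _sob variant below, it does not touch the HW SOB
 * refcount.
 */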
void hl_encaps_handle_do_release(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
	struct hl_ctx *ctx = handle->hdev->compute_ctx;
	struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	kfree(handle);
}

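/*
 * hl_encaps_handle_do_release_sob - same as hl_encaps_handle_do_release(),
 * but also drops the refcount taken on the HW SOB when the signals were
 * reserved, as no CS ever consumed the reservation.
 */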
static void hl_encaps_handle_do_release_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
		container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
	struct hl_ctx *ctx = handle->hdev->compute_ctx;
	struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;

	/* If we reached here, a signals reservation was made but a CS with
	 * encapsulated signals was never submitted, so we must drop the
	 * refcount taken on the HW SOB at reservation time.
	 */
	hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	kfree(handle);
}

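/* Initialize the lock and handles IDR of the encapsulated signals manager */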
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

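/*
 * hl_encaps_sig_mgr_fini - cleanup of the encapsulated signals manager.
 * Any handle still present in the IDR at this point was leaked, so warn and
 * drop the final reference on each one, releasing its HW SOB as well.
 */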
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
			struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount,
					hl_encaps_handle_do_release_sob);
	}

	idr_destroy(&mgr->handles);
}

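/*
 * hl_ctx_fini - actual teardown of a context. Reached only from the kref
 * release path, i.e. when no CS still holds a reference to the context.
 */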
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS we increment the ref count, and for
	 * every CS that finished we decrement it, so we won't arrive at
	 * this function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context.
		 */
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		hdev->asic_funcs->ctx_fini(ctx);
		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);

		/* Scrub both SRAM and DRAM */
		hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

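/*
 * hl_ctx_do_release - kref release callback for a context. Tears the context
 * down and drops the reference it held on the owning process' hpriv.
 */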
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}

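/*
 * hl_ctx_create - create a new user context
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the calling process
 *
 * Allocate a context, add it to the process' context manager IDR, initialize
 * it and make it both the process' and the device's compute context.
 *
 * @return 0 on success, otherwise a negative error code
 */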
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->compute_ctx = ctx;

	return 0;

remove_from_idr:
	mutex_lock(&mgr->ctx_lock);
	idr_remove(&mgr->ctx_handles, ctx->handle);
	mutex_unlock(&mgr->ctx_lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

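/*
 * hl_ctx_free - drop the caller's reference on a context. If command
 * submissions are still in flight, the actual release is deferred to the
 * last hl_ctx_put() call.
 */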
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;
}

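/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true for the kernel driver context, false for a user one
 *
 * The kernel context gets the reserved ASID 0 and skips the user-only
 * initializations (CB VA pool, encapsulated signals manager).
 *
 * @return 0 on success, otherwise a negative error code
 */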
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

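/* Increment the context reference counter */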
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

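/*
 * hl_ctx_put - decrement the context reference counter and release the
 * context if it reaches zero.
 *
 * @return 1 if the context was released, 0 otherwise
 */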
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 * error pointer.
 *
 * NOTE: this function shall be called with cs_lock locked
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

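/*
 * hl_ctx_get_fence - get a single CS fence, taking the CS lock internally
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * Typical caller pattern (a sketch; error handling elided):
 *
 *	fence = hl_ctx_get_fence(ctx, seq);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	if (!fence)
 *		return 0;	(fence is gone, the CS completed long ago)
 *	...wait on the fence...
 *	hl_fence_put(fence);
 *
 * @return same semantics as hl_ctx_get_fence_locked()
 */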
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of seq_arr and fence_arr
 *
 * @return 0 on success, otherwise a non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
			struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
				seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}