1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2019 IBM Corp.
3 #include <linux/idr.h>
4 #include "ocxl_internal.h"
5
ocxl_fn_get(struct ocxl_fn * fn)6 static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
7 {
8 return (get_device(&fn->dev) == NULL) ? NULL : fn;
9 }
10
/* Drop the reference taken by ocxl_fn_get() */
static void ocxl_fn_put(struct ocxl_fn *fn)
{
	put_device(&fn->dev);
}
15
alloc_afu(struct ocxl_fn * fn)16 static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
17 {
18 struct ocxl_afu *afu;
19
20 afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
21 if (!afu)
22 return NULL;
23
24 kref_init(&afu->kref);
25 mutex_init(&afu->contexts_lock);
26 mutex_init(&afu->afu_control_lock);
27 idr_init(&afu->contexts_idr);
28 afu->fn = fn;
29 ocxl_fn_get(fn);
30 return afu;
31 }
32
/* kref release callback: tear down an AFU once the last reference is dropped */
static void free_afu(struct kref *kref)
{
	struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

	idr_destroy(&afu->contexts_idr);
	ocxl_fn_put(afu->fn); /* matches the get in alloc_afu() */
	kfree(afu);
}
41
/* Take an extra reference on an AFU */
void ocxl_afu_get(struct ocxl_afu *afu)
{
	kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);
47
/* Drop a reference on an AFU; the last put frees it through free_afu() */
void ocxl_afu_put(struct ocxl_afu *afu)
{
	kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);
53
assign_afu_actag(struct ocxl_afu * afu)54 static int assign_afu_actag(struct ocxl_afu *afu)
55 {
56 struct ocxl_fn *fn = afu->fn;
57 int actag_count, actag_offset;
58 struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
59
60 /*
61 * if there were not enough actags for the function, each afu
62 * reduces its count as well
63 */
64 actag_count = afu->config.actag_supported *
65 fn->actag_enabled / fn->actag_supported;
66 actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
67 if (actag_offset < 0) {
68 dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
69 actag_count, actag_offset);
70 return actag_offset;
71 }
72 afu->actag_base = fn->actag_base + actag_offset;
73 afu->actag_enabled = actag_count;
74
75 ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
76 afu->actag_base, afu->actag_enabled);
77 dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
78 afu->actag_base, afu->actag_enabled);
79 return 0;
80 }
81
reclaim_afu_actag(struct ocxl_afu * afu)82 static void reclaim_afu_actag(struct ocxl_afu *afu)
83 {
84 struct ocxl_fn *fn = afu->fn;
85 int start_offset, size;
86
87 start_offset = afu->actag_base - fn->actag_base;
88 size = afu->actag_enabled;
89 ocxl_actag_afu_free(afu->fn, start_offset, size);
90 }
91
assign_afu_pasid(struct ocxl_afu * afu)92 static int assign_afu_pasid(struct ocxl_afu *afu)
93 {
94 struct ocxl_fn *fn = afu->fn;
95 int pasid_count, pasid_offset;
96 struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
97
98 /*
99 * We only support the case where the function configuration
100 * requested enough PASIDs to cover all AFUs.
101 */
102 pasid_count = 1 << afu->config.pasid_supported_log;
103 pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
104 if (pasid_offset < 0) {
105 dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
106 pasid_count, pasid_offset);
107 return pasid_offset;
108 }
109 afu->pasid_base = fn->pasid_base + pasid_offset;
110 afu->pasid_count = 0;
111 afu->pasid_max = pasid_count;
112
113 ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
114 afu->pasid_base,
115 afu->config.pasid_supported_log);
116 dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
117 afu->pasid_base, pasid_count);
118 return 0;
119 }
120
reclaim_afu_pasid(struct ocxl_afu * afu)121 static void reclaim_afu_pasid(struct ocxl_afu *afu)
122 {
123 struct ocxl_fn *fn = afu->fn;
124 int start_offset, size;
125
126 start_offset = afu->pasid_base - fn->pasid_base;
127 size = 1 << afu->config.pasid_supported_log;
128 ocxl_pasid_afu_free(afu->fn, start_offset, size);
129 }
130
reserve_fn_bar(struct ocxl_fn * fn,int bar)131 static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
132 {
133 struct pci_dev *dev = to_pci_dev(fn->dev.parent);
134 int rc, idx;
135
136 if (bar != 0 && bar != 2 && bar != 4)
137 return -EINVAL;
138
139 idx = bar >> 1;
140 if (fn->bar_used[idx]++ == 0) {
141 rc = pci_request_region(dev, bar, "ocxl");
142 if (rc)
143 return rc;
144 }
145 return 0;
146 }
147
release_fn_bar(struct ocxl_fn * fn,int bar)148 static void release_fn_bar(struct ocxl_fn *fn, int bar)
149 {
150 struct pci_dev *dev = to_pci_dev(fn->dev.parent);
151 int idx;
152
153 if (bar != 0 && bar != 2 && bar != 4)
154 return;
155
156 idx = bar >> 1;
157 if (--fn->bar_used[idx] == 0)
158 pci_release_region(dev, bar);
159 WARN_ON(fn->bar_used[idx] < 0);
160 }
161
map_mmio_areas(struct ocxl_afu * afu)162 static int map_mmio_areas(struct ocxl_afu *afu)
163 {
164 int rc;
165 struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);
166
167 rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
168 if (rc)
169 return rc;
170
171 rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
172 if (rc) {
173 release_fn_bar(afu->fn, afu->config.global_mmio_bar);
174 return rc;
175 }
176
177 afu->global_mmio_start =
178 pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
179 afu->config.global_mmio_offset;
180 afu->pp_mmio_start =
181 pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
182 afu->config.pp_mmio_offset;
183
184 afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
185 afu->config.global_mmio_size);
186 if (!afu->global_mmio_ptr) {
187 release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
188 release_fn_bar(afu->fn, afu->config.global_mmio_bar);
189 dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
190 return -ENOMEM;
191 }
192
193 /*
194 * Leave an empty page between the per-process mmio area and
195 * the AFU interrupt mappings
196 */
197 afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
198 return 0;
199 }
200
/* Undo map_mmio_areas(): unmap the global area and drop the BAR references */
static void unmap_mmio_areas(struct ocxl_afu *afu)
{
	if (afu->global_mmio_ptr) {
		iounmap(afu->global_mmio_ptr);
		afu->global_mmio_ptr = NULL;
	}
	afu->global_mmio_start = 0;
	afu->pp_mmio_start = 0;
	release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}
212
/*
 * Read one AFU's configuration space and assign its resources:
 * acTags, PASIDs, then MMIO areas.
 * Returns 0 on success, a negative errno otherwise; resources assigned
 * so far are reclaimed on failure.
 */
static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
	int rc;

	rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
	if (rc)
		return rc;

	rc = assign_afu_actag(afu);
	if (rc)
		return rc;

	rc = assign_afu_pasid(afu);
	if (rc)
		goto err_free_actag;

	rc = map_mmio_areas(afu);
	if (rc)
		goto err_free_pasid;

	return 0;

err_free_pasid:
	reclaim_afu_pasid(afu);
err_free_actag:
	reclaim_afu_actag(afu);
	return rc;
}
241
/* Undo configure_afu(): release the MMIO, PASID and acTag resources */
static void deconfigure_afu(struct ocxl_afu *afu)
{
	unmap_mmio_areas(afu);
	reclaim_afu_pasid(afu);
	reclaim_afu_actag(afu);
}
248
/*
 * Enable the AFU by setting its state in the AFU control DVSEC.
 * Always returns 0; the int return keeps the configure/activate call
 * sequence uniform in init_afu().
 */
static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);

	return 0;
}
255
/* Disable the AFU by clearing its state in the AFU control DVSEC */
static void deactivate_afu(struct ocxl_afu *afu)
{
	struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);

	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}
262
/*
 * Allocate, configure and enable one AFU of the function, then add it
 * to the function's AFU list.
 * Returns 0 on success, a negative errno otherwise.
 */
static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *afu;
	int rc;

	afu = alloc_afu(fn);
	if (!afu)
		return -ENOMEM;

	rc = configure_afu(afu, afu_idx, dev);
	if (rc)
		goto err_put;

	rc = activate_afu(dev, afu);
	if (rc)
		goto err_deconfigure;

	list_add_tail(&afu->list, &fn->afu_list);
	return 0;

err_deconfigure:
	deconfigure_afu(afu);
err_put:
	ocxl_afu_put(afu);
	return rc;
}
289
/*
 * Remove an AFU from its function: unlink it, detach any attached
 * contexts, then disable and deconfigure it before dropping the
 * allocation reference.
 */
static void remove_afu(struct ocxl_afu *afu)
{
	list_del(&afu->list);
	ocxl_context_detach_all(afu);
	deactivate_afu(afu);
	deconfigure_afu(afu);
	ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}
298
alloc_function(void)299 static struct ocxl_fn *alloc_function(void)
300 {
301 struct ocxl_fn *fn;
302
303 fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
304 if (!fn)
305 return NULL;
306
307 INIT_LIST_HEAD(&fn->afu_list);
308 INIT_LIST_HEAD(&fn->pasid_list);
309 INIT_LIST_HEAD(&fn->actag_list);
310
311 return fn;
312 }
313
free_function(struct ocxl_fn * fn)314 static void free_function(struct ocxl_fn *fn)
315 {
316 WARN_ON(!list_empty(&fn->afu_list));
317 WARN_ON(!list_empty(&fn->pasid_list));
318 kfree(fn);
319 }
320
/* Device release callback: frees the function when its last device
 * reference is dropped (see set_function_device()) */
static void free_function_dev(struct device *dev)
{
	struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);

	free_function(fn);
}
327
set_function_device(struct ocxl_fn * fn,struct pci_dev * dev)328 static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
329 {
330 int rc;
331
332 fn->dev.parent = &dev->dev;
333 fn->dev.release = free_function_dev;
334 rc = dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
335 if (rc)
336 return rc;
337 return 0;
338 }
339
assign_function_actag(struct ocxl_fn * fn)340 static int assign_function_actag(struct ocxl_fn *fn)
341 {
342 struct pci_dev *dev = to_pci_dev(fn->dev.parent);
343 u16 base, enabled, supported;
344 int rc;
345
346 rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
347 if (rc)
348 return rc;
349
350 fn->actag_base = base;
351 fn->actag_enabled = enabled;
352 fn->actag_supported = supported;
353
354 ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
355 fn->actag_base, fn->actag_enabled);
356 dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
357 fn->actag_base, fn->actag_enabled);
358 return 0;
359 }
360
set_function_pasid(struct ocxl_fn * fn)361 static int set_function_pasid(struct ocxl_fn *fn)
362 {
363 struct pci_dev *dev = to_pci_dev(fn->dev.parent);
364 int rc, desired_count, max_count;
365
366 /* A function may not require any PASID */
367 if (fn->config.max_pasid_log < 0)
368 return 0;
369
370 rc = ocxl_config_get_pasid_info(dev, &max_count);
371 if (rc)
372 return rc;
373
374 desired_count = 1 << fn->config.max_pasid_log;
375
376 if (desired_count > max_count) {
377 dev_err(&fn->dev,
378 "Function requires more PASIDs than is available (%d vs. %d)\n",
379 desired_count, max_count);
380 return -ENOSPC;
381 }
382
383 fn->pasid_base = 0;
384 return 0;
385 }
386
/*
 * Enable the PCI function and configure it: read the function DVSEC,
 * set up the function device, assign acTags and PASIDs, set up the
 * opencapi link and specify the Transaction Layer.
 * Returns 0 on success, a negative errno otherwise.
 * NOTE(review): on failure after pci_enable_device(), the device is
 * left enabled; pci_disable_device() only runs in
 * deconfigure_function() — confirm the error paths are intended.
 */
static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
		return rc;
	}

	/*
	 * Once it has been confirmed to work on our hardware, we
	 * should reset the function, to force the adapter to restart
	 * from scratch.
	 * A function reset would also reset all its AFUs.
	 *
	 * Some hints for implementation:
	 *
	 * - there's no status bit to know when the reset is done. We
	 *   should try reading the config space to know when it's
	 *   done.
	 * - probably something like:
	 *	Reset
	 *	wait 100ms
	 *	issue config read
	 *	allow device up to 1 sec to return success on config
	 *	read before declaring it broken
	 *
	 * Some shared logic on the card (CFG, TLX) won't be reset, so
	 * there's no guarantee that it will be enough.
	 */
	rc = ocxl_config_read_function(dev, &fn->config);
	if (rc)
		return rc;

	rc = set_function_device(fn, dev);
	if (rc)
		return rc;

	rc = assign_function_actag(fn);
	if (rc)
		return rc;

	rc = set_function_pasid(fn);
	if (rc)
		return rc;

	rc = ocxl_link_setup(dev, 0, &fn->link);
	if (rc)
		return rc;

	rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
	if (rc) {
		ocxl_link_release(dev, fn->link);
		return rc;
	}
	return 0;
}
445
/* Undo configure_function(): release the link and disable the PCI device */
static void deconfigure_function(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);

	ocxl_link_release(dev, fn->link);
	pci_disable_device(dev);
}
453
/*
 * Allocate, configure and register the function device.
 * Returns the function on success, an ERR_PTR otherwise.
 *
 * Before device_register() succeeds the structure is freed directly;
 * once registered, the device core owns its lifetime and put_device()
 * frees it through free_function_dev().
 */
static struct ocxl_fn *init_function(struct pci_dev *dev)
{
	struct ocxl_fn *fn;
	int rc;

	fn = alloc_function();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	rc = configure_function(fn, dev);
	if (rc) {
		free_function(fn);
		return ERR_PTR(rc);
	}

	rc = device_register(&fn->dev);
	if (rc) {
		deconfigure_function(fn);
		put_device(&fn->dev); /* triggers free_function_dev() */
		return ERR_PTR(rc);
	}
	return fn;
}
477
478 // Device detection & initialisation
479
ocxl_function_open(struct pci_dev * dev)480 struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
481 {
482 int rc, afu_count = 0;
483 u8 afu;
484 struct ocxl_fn *fn;
485
486 if (!radix_enabled()) {
487 dev_err(&dev->dev, "Unsupported memory model (hash)\n");
488 return ERR_PTR(-ENODEV);
489 }
490
491 fn = init_function(dev);
492 if (IS_ERR(fn)) {
493 dev_err(&dev->dev, "function init failed: %li\n",
494 PTR_ERR(fn));
495 return fn;
496 }
497
498 for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
499 rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
500 if (rc > 0) {
501 rc = init_afu(dev, fn, afu);
502 if (rc) {
503 dev_err(&dev->dev,
504 "Can't initialize AFU index %d\n", afu);
505 continue;
506 }
507 afu_count++;
508 }
509 }
510 dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
511 return fn;
512 }
513 EXPORT_SYMBOL_GPL(ocxl_function_open);
514
/* Return the list of AFUs initialized for the function */
struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
	return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);
520
/* Look up an AFU of the function by index; returns NULL if not found */
struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *cur;

	list_for_each_entry(cur, &fn->afu_list, list)
		if (cur->config.idx == afu_idx)
			return cur;

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);
533
/* Return the function configuration read from its config space */
const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
	return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);
539
/*
 * Tear down a function opened with ocxl_function_open(): remove all
 * its AFUs, deconfigure it and unregister its device (the device
 * release callback frees the structure).
 */
void ocxl_function_close(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu, *tmp;

	list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
		remove_afu(afu);
	}

	deconfigure_function(fn);
	device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);
552
553 // AFU Metadata
554
/* Return the AFU configuration read from its config space */
struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
	return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);
560
/* Attach caller-owned private data to the AFU (not freed by ocxl) */
void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
	afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);
566
ocxl_afu_get_private(struct ocxl_afu * afu)567 void *ocxl_afu_get_private(struct ocxl_afu *afu)
568 {
569 if (afu)
570 return afu->private;
571
572 return NULL;
573 }
574 EXPORT_SYMBOL_GPL(ocxl_afu_get_private);
575