// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);

/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

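/*
 * Allocate the array of hardware descriptors for a WQ, NUMA-local to the
 * device. On failure, any descriptors already allocated are freed.
 */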
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
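/*
 * Allocate the per-descriptor resources for a kernel-owned WQ: hardware
 * descriptors, a DMA-coherent completion record array, software descriptor
 * wrappers, and the sbitmap used to hand out descriptor slots. User-owned
 * WQs need none of these and are skipped.
 */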
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
				  sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

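/*
 * Issue the ENABLE_WQ command for this WQ and update the software state.
 * Returns -ENXIO if the WQ is already enabled in software or if the device
 * rejects the command.
 */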
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

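/*
 * Issue the DISABLE_WQ command. The operand encodes the WQ id as a bit
 * position within a 16-bit mask plus the mask index in the upper word.
 */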
int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

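/* Drain all outstanding descriptors on an enabled WQ; a no-op otherwise. */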
void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

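/*
 * Map this WQ's limited portal from the device's WQ BAR so kernel users
 * can submit descriptors to it.
 */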
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}

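/*
 * Reset the software view of a WQ to its unconfigured defaults and zero
 * the corresponding WQCFG register block. Caller must hold dev_lock.
 */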
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
void idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

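/*
 * Submit a command to the device and sleep until it completes, as signaled
 * through the command interrupt. Only one command may be in flight at a
 * time; other callers wait on cmd_waitq until IDXD_FLAG_CMD_RUNNING clears.
 */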
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

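/*
 * Enable the device. A "device already enabled" completion code from
 * hardware is treated as success.
 */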
int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

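/*
 * Disable the device and clear the software state of all enabled WQs.
 * A device that is already disabled is treated as success.
 */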
int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

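/*
 * Program the WQCFG register block for a configured WQ from the software
 * wqcfg copy: size, threshold, privilege/mode/priority, and the transfer
 * and batch size limits (expressed as power-of-two shifts).
 */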
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(wq->wqcfg, 0, idxd->wqcfg_size);

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg->mode = 1;
	wq->wqcfg->priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

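/*
 * Build each group's WQ bitmap from the configured WQs. Only dedicated
 * WQs with a group and a non-zero size are accepted, and at least one WQ
 * must be configured.
 */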
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}

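/*
 * Write the full device configuration: WQ-to-group bitmaps, engine-to-group
 * assignments, group flags, then the WQCFG and GRPCFG registers. Caller
 * must hold dev_lock.
 */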
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}