Lines matching refs: idxd

78 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_match() local
80 if (idxd->state != IDXD_DEV_CONF_READY) in idxd_config_bus_match()
85 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_match() local
87 if (idxd->state < IDXD_DEV_CONF_READY) in idxd_config_bus_match()
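
The two state checks above gate driver binding: an idxd device itself must be exactly IDXD_DEV_CONF_READY before it can match, while a work queue only needs its parent device to have reached at least that state. A minimal sketch of how these checks likely sit inside the bus match callback; is_idxd_dev(), is_idxd_wq_dev() and confdev_to_wq() are assumed helpers not shown in this listing.

/* Sketch only: reconstructed around the matched lines above. */
static int idxd_config_bus_match(struct device *dev, struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {				/* assumed type check */
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {		/* assumed type check */
		struct idxd_wq *wq = confdev_to_wq(dev);	/* assumed helper */
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	}

	return matched;
}
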
111 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_probe() local
113 if (idxd->state != IDXD_DEV_CONF_READY) { in idxd_config_bus_probe()
122 spin_lock_irqsave(&idxd->dev_lock, flags); in idxd_config_bus_probe()
123 rc = idxd_device_config(idxd); in idxd_config_bus_probe()
124 spin_unlock_irqrestore(&idxd->dev_lock, flags); in idxd_config_bus_probe()
132 rc = idxd_device_enable(idxd); in idxd_config_bus_probe()
141 rc = idxd_register_dma_device(idxd); in idxd_config_bus_probe()
150 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_probe() local
154 if (idxd->state != IDXD_DEV_ENABLED) { in idxd_config_bus_probe()
185 spin_lock_irqsave(&idxd->dev_lock, flags); in idxd_config_bus_probe()
186 rc = idxd_device_config(idxd); in idxd_config_bus_probe()
187 spin_unlock_irqrestore(&idxd->dev_lock, flags); in idxd_config_bus_probe()
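
Taken together, the device branch of the probe callback is: refuse anything that is not CONF_READY, program the configured state into the hardware while holding dev_lock, enable the device, then register it with the dmaengine core. A condensed sketch of that branch; the error codes and the unwind on a failed DMA registration are assumptions.

/* Condensed sketch of the device branch of idxd_config_bus_probe(). */
if (is_idxd_dev(dev)) {
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long flags;
	int rc;

	if (idxd->state != IDXD_DEV_CONF_READY)
		return -ENXIO;			/* assumed error code */

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_device_config(idxd);		/* write the shadow config out */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc < 0)
		return rc;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_register_dma_device(idxd);	/* expose via dmaengine */
	if (rc < 0) {
		idxd_device_disable(idxd);	/* assumed unwind */
		return rc;
	}
	return 0;
}
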
242 struct idxd_device *idxd = wq->idxd; in disable_wq() local
243 struct device *dev = &idxd->pdev->dev; in disable_wq()
290 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_remove() local
294 dev_name(&idxd->conf_dev)); in idxd_config_bus_remove()
295 for (i = 0; i < idxd->max_wqs; i++) { in idxd_config_bus_remove()
296 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_config_bus_remove()
301 dev_name(&idxd->conf_dev)); in idxd_config_bus_remove()
305 idxd_unregister_dma_device(idxd); in idxd_config_bus_remove()
306 rc = idxd_device_disable(idxd); in idxd_config_bus_remove()
307 for (i = 0; i < idxd->max_wqs; i++) { in idxd_config_bus_remove()
308 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_config_bus_remove()
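
The remove callback unwinds in the opposite order: walk every work queue first, then detach the device from dmaengine and disable it, then make a second pass over the work queues. A sketch of that shape; what each loop body does per wq is an assumption, since only the iteration appears in the listing.

/* Sketch of the device branch of idxd_config_bus_remove(). */
dev_dbg(dev, "%s removing dev %s\n", __func__, dev_name(&idxd->conf_dev));

for (i = 0; i < idxd->max_wqs; i++) {
	/* first pass over idxd->wqs[i]: shut down anything still active (assumed) */
}

idxd_unregister_dma_device(idxd);
rc = idxd_device_disable(idxd);

for (i = 0; i < idxd->max_wqs; i++) {
	/* second pass over idxd->wqs[i]: reset per-wq software state (assumed) */
}
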
355 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) in idxd_get_bus_type() argument
357 return idxd_bus_types[idxd->type]; in idxd_get_bus_type()
360 static struct device_type *idxd_get_device_type(struct idxd_device *idxd) in idxd_get_device_type() argument
362 if (idxd->type == IDXD_TYPE_DSA) in idxd_get_device_type()
414 struct idxd_device *idxd = engine->idxd; in engine_group_id_store() local
423 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in engine_group_id_store()
426 if (id > idxd->max_groups - 1 || id < -1) in engine_group_id_store()
441 engine->group = &idxd->groups[id]; in engine_group_id_store()
467 static void idxd_set_free_tokens(struct idxd_device *idxd) in idxd_set_free_tokens() argument
471 for (i = 0, tokens = 0; i < idxd->max_groups; i++) { in idxd_set_free_tokens()
472 struct idxd_group *g = &idxd->groups[i]; in idxd_set_free_tokens()
477 idxd->nr_tokens = idxd->max_tokens - tokens; in idxd_set_free_tokens()
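
These lines are the token bookkeeping: the free-token count is the hardware maximum minus the sum of every group's reservation. A sketch of the whole helper, assuming tokens_reserved is the per-group field being summed (the same field the group_tokens_reserved_store lines below operate on).

/* Sketch: free tokens = device maximum minus all per-group reservations. */
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
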
496 struct idxd_device *idxd = group->idxd; in group_tokens_reserved_store() local
504 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_tokens_reserved_store()
507 if (idxd->state == IDXD_DEV_ENABLED) in group_tokens_reserved_store()
510 if (val > idxd->max_tokens) in group_tokens_reserved_store()
513 if (val > idxd->nr_tokens + group->tokens_reserved) in group_tokens_reserved_store()
517 idxd_set_free_tokens(idxd); in group_tokens_reserved_store()
541 struct idxd_device *idxd = group->idxd; in group_tokens_allowed_store() local
549 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_tokens_allowed_store()
552 if (idxd->state == IDXD_DEV_ENABLED) in group_tokens_allowed_store()
556 val > group->tokens_reserved + idxd->nr_tokens) in group_tokens_allowed_store()
583 struct idxd_device *idxd = group->idxd; in group_use_token_limit_store() local
591 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_use_token_limit_store()
594 if (idxd->state == IDXD_DEV_ENABLED) in group_use_token_limit_store()
597 if (idxd->token_limit == 0) in group_use_token_limit_store()
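
The three token-related group attributes share one store pattern: parse the value, refuse the write unless the device is configurable and not yet enabled, range-check against the device-wide counters, then commit and recompute the free pool. A representative sketch for tokens_reserved; the kstrtoul parsing, the container_of recovery of the group, and the exact error codes are assumptions.

/* Sketch of the shared store pattern used by the group token attributes. */
static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}
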
615 struct idxd_device *idxd = group->idxd; in group_engines_show() local
617 for (i = 0; i < idxd->max_engines; i++) { in group_engines_show()
618 struct idxd_engine *engine = &idxd->engines[i]; in group_engines_show()
625 idxd->id, engine->id); in group_engines_show()
644 struct idxd_device *idxd = group->idxd; in group_work_queues_show() local
646 for (i = 0; i < idxd->max_wqs; i++) { in group_work_queues_show()
647 struct idxd_wq *wq = &idxd->wqs[i]; in group_work_queues_show()
654 idxd->id, wq->id); in group_work_queues_show()
682 struct idxd_device *idxd = group->idxd; in group_traffic_class_a_store() local
690 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_a_store()
693 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_a_store()
723 struct idxd_device *idxd = group->idxd; in group_traffic_class_b_store() local
731 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_b_store()
734 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_b_store()
814 struct idxd_device *idxd = wq->idxd; in wq_group_id_store() local
823 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_group_id_store()
829 if (id > idxd->max_groups - 1 || id < -1) in wq_group_id_store()
840 group = &idxd->groups[id]; in wq_group_id_store()
867 struct idxd_device *idxd = wq->idxd; in wq_mode_store() local
869 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_mode_store()
896 static int total_claimed_wq_size(struct idxd_device *idxd) in total_claimed_wq_size() argument
901 for (i = 0; i < idxd->max_wqs; i++) { in total_claimed_wq_size()
902 struct idxd_wq *wq = &idxd->wqs[i]; in total_claimed_wq_size()
916 struct idxd_device *idxd = wq->idxd; in wq_size_store() local
923 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_size_store()
929 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) in wq_size_store()
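
The size check is the interesting part of wq_size_store(): a new size is accepted only if, after swapping out this wq's current contribution, the total claimed by all work queues still fits within max_wq_size. A sketch of the helper and the check; the per-wq size field comes from the listing, everything else is assumed.

/* Sketch: sum every wq's configured size, then check the device budget. */
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

/* ...and inside wq_size_store(): */
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
	return -EINVAL;	/* assumed error code */
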
953 struct idxd_device *idxd = wq->idxd; in wq_priority_store() local
960 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_priority_store()
1094 struct idxd_device *idxd = wq->idxd; in wq_max_transfer_size_store() local
1105 if (xfer_size > idxd->max_xfer_bytes) in wq_max_transfer_size_store()
1128 struct idxd_device *idxd = wq->idxd; in wq_max_batch_size_store() local
1139 if (batch_size > idxd->max_batch_size) in wq_max_batch_size_store()
1178 struct idxd_device *idxd = in version_show() local
1181 return sprintf(buf, "%#x\n", idxd->hw.version); in version_show()
1189 struct idxd_device *idxd = in max_work_queues_size_show() local
1192 return sprintf(buf, "%u\n", idxd->max_wq_size); in max_work_queues_size_show()
1199 struct idxd_device *idxd = in max_groups_show() local
1202 return sprintf(buf, "%u\n", idxd->max_groups); in max_groups_show()
1209 struct idxd_device *idxd = in max_work_queues_show() local
1212 return sprintf(buf, "%u\n", idxd->max_wqs); in max_work_queues_show()
1219 struct idxd_device *idxd = in max_engines_show() local
1222 return sprintf(buf, "%u\n", idxd->max_engines); in max_engines_show()
1229 struct idxd_device *idxd = in numa_node_show() local
1232 return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); in numa_node_show()
1239 struct idxd_device *idxd = in max_batch_size_show() local
1242 return sprintf(buf, "%u\n", idxd->max_batch_size); in max_batch_size_show()
1250 struct idxd_device *idxd = in max_transfer_size_show() local
1253 return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); in max_transfer_size_show()
1260 struct idxd_device *idxd = in op_cap_show() local
1263 return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); in op_cap_show()
1270 struct idxd_device *idxd = in gen_cap_show() local
1273 return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); in gen_cap_show()
1280 struct idxd_device *idxd = in configurable_show() local
1284 test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); in configurable_show()
1291 struct idxd_device *idxd = in clients_show() local
1296 spin_lock_irqsave(&idxd->dev_lock, flags); in clients_show()
1297 for (i = 0; i < idxd->max_wqs; i++) { in clients_show()
1298 struct idxd_wq *wq = &idxd->wqs[i]; in clients_show()
1302 spin_unlock_irqrestore(&idxd->dev_lock, flags); in clients_show()
1311 struct idxd_device *idxd = in state_show() local
1314 switch (idxd->state) { in state_show()
1331 struct idxd_device *idxd = in errors_show() local
1336 spin_lock_irqsave(&idxd->dev_lock, flags); in errors_show()
1338 out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); in errors_show()
1339 spin_unlock_irqrestore(&idxd->dev_lock, flags); in errors_show()
1349 struct idxd_device *idxd = in max_tokens_show() local
1352 return sprintf(buf, "%u\n", idxd->max_tokens); in max_tokens_show()
1359 struct idxd_device *idxd = in token_limit_show() local
1362 return sprintf(buf, "%u\n", idxd->token_limit); in token_limit_show()
1369 struct idxd_device *idxd = in token_limit_store() local
1378 if (idxd->state == IDXD_DEV_ENABLED) in token_limit_store()
1381 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in token_limit_store()
1384 if (!idxd->hw.group_cap.token_limit) in token_limit_store()
1387 if (val > idxd->hw.group_cap.total_tokens) in token_limit_store()
1390 idxd->token_limit = val; in token_limit_store()
1398 struct idxd_device *idxd = in cdev_major_show() local
1401 return sprintf(buf, "%u\n", idxd->major); in cdev_major_show()
1408 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); in cmd_status_show() local
1410 return sprintf(buf, "%#x\n", idxd->cmd_status); in cmd_status_show()
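
Everything from version_show() down to cmd_status_show() is the same one-liner show pattern: recover the idxd_device from the embedded conf_dev with container_of() and print a single field. A representative sketch; the DEVICE_ATTR_RO wiring is an assumption about how the attribute is declared.

/* Sketch of the common device-attribute show pattern. */
static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);
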
1445 static int idxd_setup_engine_sysfs(struct idxd_device *idxd) in idxd_setup_engine_sysfs() argument
1447 struct device *dev = &idxd->pdev->dev; in idxd_setup_engine_sysfs()
1450 for (i = 0; i < idxd->max_engines; i++) { in idxd_setup_engine_sysfs()
1451 struct idxd_engine *engine = &idxd->engines[i]; in idxd_setup_engine_sysfs()
1453 engine->conf_dev.parent = &idxd->conf_dev; in idxd_setup_engine_sysfs()
1455 idxd->id, engine->id); in idxd_setup_engine_sysfs()
1456 engine->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_engine_sysfs()
1472 struct idxd_engine *engine = &idxd->engines[i]; in idxd_setup_engine_sysfs()
1479 static int idxd_setup_group_sysfs(struct idxd_device *idxd) in idxd_setup_group_sysfs() argument
1481 struct device *dev = &idxd->pdev->dev; in idxd_setup_group_sysfs()
1484 for (i = 0; i < idxd->max_groups; i++) { in idxd_setup_group_sysfs()
1485 struct idxd_group *group = &idxd->groups[i]; in idxd_setup_group_sysfs()
1487 group->conf_dev.parent = &idxd->conf_dev; in idxd_setup_group_sysfs()
1489 idxd->id, group->id); in idxd_setup_group_sysfs()
1490 group->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_group_sysfs()
1506 struct idxd_group *group = &idxd->groups[i]; in idxd_setup_group_sysfs()
1513 static int idxd_setup_wq_sysfs(struct idxd_device *idxd) in idxd_setup_wq_sysfs() argument
1515 struct device *dev = &idxd->pdev->dev; in idxd_setup_wq_sysfs()
1518 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_wq_sysfs()
1519 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_setup_wq_sysfs()
1521 wq->conf_dev.parent = &idxd->conf_dev; in idxd_setup_wq_sysfs()
1522 dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); in idxd_setup_wq_sysfs()
1523 wq->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_wq_sysfs()
1539 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_setup_wq_sysfs()
1546 static int idxd_setup_device_sysfs(struct idxd_device *idxd) in idxd_setup_device_sysfs() argument
1548 struct device *dev = &idxd->pdev->dev; in idxd_setup_device_sysfs()
1552 sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); in idxd_setup_device_sysfs()
1553 idxd->conf_dev.parent = dev; in idxd_setup_device_sysfs()
1554 dev_set_name(&idxd->conf_dev, "%s", devname); in idxd_setup_device_sysfs()
1555 idxd->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_device_sysfs()
1556 idxd->conf_dev.groups = idxd_attribute_groups; in idxd_setup_device_sysfs()
1557 idxd->conf_dev.type = idxd_get_device_type(idxd); in idxd_setup_device_sysfs()
1559 dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); in idxd_setup_device_sysfs()
1560 rc = device_register(&idxd->conf_dev); in idxd_setup_device_sysfs()
1562 put_device(&idxd->conf_dev); in idxd_setup_device_sysfs()
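
Device registration is the standard driver-model sequence: fill in parent, name, bus, attribute groups and type on the embedded struct device, then device_register() it, dropping the reference with put_device() if registration fails. A trimmed sketch of that body; the name-buffer size constant is an assumption.

/* Trimmed sketch of the registration body in idxd_setup_device_sysfs(). */
char devname[IDXD_NAME_SIZE];	/* assumed buffer-size constant */
int rc;

sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
idxd->conf_dev.parent = dev;			/* dev is &idxd->pdev->dev */
dev_set_name(&idxd->conf_dev, "%s", devname);
idxd->conf_dev.bus = idxd_get_bus_type(idxd);
idxd->conf_dev.groups = idxd_attribute_groups;
idxd->conf_dev.type = idxd_get_device_type(idxd);

rc = device_register(&idxd->conf_dev);
if (rc < 0) {
	put_device(&idxd->conf_dev);	/* a failed device_register still holds a ref */
	return rc;
}
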
1569 int idxd_setup_sysfs(struct idxd_device *idxd) in idxd_setup_sysfs() argument
1571 struct device *dev = &idxd->pdev->dev; in idxd_setup_sysfs()
1574 rc = idxd_setup_device_sysfs(idxd); in idxd_setup_sysfs()
1580 rc = idxd_setup_wq_sysfs(idxd); in idxd_setup_sysfs()
1587 rc = idxd_setup_group_sysfs(idxd); in idxd_setup_sysfs()
1594 rc = idxd_setup_engine_sysfs(idxd); in idxd_setup_sysfs()
1604 void idxd_cleanup_sysfs(struct idxd_device *idxd) in idxd_cleanup_sysfs() argument
1608 for (i = 0; i < idxd->max_wqs; i++) { in idxd_cleanup_sysfs()
1609 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_cleanup_sysfs()
1614 for (i = 0; i < idxd->max_engines; i++) { in idxd_cleanup_sysfs()
1615 struct idxd_engine *engine = &idxd->engines[i]; in idxd_cleanup_sysfs()
1620 for (i = 0; i < idxd->max_groups; i++) { in idxd_cleanup_sysfs()
1621 struct idxd_group *group = &idxd->groups[i]; in idxd_cleanup_sysfs()
1626 device_unregister(&idxd->conf_dev); in idxd_cleanup_sysfs()
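
Cleanup mirrors setup object by object: every wq, engine and group conf_dev is unregistered, and the device-level conf_dev goes last. A sketch, assuming device_unregister() is what each loop body calls.

/* Sketch of idxd_cleanup_sysfs(); per-object unregister calls are assumed. */
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(&idxd->wqs[i].conf_dev);

	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(&idxd->engines[i].conf_dev);

	for (i = 0; i < idxd->max_groups; i++)
		device_unregister(&idxd->groups[i].conf_dev);

	device_unregister(&idxd->conf_dev);
}
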