Lines matching "+full:0 +full:xd": identifier search results over the Thunderbolt XDomain discovery protocol support code (drivers/thunderbolt/xdomain.c). Each entry shows the source line number, the matched line, and the enclosing function.
45 UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
46 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
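The two lines above initialize the UUID that identifies the XDomain discovery protocol itself; incoming XDomain packets carrying this UUID are handled by the built-in discovery code, anything else is offered to registered protocol handlers. As a minimal sketch (the value and variable name below are made up for illustration), a protocol UUID is declared the same way with UUID_INIT():

#include <linux/uuid.h>

/*
 * Hypothetical protocol UUID; UUID_INIT() takes the 32-bit field, the two
 * 16-bit fields and the eight trailing bytes of the UUID in order.
 */
static const uuid_t example_proto_uuid =
	UUID_INIT(0x12345678, 0x9abc, 0xdef0,
		  0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);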
85 req->result.err = 0; in tb_xdomain_copy()
114 * @xd: XDomain to send the message
122 * Return: %0 in case of success and negative errno in case of failure
124 int tb_xdomain_response(struct tb_xdomain *xd, const void *response, in tb_xdomain_response() argument
127 return __tb_xdomain_response(xd->tb->ctl, response, size, type); in tb_xdomain_response()
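tb_xdomain_response() is the exported helper a protocol driver uses to answer a request it received through its protocol handler; it sends the packet and does not wait for anything back. A minimal sketch, assuming a driver-defined reply layout (struct example_reply is hypothetical):

#include <linux/thunderbolt.h>

struct example_reply {
	u32 status;
};

/* A sketch only: the reply layout and the calling context are hypothetical. */
static int example_send_reply(struct tb_xdomain *xd,
			      const struct example_reply *reply)
{
	/* Delivered to the peer as an XDomain response control packet. */
	return tb_xdomain_response(xd, reply, sizeof(*reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}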
161 * @xd: XDomain to send the request
174 * Return: %0 in case of success and negative errno in case of failure
176 int tb_xdomain_request(struct tb_xdomain *xd, const void *request, in tb_xdomain_request() argument
181 return __tb_xdomain_request(xd->tb->ctl, request, request_size, in tb_xdomain_request()
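tb_xdomain_request() pairs an outgoing packet with a blocking wait for the matching reply or a timeout. A minimal sketch with hypothetical request/reply structures; the packet types and the 1000 ms timeout are protocol-specific choices, and the declarations again come from <linux/thunderbolt.h>:

struct example_req {
	u32 opcode;
};

struct example_resp {
	u32 status;
};

/* Sleeps until the peer answers or the timeout expires. */
static int example_roundtrip(struct tb_xdomain *xd,
			     const struct example_req *req,
			     struct example_resp *resp)
{
	return tb_xdomain_request(xd, req, sizeof(*req),
				  TB_CFG_PKG_XDOMAIN_REQ,
				  resp, sizeof(*resp),
				  TB_CFG_PKG_XDOMAIN_RESP, 1000);
}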
207 return 0; in tb_xdp_handle_error()
223 return 0; in tb_xdp_handle_error()
233 memset(&req, 0, sizeof(req)); in tb_xdp_uuid_request()
237 memset(&res, 0, sizeof(res)); in tb_xdp_uuid_request()
250 return 0; in tb_xdp_uuid_request()
258 memset(&res, 0, sizeof(res)); in tb_xdp_uuid_response()
275 memset(&res, 0, sizeof(res)); in tb_xdp_error_response()
300 memset(&req, 0, sizeof(req)); in tb_xdp_properties_request()
306 len = 0; in tb_xdp_properties_request()
307 data_len = 0; in tb_xdp_properties_request()
393 return 0; in tb_xdp_properties_response()
438 memset(&req, 0, sizeof(req)); in tb_xdp_properties_changed_request()
443 memset(&res, 0, sizeof(res)); in tb_xdp_properties_changed_request()
459 memset(&res, 0, sizeof(res)); in tb_xdp_properties_changed_response()
486 return 0; in tb_register_protocol_handler()
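tb_register_protocol_handler() hooks a driver into the dispatch done in tb_xdomain_handle_request() further down: packets whose UUID is not the discovery protocol's are offered to each registered handler in turn, and a positive callback return marks the packet as consumed (see the "return ret > 0" fragment later in this listing). A minimal sketch, reusing the example_proto_uuid declared above:

#include <linux/thunderbolt.h>

/* Hypothetical handler; return a positive value once the packet is handled. */
static int example_handle_packet(const void *buf, size_t size, void *data)
{
	return 1;
}

static struct tb_protocol_handler example_handler = {
	.uuid = &example_proto_uuid,	/* UUID in the incoming packet header */
	.callback = example_handle_packet,
};

/* Module init would then call tb_register_protocol_handler(&example_handler). */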
512 int ret = 0; in tb_xdp_handle_request()
541 struct tb_xdomain *xd; in tb_xdp_handle_request() local
550 xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid); in tb_xdp_handle_request()
551 if (xd) { in tb_xdp_handle_request()
552 queue_delayed_work(tb->wq, &xd->get_properties_work, in tb_xdp_handle_request()
554 tb_xdomain_put(xd); in tb_xdp_handle_request()
692 return sprintf(buf, "0x%08x\n", svc->prtcstns); in prtcstns_show()
727 struct tb_xdomain *xd = tb_service_parent(svc); in tb_service_release() local
729 ida_simple_remove(&xd->service_ids, svc->id); in tb_service_release()
744 struct tb_xdomain *xd = data; in remove_missing_service() local
749 return 0; in remove_missing_service()
751 if (!tb_property_find(xd->properties, svc->key, in remove_missing_service()
755 return 0; in remove_missing_service()
765 return 0; in find_service()
794 return 0; in populate_service()
797 static void enumerate_services(struct tb_xdomain *xd) in enumerate_services() argument
808 device_for_each_child_reverse(&xd->dev, xd, remove_missing_service); in enumerate_services()
811 tb_property_for_each(xd->properties, p) { in enumerate_services()
816 dev = device_find_child(&xd->dev, p, find_service); in enumerate_services()
831 id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); in enumerate_services()
832 if (id < 0) { in enumerate_services()
839 svc->dev.parent = &xd->dev; in enumerate_services()
840 dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id); in enumerate_services()
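Each property sub-directory found by enumerate_services() becomes a struct tb_service named "<xdomain>.<id>" on the Thunderbolt bus, as the dev_set_name() line above shows. Service drivers bind to those devices through an ID table; the sketch below loosely follows the in-tree networking service driver, with all names and the protocol key/ID made up:

#include <linux/module.h>
#include <linux/thunderbolt.h>

static const struct tb_service_id example_ids[] = {
	{ TB_SERVICE("example", 1) },	/* directory key "example", prtcid 1 */
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, example_ids);

static int example_probe(struct tb_service *svc,
			 const struct tb_service_id *id)
{
	return 0;
}

static void example_remove(struct tb_service *svc)
{
}

static struct tb_service_driver example_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "example-service",
	},
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};

/* Module init would then call tb_register_service_driver(&example_driver). */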
849 static int populate_properties(struct tb_xdomain *xd, in populate_properties() argument
858 xd->device = p->value.immediate; in populate_properties()
863 xd->vendor = p->value.immediate; in populate_properties()
865 kfree(xd->device_name); in populate_properties()
866 xd->device_name = NULL; in populate_properties()
867 kfree(xd->vendor_name); in populate_properties()
868 xd->vendor_name = NULL; in populate_properties()
873 xd->device_name = kstrdup(p->value.text, GFP_KERNEL); in populate_properties()
876 xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL); in populate_properties()
878 return 0; in populate_properties()
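populate_properties() pulls the peer's identification fields out of the parsed property directory with tb_property_find(), reading both the immediate and the text variants of a key. A minimal sketch of the same lookup pattern, assuming an already parsed directory and the "deviceid" key used elsewhere in this file (pr_info() stands in for real consumers):

static void example_read_ids(struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		pr_info("device id: %#x\n", p->value.immediate);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		pr_info("device name: %s\n", p->value.text);
}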
881 /* Called with @xd->lock held */
882 static void tb_xdomain_restore_paths(struct tb_xdomain *xd) in tb_xdomain_restore_paths() argument
884 if (!xd->resume) in tb_xdomain_restore_paths()
887 xd->resume = false; in tb_xdomain_restore_paths()
888 if (xd->transmit_path) { in tb_xdomain_restore_paths()
889 dev_dbg(&xd->dev, "re-establishing DMA path\n"); in tb_xdomain_restore_paths()
890 tb_domain_approve_xdomain_paths(xd->tb, xd); in tb_xdomain_restore_paths()
896 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_get_uuid() local
898 struct tb *tb = xd->tb; in tb_xdomain_get_uuid()
902 ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid); in tb_xdomain_get_uuid()
903 if (ret < 0) { in tb_xdomain_get_uuid()
904 if (xd->uuid_retries-- > 0) { in tb_xdomain_get_uuid()
905 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, in tb_xdomain_get_uuid()
908 dev_dbg(&xd->dev, "failed to read remote UUID\n"); in tb_xdomain_get_uuid()
913 if (uuid_equal(&uuid, xd->local_uuid)) { in tb_xdomain_get_uuid()
914 dev_dbg(&xd->dev, "intra-domain loop detected\n"); in tb_xdomain_get_uuid()
923 if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) { in tb_xdomain_get_uuid()
924 dev_dbg(&xd->dev, "remote UUID is different, unplugging\n"); in tb_xdomain_get_uuid()
925 xd->is_unplugged = true; in tb_xdomain_get_uuid()
930 if (!xd->remote_uuid) { in tb_xdomain_get_uuid()
931 xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); in tb_xdomain_get_uuid()
932 if (!xd->remote_uuid) in tb_xdomain_get_uuid()
937 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in tb_xdomain_get_uuid()
939 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in tb_xdomain_get_uuid()
945 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_get_properties() local
948 struct tb *tb = xd->tb; in tb_xdomain_get_properties()
951 u32 gen = 0; in tb_xdomain_get_properties()
954 ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid, in tb_xdomain_get_properties()
955 xd->remote_uuid, xd->properties_retries, in tb_xdomain_get_properties()
957 if (ret < 0) { in tb_xdomain_get_properties()
958 if (xd->properties_retries-- > 0) { in tb_xdomain_get_properties()
959 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in tb_xdomain_get_properties()
963 dev_err(&xd->dev, in tb_xdomain_get_properties()
965 xd->remote_uuid); in tb_xdomain_get_properties()
970 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; in tb_xdomain_get_properties()
972 mutex_lock(&xd->lock); in tb_xdomain_get_properties()
975 if (xd->properties && gen <= xd->property_block_gen) { in tb_xdomain_get_properties()
982 tb_xdomain_restore_paths(xd); in tb_xdomain_get_properties()
988 dev_err(&xd->dev, "failed to parse XDomain properties\n"); in tb_xdomain_get_properties()
992 ret = populate_properties(xd, dir); in tb_xdomain_get_properties()
994 dev_err(&xd->dev, "missing XDomain properties in response\n"); in tb_xdomain_get_properties()
999 if (xd->properties) { in tb_xdomain_get_properties()
1000 tb_property_free_dir(xd->properties); in tb_xdomain_get_properties()
1004 xd->properties = dir; in tb_xdomain_get_properties()
1005 xd->property_block_gen = gen; in tb_xdomain_get_properties()
1007 tb_xdomain_restore_paths(xd); in tb_xdomain_get_properties()
1009 mutex_unlock(&xd->lock); in tb_xdomain_get_properties()
1019 if (device_add(&xd->dev)) { in tb_xdomain_get_properties()
1020 dev_err(&xd->dev, "failed to add XDomain device\n"); in tb_xdomain_get_properties()
1024 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); in tb_xdomain_get_properties()
1027 enumerate_services(xd); in tb_xdomain_get_properties()
1034 mutex_unlock(&xd->lock); in tb_xdomain_get_properties()
1039 struct tb_xdomain *xd = container_of(work, typeof(*xd), in tb_xdomain_properties_changed() local
1043 ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route, in tb_xdomain_properties_changed()
1044 xd->properties_changed_retries, xd->local_uuid); in tb_xdomain_properties_changed()
1046 if (xd->properties_changed_retries-- > 0) in tb_xdomain_properties_changed()
1047 queue_delayed_work(xd->tb->wq, in tb_xdomain_properties_changed()
1048 &xd->properties_changed_work, in tb_xdomain_properties_changed()
1053 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES; in tb_xdomain_properties_changed()
1059 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in device_show() local
1061 return sprintf(buf, "%#x\n", xd->device); in device_show()
1068 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in device_name_show() local
1071 if (mutex_lock_interruptible(&xd->lock)) in device_name_show()
1073 ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : ""); in device_name_show()
1074 mutex_unlock(&xd->lock); in device_name_show()
1083 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in vendor_show() local
1085 return sprintf(buf, "%#x\n", xd->vendor); in vendor_show()
1092 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in vendor_name_show() local
1095 if (mutex_lock_interruptible(&xd->lock)) in vendor_name_show()
1097 ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : ""); in vendor_name_show()
1098 mutex_unlock(&xd->lock); in vendor_name_show()
1107 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in unique_id_show() local
1109 return sprintf(buf, "%pUb\n", xd->remote_uuid); in unique_id_show()
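The show functions above are plain sysfs attributes on the XDomain device; the file ties them into the xdomain_attr_groups array that is assigned to xd->dev.groups in tb_xdomain_alloc() further down. The usual glue looks roughly like this (a sketch of the standard attribute-group pattern, not copied verbatim from the file):

static DEVICE_ATTR_RO(device);
static DEVICE_ATTR_RO(device_name);
static DEVICE_ATTR_RO(vendor);
static DEVICE_ATTR_RO(vendor_name);
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};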
1133 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); in tb_xdomain_release() local
1135 put_device(xd->dev.parent); in tb_xdomain_release()
1137 tb_property_free_dir(xd->properties); in tb_xdomain_release()
1138 ida_destroy(&xd->service_ids); in tb_xdomain_release()
1140 kfree(xd->local_uuid); in tb_xdomain_release()
1141 kfree(xd->remote_uuid); in tb_xdomain_release()
1142 kfree(xd->device_name); in tb_xdomain_release()
1143 kfree(xd->vendor_name); in tb_xdomain_release()
1144 kfree(xd); in tb_xdomain_release()
1147 static void start_handshake(struct tb_xdomain *xd) in start_handshake() argument
1149 xd->uuid_retries = XDOMAIN_UUID_RETRIES; in start_handshake()
1150 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; in start_handshake()
1151 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES; in start_handshake()
1153 if (xd->needs_uuid) { in start_handshake()
1154 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, in start_handshake()
1158 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in start_handshake()
1160 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, in start_handshake()
1165 static void stop_handshake(struct tb_xdomain *xd) in stop_handshake() argument
1167 xd->uuid_retries = 0; in stop_handshake()
1168 xd->properties_retries = 0; in stop_handshake()
1169 xd->properties_changed_retries = 0; in stop_handshake()
1171 cancel_delayed_work_sync(&xd->get_uuid_work); in stop_handshake()
1172 cancel_delayed_work_sync(&xd->get_properties_work); in stop_handshake()
1173 cancel_delayed_work_sync(&xd->properties_changed_work); in stop_handshake()
1179 return 0; in tb_xdomain_suspend()
1184 struct tb_xdomain *xd = tb_to_xdomain(dev); in tb_xdomain_resume() local
1190 xd->resume = true; in tb_xdomain_resume()
1191 start_handshake(xd); in tb_xdomain_resume()
1193 return 0; in tb_xdomain_resume()
1223 struct tb_xdomain *xd; in tb_xdomain_alloc() local
1225 xd = kzalloc(sizeof(*xd), GFP_KERNEL); in tb_xdomain_alloc()
1226 if (!xd) in tb_xdomain_alloc()
1229 xd->tb = tb; in tb_xdomain_alloc()
1230 xd->route = route; in tb_xdomain_alloc()
1231 ida_init(&xd->service_ids); in tb_xdomain_alloc()
1232 mutex_init(&xd->lock); in tb_xdomain_alloc()
1233 INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid); in tb_xdomain_alloc()
1234 INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties); in tb_xdomain_alloc()
1235 INIT_DELAYED_WORK(&xd->properties_changed_work, in tb_xdomain_alloc()
1238 xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL); in tb_xdomain_alloc()
1239 if (!xd->local_uuid) in tb_xdomain_alloc()
1243 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), in tb_xdomain_alloc()
1245 if (!xd->remote_uuid) in tb_xdomain_alloc()
1248 xd->needs_uuid = true; in tb_xdomain_alloc()
1251 device_initialize(&xd->dev); in tb_xdomain_alloc()
1252 xd->dev.parent = get_device(parent); in tb_xdomain_alloc()
1253 xd->dev.bus = &tb_bus_type; in tb_xdomain_alloc()
1254 xd->dev.type = &tb_xdomain_type; in tb_xdomain_alloc()
1255 xd->dev.groups = xdomain_attr_groups; in tb_xdomain_alloc()
1256 dev_set_name(&xd->dev, "%u-%llx", tb->index, route); in tb_xdomain_alloc()
1262 pm_runtime_set_active(&xd->dev); in tb_xdomain_alloc()
1263 pm_runtime_get_noresume(&xd->dev); in tb_xdomain_alloc()
1264 pm_runtime_enable(&xd->dev); in tb_xdomain_alloc()
1266 return xd; in tb_xdomain_alloc()
1269 kfree(xd->local_uuid); in tb_xdomain_alloc()
1271 kfree(xd); in tb_xdomain_alloc()
1278 * @xd: XDomain to add
1285 void tb_xdomain_add(struct tb_xdomain *xd) in tb_xdomain_add() argument
1288 start_handshake(xd); in tb_xdomain_add()
1294 return 0; in unregister_service()
1299 * @xd: XDomain to remove
1302 * along with any services from the bus. When the last reference to @xd
1305 void tb_xdomain_remove(struct tb_xdomain *xd) in tb_xdomain_remove() argument
1307 stop_handshake(xd); in tb_xdomain_remove()
1309 device_for_each_child_reverse(&xd->dev, xd, unregister_service); in tb_xdomain_remove()
1316 pm_runtime_disable(&xd->dev); in tb_xdomain_remove()
1317 pm_runtime_put_noidle(&xd->dev); in tb_xdomain_remove()
1318 pm_runtime_set_suspended(&xd->dev); in tb_xdomain_remove()
1320 if (!device_is_registered(&xd->dev)) in tb_xdomain_remove()
1321 put_device(&xd->dev); in tb_xdomain_remove()
1323 device_unregister(&xd->dev); in tb_xdomain_remove()
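tb_xdomain_alloc(), tb_xdomain_add() and tb_xdomain_remove() are the lifecycle used by the connection manager code (declared in the driver-internal tb.h): allocate when a host-to-host link is noticed, add to start the handshake, remove on unplug. A rough sketch of that flow; the exact call site and parameters are assumptions for illustration:

/* Sketch of a connection manager's use; all parameters assumed available. */
static struct tb_xdomain *example_xdomain_connected(struct tb *tb,
						    struct tb_switch *sw,
						    u64 route,
						    const uuid_t *local_uuid,
						    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, remote_uuid);
	if (xd)
		tb_xdomain_add(xd);	/* starts the UUID/properties handshake */
	return xd;
}

/*
 * On unplug the manager calls tb_xdomain_remove(xd), which stops the work
 * items and unregisters the XDomain together with its services.
 */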
1328 * @xd: XDomain connection
1340 * Return: %0 in case of success and negative errno in case of error
1342 int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, in tb_xdomain_enable_paths() argument
1348 mutex_lock(&xd->lock); in tb_xdomain_enable_paths()
1350 if (xd->transmit_path) { in tb_xdomain_enable_paths()
1351 ret = xd->transmit_path == transmit_path ? 0 : -EBUSY; in tb_xdomain_enable_paths()
1355 xd->transmit_path = transmit_path; in tb_xdomain_enable_paths()
1356 xd->transmit_ring = transmit_ring; in tb_xdomain_enable_paths()
1357 xd->receive_path = receive_path; in tb_xdomain_enable_paths()
1358 xd->receive_ring = receive_ring; in tb_xdomain_enable_paths()
1360 ret = tb_domain_approve_xdomain_paths(xd->tb, xd); in tb_xdomain_enable_paths()
1363 mutex_unlock(&xd->lock); in tb_xdomain_enable_paths()
1371 * @xd: XDomain connection
1376 * Return: %0 in case of success and negative errno in case of error
1378 int tb_xdomain_disable_paths(struct tb_xdomain *xd) in tb_xdomain_disable_paths() argument
1380 int ret = 0; in tb_xdomain_disable_paths()
1382 mutex_lock(&xd->lock); in tb_xdomain_disable_paths()
1383 if (xd->transmit_path) { in tb_xdomain_disable_paths()
1384 xd->transmit_path = 0; in tb_xdomain_disable_paths()
1385 xd->transmit_ring = 0; in tb_xdomain_disable_paths()
1386 xd->receive_path = 0; in tb_xdomain_disable_paths()
1387 xd->receive_ring = 0; in tb_xdomain_disable_paths()
1389 ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd); in tb_xdomain_disable_paths()
1391 mutex_unlock(&xd->lock); in tb_xdomain_disable_paths()
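A service driver that has set up its RX/TX rings calls tb_xdomain_enable_paths() with its chosen HopID and the ring numbers, and tears the DMA paths down again with tb_xdomain_disable_paths(). A minimal sketch with made-up path and ring values; declarations come from <linux/thunderbolt.h>:

/* Sketch: EXAMPLE_PATH and the ring hop values are placeholders. */
#define EXAMPLE_PATH	0xe

static int example_enable_dma(struct tb_xdomain *xd, u16 tx_ring_hop,
			      u16 rx_ring_hop)
{
	return tb_xdomain_enable_paths(xd, EXAMPLE_PATH, tx_ring_hop,
				       EXAMPLE_PATH, rx_ring_hop);
}

static void example_disable_dma(struct tb_xdomain *xd)
{
	tb_xdomain_disable_paths(xd);
}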
1411 struct tb_xdomain *xd; in switch_find_xdomain() local
1414 xd = port->xdomain; in switch_find_xdomain()
1417 if (xd->remote_uuid && in switch_find_xdomain()
1418 uuid_equal(xd->remote_uuid, lookup->uuid)) in switch_find_xdomain()
1419 return xd; in switch_find_xdomain()
1421 lookup->link == xd->link && in switch_find_xdomain()
1422 lookup->depth == xd->depth) { in switch_find_xdomain()
1423 return xd; in switch_find_xdomain()
1425 lookup->route == xd->route) { in switch_find_xdomain()
1426 return xd; in switch_find_xdomain()
1429 xd = switch_find_xdomain(port->remote->sw, lookup); in switch_find_xdomain()
1430 if (xd) in switch_find_xdomain()
1431 return xd; in switch_find_xdomain()
1456 struct tb_xdomain *xd; in tb_xdomain_find_by_uuid() local
1458 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_uuid()
1461 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_uuid()
1462 return tb_xdomain_get(xd); in tb_xdomain_find_by_uuid()
1486 struct tb_xdomain *xd; in tb_xdomain_find_by_link_depth() local
1488 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_link_depth()
1492 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_link_depth()
1493 return tb_xdomain_get(xd); in tb_xdomain_find_by_link_depth()
1514 struct tb_xdomain *xd; in tb_xdomain_find_by_route() local
1516 memset(&lookup, 0, sizeof(lookup)); in tb_xdomain_find_by_route()
1519 xd = switch_find_xdomain(tb->root_switch, &lookup); in tb_xdomain_find_by_route()
1520 return tb_xdomain_get(xd); in tb_xdomain_find_by_route()
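The find helpers return a reference-counted XDomain (note the tb_xdomain_get() calls above), so every successful lookup must be balanced with tb_xdomain_put(). A minimal usage sketch:

/*
 * A sketch; tb_xdomain_find_by_uuid() expects the domain lock to be held,
 * and there are *_locked variants (such as tb_xdomain_find_by_uuid_locked
 * used earlier in this listing) that take the lock themselves.
 */
static bool example_peer_present(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_find_by_uuid(tb, uuid);	/* takes a device reference */
	if (!xd)
		return false;

	tb_xdomain_put(xd);			/* drop the reference again */
	return true;
}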
1530 int ret = 0; in tb_xdomain_handle_request()
1564 return ret > 0; in tb_xdomain_handle_request()
1572 ret = tb_property_format_dir(xdomain_property_dir, NULL, 0); in rebuild_property_block()
1573 if (ret < 0) in rebuild_property_block()
1593 return 0; in rebuild_property_block()
1598 struct tb_xdomain *xd; in update_xdomain() local
1600 xd = tb_to_xdomain(dev); in update_xdomain()
1601 if (xd) { in update_xdomain()
1602 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, in update_xdomain()
1606 return 0; in update_xdomain()
1637 * Return: %0 on success and negative errno on failure
1668 return 0; in tb_register_property_dir()
1686 int ret = 0; in tb_unregister_property_dir()
1714 tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1); in tb_xdomain_init()
1717 tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100); in tb_xdomain_init()
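tb_xdomain_init() above seeds the root property directory with the mandatory identification entries. A service driver publishes its own sub-directory next to them with tb_property_create_dir() and tb_register_property_dir(), which rebuilds the property block and notifies connected peers (see the update_xdomain() fragment above). A minimal sketch; the key, the property values and example_dir_uuid (a uuid_t declared with UUID_INIT() as sketched at the top of this listing) are placeholders:

#include <linux/thunderbolt.h>

static struct tb_property_dir *example_dir;

static int example_announce_service(void)
{
	int ret;

	example_dir = tb_property_create_dir(&example_dir_uuid);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);
	tb_property_add_immediate(example_dir, "prtcrevs", 1);
	tb_property_add_immediate(example_dir, "prtcstns", 0);

	/* Publishes the directory under key "example" and notifies peers. */
	ret = tb_register_property_dir("example", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}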