Lines Matching +full:sw +full:- +full:reset +full:- +full:number

1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - Tunneling support
37 /* Minimum number of credits needed for PCIe path */
40 * Number of credits we try to allocate for each DMA path if not limited
44 /* Minimum number of credits for DMA path */
52 level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
53 tb_route(__tunnel->src_port->sw), \
54 __tunnel->src_port->port, \
55 tb_route(__tunnel->dst_port->sw), \
56 __tunnel->dst_port->port, \
57 tb_tunnel_names[__tunnel->type], \
72 return port->total_credits - port->ctl_credits; in tb_usable_credits()
76 * tb_available_credits() - Available credits for PCIe and DMA
78 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
84 const struct tb_switch *sw = port->sw; in tb_available_credits() local
88 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; in tb_available_credits()
89 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; in tb_available_credits()
92 spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS); in tb_available_credits()
102 * Maximum number of DP streams possible through the in tb_available_credits()
105 if (sw->min_dp_aux_credits + sw->min_dp_main_credits) in tb_available_credits()
106 ndp = (credits - (usb3 + pcie + spare)) / in tb_available_credits()
107 (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
113 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
114 credits -= usb3; in tb_available_credits()
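
The budgeting arithmetic above (tb_usable_credits() and tb_available_credits()) first takes the control-channel credits off the port total, withholds the USB3/PCIe shares plus a DMA spare while sizing DP streams, and charges the USB3 share against the returned remainder. A minimal userspace sketch of that arithmetic with invented numbers; the struct and its values are hypothetical stand-ins for the sw->max_* fields in the listing, and the ACPI gating and min_not_zero() handling are omitted:

#include <stdio.h>

/* Hypothetical stand-ins for the switch fields seen in the listing */
struct sw_caps {
	unsigned int max_usb3_credits;
	unsigned int max_pcie_credits;
	unsigned int max_dma_credits;
	unsigned int min_dp_aux_credits;
	unsigned int min_dp_main_credits;
};

static unsigned int available_credits(unsigned int total, unsigned int ctl,
				      const struct sw_caps *sw,
				      unsigned int *max_dp_streams)
{
	/* tb_usable_credits(): everything except the control channel */
	unsigned int credits = total - ctl;
	unsigned int per_stream = sw->min_dp_aux_credits +
				  sw->min_dp_main_credits;
	unsigned int reserved = sw->max_usb3_credits +
				sw->max_pcie_credits + sw->max_dma_credits;
	unsigned int ndp = 0;

	/* How many DP streams fit once the fixed shares are withheld
	 * (the underflow guard is added for this sketch) */
	if (per_stream && credits > reserved)
		ndp = (credits - reserved) / per_stream;
	if (max_dp_streams)
		*max_dp_streams = ndp;

	/* What remains after the DP streams and the USB3 share */
	credits -= ndp * per_stream;
	credits -= sw->max_usb3_credits;
	return credits;
}

int main(void)
{
	struct sw_caps sw = { 32, 32, 14, 1, 18 };	/* invented values */
	unsigned int ndp, left = available_credits(120, 2, &sw, &ndp);

	printf("max DP streams %u, credits left %u\n", ndp, left);
	return 0;
}
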
131 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); in tb_tunnel_alloc()
132 if (!tunnel->paths) { in tb_tunnel_alloc()
137 INIT_LIST_HEAD(&tunnel->list); in tb_tunnel_alloc()
138 tunnel->tb = tb; in tb_tunnel_alloc()
139 tunnel->npaths = npaths; in tb_tunnel_alloc()
140 tunnel->type = type; in tb_tunnel_alloc()
149 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
153 if (tb_port_is_pcie_up(tunnel->dst_port)) in tb_pci_activate()
154 return tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
161 struct tb_port *port = hop->in_port; in tb_pci_init_credits()
162 struct tb_switch *sw = port->sw; in tb_pci_init_credits() local
169 credits = min(sw->max_pcie_credits, available); in tb_pci_init_credits()
172 return -ENOSPC; in tb_pci_init_credits()
177 credits = port->bonded ? 32 : 16; in tb_pci_init_credits()
182 hop->initial_credits = credits; in tb_pci_init_credits()
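
tb_pci_init_credits() shows two regimes: on switches that support credit allocation the path takes min(sw->max_pcie_credits, available) and fails with -ENOSPC below the minimum that the comment at line 37 refers to, while legacy hardware falls back to a fixed 32/16 split depending on lane bonding. A compilable sketch of that decision; MIN_PCIE_CREDITS and the boolean parameters are hypothetical stand-ins:

#include <stdbool.h>

#define MIN_PCIE_CREDITS 6U	/* hypothetical stand-in for the minimum */

static int pci_init_credits(bool alloc_capable, unsigned int max_pcie_credits,
			    unsigned int available, bool bonded,
			    unsigned int *initial_credits)
{
	unsigned int credits;

	if (alloc_capable) {
		credits = max_pcie_credits < available ?
			  max_pcie_credits : available;
		if (credits < MIN_PCIE_CREDITS)
			return -1;	/* -ENOSPC in the driver */
	} else {
		credits = bonded ? 32 : 16;
	}

	*initial_credits = credits;
	return 0;
}
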
190 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_pci_init_path()
191 path->egress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
192 path->ingress_fc_enable = TB_PATH_ALL; in tb_pci_init_path()
193 path->ingress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
194 path->priority = 3; in tb_pci_init_path()
195 path->weight = 1; in tb_pci_init_path()
196 path->drop_packages = 0; in tb_pci_init_path()
210 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
232 tunnel->activate = tb_pci_activate; in tb_tunnel_discover_pci()
233 tunnel->src_port = down; in tb_tunnel_discover_pci()
240 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, in tb_tunnel_discover_pci()
241 &tunnel->dst_port, "PCIe Up", alloc_hopid); in tb_tunnel_discover_pci()
247 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_discover_pci()
248 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) in tb_tunnel_discover_pci()
251 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, in tb_tunnel_discover_pci()
255 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_discover_pci()
256 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) in tb_tunnel_discover_pci()
260 if (!tb_port_is_pcie_up(tunnel->dst_port)) { in tb_tunnel_discover_pci()
261 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_pci()
266 if (down != tunnel->src_port) { in tb_tunnel_discover_pci()
271 if (!tb_pci_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_pci()
289 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel in tb_tunnel_alloc_pci()

309 tunnel->activate = tb_pci_activate; in tb_tunnel_alloc_pci()
310 tunnel->src_port = down; in tb_tunnel_alloc_pci()
311 tunnel->dst_port = up; in tb_tunnel_alloc_pci()
317 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_alloc_pci()
325 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_alloc_pci()
336 static bool tb_dp_is_usb4(const struct tb_switch *sw) in tb_dp_is_usb4() argument
339 return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); in tb_dp_is_usb4()
349 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) in tb_dp_cm_handshake()
353 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
360 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
366 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
372 } while (timeout--); in tb_dp_cm_handshake()
374 return -ETIMEDOUT; in tb_dp_cm_handshake()
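
The handshake above is the driver's standard bounded-poll shape: re-read the status register, bail out once the bit clears, sleep, and give up with -ETIMEDOUT when the retry budget is spent. The same pattern in standalone form, with the register read stubbed out:

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static bool exchange_done(void)
{
	return true;	/* stub for reading DP_STATUS_CTRL */
}

static int wait_for_handshake(int timeout)
{
	do {
		if (exchange_done())
			return 0;
		usleep(10 * 1000);	/* give the CMs time to finish */
	} while (timeout--);

	return -ETIMEDOUT;
}
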
439 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", in tb_dp_cap_set_lanes()
501 return -ENOSR; in tb_dp_reduce_bandwidth()
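
tb_dp_reduce_bandwidth(), whose -ENOSR exit is the line above, walks rate/lane combinations in order of decreasing total bandwidth and returns the first one that fits the budget. A standalone sketch under two assumptions the listing does not show: the combination table below is illustrative, and tunneled DP is taken to deliver rate x lanes x 8/10 because of 8b/10b encoding:

#include <stdio.h>

/* Assumed: tunneled DP carries 8 payload bits per 10 transmitted */
static unsigned int dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	return rate * lanes * 8 / 10;	/* rate in Mb/s per lane */
}

static int reduce_bandwidth(unsigned int max_bw, unsigned int *rate,
			    unsigned int *lanes)
{
	/* Illustrative table, ordered by decreasing total bandwidth */
	static const struct { unsigned int rate, lanes; } combos[] = {
		{ 8100, 4 }, { 5400, 4 }, { 8100, 2 }, { 2700, 4 },
		{ 5400, 2 }, { 8100, 1 }, { 2700, 2 }, { 1620, 2 },
		{ 2700, 1 }, { 1620, 1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(combos) / sizeof(combos[0]); i++) {
		if (dp_bandwidth(combos[i].rate, combos[i].lanes) <= max_bw) {
			*rate = combos[i].rate;
			*lanes = combos[i].lanes;
			return 0;
		}
	}
	return -1;	/* -ENOSR in the driver: nothing fits */
}

int main(void)
{
	unsigned int rate, lanes;

	if (!reduce_bandwidth(20000, &rate, &lanes))
		printf("%u Mb/s x %u lanes -> %u Mb/s usable\n",
		       rate, lanes, dp_bandwidth(rate, lanes));
	return 0;
}
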
507 struct tb_port *out = tunnel->dst_port; in tb_dp_xchg_caps()
508 struct tb_port *in = tunnel->src_port; in tb_dp_xchg_caps()
515 if (in->sw->generation < 2 || out->sw->generation < 2) in tb_dp_xchg_caps()
528 in->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
533 out->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
539 out->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
558 if (in->sw->config.depth < out->sw->config.depth) in tb_dp_xchg_caps()
559 max_bw = tunnel->max_down; in tb_dp_xchg_caps()
561 max_bw = tunnel->max_up; in tb_dp_xchg_caps()
579 * Set new rate and number of lanes before writing it to in tb_dp_xchg_caps()
591 if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { in tb_dp_xchg_caps()
597 in->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
608 paths = tunnel->paths; in tb_dp_activate()
609 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1; in tb_dp_activate()
611 tb_dp_port_set_hops(tunnel->src_port, in tb_dp_activate()
612 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
613 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
614 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index); in tb_dp_activate()
616 tb_dp_port_set_hops(tunnel->dst_port, in tb_dp_activate()
617 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index, in tb_dp_activate()
618 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, in tb_dp_activate()
619 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); in tb_dp_activate()
621 tb_dp_port_hpd_clear(tunnel->src_port); in tb_dp_activate()
622 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); in tb_dp_activate()
623 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
624 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0); in tb_dp_activate()
627 ret = tb_dp_port_enable(tunnel->src_port, active); in tb_dp_activate()
631 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
632 return tb_dp_port_enable(tunnel->dst_port, active); in tb_dp_activate()
640 struct tb_port *in = tunnel->src_port; in tb_dp_consumed_bandwidth()
641 const struct tb_switch *sw = in->sw; in tb_dp_consumed_bandwidth() local
645 if (tb_dp_is_usb4(sw)) { in tb_dp_consumed_bandwidth()
654 in->cap_adap + DP_COMMON_CAP, 1); in tb_dp_consumed_bandwidth()
664 } while (timeout--); in tb_dp_consumed_bandwidth()
667 return -ETIMEDOUT; in tb_dp_consumed_bandwidth()
668 } else if (sw->generation >= 2) { in tb_dp_consumed_bandwidth()
674 in->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_consumed_bandwidth()
687 if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { in tb_dp_consumed_bandwidth()
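
The depth comparison on the line above decides which direction the stream is charged to: video flows from the DP IN adapter toward DP OUT, so when IN sits closer to the host the stream consumes downstream budget, otherwise upstream. A sketch of that accounting; the branch bodies are not visible in the listing, so the assignments below are an assumption, and the parameter names are invented:

static void account_dp_stream(unsigned int in_depth, unsigned int out_depth,
			      unsigned int stream_bw,
			      unsigned int *consumed_up,
			      unsigned int *consumed_down)
{
	if (in_depth < out_depth) {
		*consumed_up = 0;
		*consumed_down = stream_bw;	/* host -> device */
	} else {
		*consumed_up = stream_bw;	/* device -> host */
		*consumed_down = 0;
	}
}
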
700 struct tb_port *port = hop->in_port; in tb_dp_init_aux_credits()
701 struct tb_switch *sw = port->sw; in tb_dp_init_aux_credits() local
704 hop->initial_credits = sw->min_dp_aux_credits; in tb_dp_init_aux_credits()
706 hop->initial_credits = 1; in tb_dp_init_aux_credits()
713 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dp_init_aux_path()
714 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
715 path->ingress_fc_enable = TB_PATH_ALL; in tb_dp_init_aux_path()
716 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
717 path->priority = 2; in tb_dp_init_aux_path()
718 path->weight = 1; in tb_dp_init_aux_path()
726 struct tb_port *port = hop->in_port; in tb_dp_init_video_credits()
727 struct tb_switch *sw = port->sw; in tb_dp_init_video_credits() local
735 * Read the number of currently allocated NFC credits in tb_dp_init_video_credits()
740 nfc_credits = port->config.nfc_credits & in tb_dp_init_video_credits()
742 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) in tb_dp_init_video_credits()
743 return -ENOSPC; in tb_dp_init_video_credits()
745 hop->nfc_credits = sw->min_dp_main_credits; in tb_dp_init_video_credits()
747 hop->nfc_credits = min(port->total_credits - 2, 12U); in tb_dp_init_video_credits()
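
Video paths ride on non-flow-controlled (NFC) credits, so tb_dp_init_video_credits() first checks how many NFC credits the port has already handed out against the stream count computed by tb_available_credits(). A sketch of the visible checks; the boolean selecting the legacy branch is a hypothetical stand-in:

#include <stdbool.h>

static int video_nfc_credits(unsigned int allocated_nfc,
			     unsigned int min_main_credits,
			     unsigned int max_dp_streams,
			     unsigned int total_credits,
			     bool alloc_capable, unsigned int *nfc)
{
	if (alloc_capable) {
		/* Too many streams already use this port's NFC credits? */
		if (allocated_nfc / min_main_credits > max_dp_streams)
			return -1;	/* -ENOSPC in the driver */
		*nfc = min_main_credits;
	} else {
		/* Legacy: keep two credits back, cap the claim at 12 */
		*nfc = total_credits - 2 < 12 ? total_credits - 2 : 12;
	}
	return 0;
}
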
757 path->egress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
758 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
759 path->ingress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
760 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
761 path->priority = 1; in tb_dp_init_video_path()
762 path->weight = 1; in tb_dp_init_video_path()
776 * tb_tunnel_discover_dp() - Discover existing DisplayPort tunnels in tb_tunnel_discover_dp()

801 tunnel->init = tb_dp_xchg_caps; in tb_tunnel_discover_dp()
802 tunnel->activate = tb_dp_activate; in tb_tunnel_discover_dp()
803 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_discover_dp()
804 tunnel->src_port = in; in tb_tunnel_discover_dp()
806 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, in tb_tunnel_discover_dp()
807 &tunnel->dst_port, "Video", alloc_hopid); in tb_tunnel_discover_dp()
813 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; in tb_tunnel_discover_dp()
814 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT])) in tb_tunnel_discover_dp()
817 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", in tb_tunnel_discover_dp()
821 tunnel->paths[TB_DP_AUX_PATH_OUT] = path; in tb_tunnel_discover_dp()
822 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]); in tb_tunnel_discover_dp()
824 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, in tb_tunnel_discover_dp()
828 tunnel->paths[TB_DP_AUX_PATH_IN] = path; in tb_tunnel_discover_dp()
829 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]); in tb_tunnel_discover_dp()
832 if (!tb_port_is_dpout(tunnel->dst_port)) { in tb_tunnel_discover_dp()
837 if (!tb_dp_port_is_enabled(tunnel->dst_port)) in tb_tunnel_discover_dp()
840 if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) in tb_tunnel_discover_dp()
843 if (port != tunnel->src_port) { in tb_tunnel_discover_dp()
860 * tb_tunnel_alloc_dp() - allocate a DisplayPort tunnel in tb_tunnel_alloc_dp()
883 if (WARN_ON(!in->cap_adap || !out->cap_adap)) in tb_tunnel_alloc_dp()
890 tunnel->init = tb_dp_xchg_caps; in tb_tunnel_alloc_dp()
891 tunnel->activate = tb_dp_activate; in tb_tunnel_alloc_dp()
892 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_alloc_dp()
893 tunnel->src_port = in; in tb_tunnel_alloc_dp()
894 tunnel->dst_port = out; in tb_tunnel_alloc_dp()
895 tunnel->max_up = max_up; in tb_tunnel_alloc_dp()
896 tunnel->max_down = max_down; in tb_tunnel_alloc_dp()
898 paths = tunnel->paths; in tb_tunnel_alloc_dp()
930 const struct tb_switch *sw = port->sw; in tb_dma_available_credits() local
935 credits -= sw->max_pcie_credits; in tb_dma_available_credits()
936 credits -= port->dma_credits; in tb_dma_available_credits()
943 struct tb_port *port = hop->in_port; in tb_dma_reserve_credits()
953 return -ENOSPC; in tb_dma_reserve_credits()
956 credits--; in tb_dma_reserve_credits()
961 port->dma_credits += credits; in tb_dma_reserve_credits()
964 credits = port->bonded ? 14 : 6; in tb_dma_reserve_credits()
966 credits = min(port->total_credits, credits); in tb_dma_reserve_credits()
969 hop->initial_credits = credits; in tb_dma_reserve_credits()
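
tb_dma_reserve_credits() clamps the request down to what the port still has free and records the reservation in port->dma_credits; legacy hardware instead uses fixed 14/6 values (bonded vs. not), bounded by the port total. A sketch of that flow; the minimum constant and the two booleans are hypothetical stand-ins, and the listing elides the branch between the two legacy assignments:

#include <stdbool.h>

#define MIN_DMA_CREDITS 1U	/* hypothetical stand-in for the minimum */

static int dma_reserve_credits(unsigned int *port_dma_credits,
			       unsigned int available, unsigned int request,
			       bool alloc_capable, bool bonded,
			       unsigned int total_credits,
			       unsigned int *initial_credits)
{
	unsigned int credits = request;

	if (alloc_capable) {
		if (available < MIN_DMA_CREDITS)
			return -1;		/* -ENOSPC in the driver */
		while (credits > available)
			credits--;		/* clamp to what is free */
		*port_dma_credits += credits;
	} else {
		credits = bonded ? 14 : 6;
		if (credits > total_credits)
			credits = total_credits;
	}

	*initial_credits = credits;
	return 0;
}
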
979 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dma_init_rx_path()
980 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_rx_path()
981 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
982 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
983 path->priority = 5; in tb_dma_init_rx_path()
984 path->weight = 1; in tb_dma_init_rx_path()
985 path->clear_fc = true; in tb_dma_init_rx_path()
992 hop = &path->hops[0]; in tb_dma_init_rx_path()
993 tmp = min(tb_usable_credits(hop->in_port), credits); in tb_dma_init_rx_path()
994 hop->initial_credits = tmp; in tb_dma_init_rx_path()
995 hop->in_port->dma_credits += tmp; in tb_dma_init_rx_path()
997 for (i = 1; i < path->path_length; i++) { in tb_dma_init_rx_path()
1000 ret = tb_dma_reserve_credits(&path->hops[i], credits); in tb_dma_init_rx_path()
1013 path->egress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
1014 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
1015 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
1016 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
1017 path->priority = 5; in tb_dma_init_tx_path()
1018 path->weight = 1; in tb_dma_init_tx_path()
1019 path->clear_fc = true; in tb_dma_init_tx_path()
1034 struct tb_port *port = hop->in_port; in tb_dma_release_credits()
1037 port->dma_credits -= hop->initial_credits; in tb_dma_release_credits()
1040 hop->initial_credits); in tb_dma_release_credits()
1056 for (i = 0; i < tunnel->npaths; i++) { in tb_dma_deinit()
1057 if (!tunnel->paths[i]) in tb_dma_deinit()
1059 tb_dma_deinit_path(tunnel->paths[i]); in tb_dma_deinit()
1064 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1069 * @transmit_ring: NHI ring number used to send packets towards the
1070 * other domain. Set to %-1 if TX path is not needed.
1072 * @receive_ring: NHI ring number used to receive packets from the
1073 * other domain. Set to %-1 if RX path is not needed.
1099 tunnel->src_port = nhi; in tb_tunnel_alloc_dma()
1100 tunnel->dst_port = dst; in tb_tunnel_alloc_dma()
1101 tunnel->deinit = tb_dma_deinit; in tb_tunnel_alloc_dma()
1103 credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits); in tb_tunnel_alloc_dma()
1110 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1122 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1137 * tb_tunnel_match_dma() - Match DMA tunnel
1139 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1140 * @transmit_ring: NHI ring number used to send packets towards the
1141 * other domain. Pass %-1 to ignore.
1142 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1143 * @receive_ring: NHI ring number used to receive packets from the
1144 * other domain. Pass %-1 to ignore.
1159 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_match_dma()
1160 const struct tb_path *path = tunnel->paths[i]; in tb_tunnel_match_dma()
1165 if (tb_port_is_nhi(path->hops[0].in_port)) in tb_tunnel_match_dma()
1167 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port)) in tb_tunnel_match_dma()
1175 (tx_path->hops[0].in_hop_index != transmit_ring)) in tb_tunnel_match_dma()
1178 (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path)) in tb_tunnel_match_dma()
1186 (rx_path->hops[0].in_hop_index != receive_path)) in tb_tunnel_match_dma()
1189 (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring)) in tb_tunnel_match_dma()
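
Every parameter of tb_tunnel_match_dma() treats %-1 as a wildcard, so a caller can match on any subset of the ring and HopID numbers. The predicate reduces to the shape below (a standalone restatement, not the driver's code):

#include <stdbool.h>

/* -1 matches anything; otherwise the values must agree */
static bool field_matches(int wanted, int actual)
{
	return wanted == -1 || wanted == actual;
}

static bool dma_tunnel_matches(int transmit_path, int transmit_ring,
			       int receive_path, int receive_ring,
			       int tx_path, int tx_ring,
			       int rx_path, int rx_ring)
{
	return field_matches(transmit_path, tx_path) &&
	       field_matches(transmit_ring, tx_ring) &&
	       field_matches(receive_path, rx_path) &&
	       field_matches(receive_ring, rx_ring);
}
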
1216 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_init()
1218 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port, in tb_usb3_init()
1219 &tunnel->allocated_up, in tb_usb3_init()
1220 &tunnel->allocated_down); in tb_usb3_init()
1227 res = tb_usb3_port_enable(tunnel->src_port, activate); in tb_usb3_activate()
1231 if (tb_port_is_usb3_up(tunnel->dst_port)) in tb_usb3_activate()
1232 return tb_usb3_port_enable(tunnel->dst_port, activate); in tb_usb3_activate()
1246 *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; in tb_usb3_consumed_bandwidth()
1247 *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; in tb_usb3_consumed_bandwidth()
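
The scaling above charges a USB3 tunnel an extra third of its allocation whenever PCIe tunneling is enabled, presumably because both share the same link. With the listing's formula, a 3000 Mb/s allocation costs 4000 Mb/s with PCIe on:

#include <stdbool.h>
#include <stdio.h>

static int usb3_consumed(int allocated, bool pcie_enabled)
{
	return allocated * (3 + pcie_enabled) / 3;
}

int main(void)
{
	printf("%d\n", usb3_consumed(3000, true));	/* 4000 */
	printf("%d\n", usb3_consumed(3000, false));	/* 3000 */
	return 0;
}
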
1255 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, in tb_usb3_release_unused_bandwidth()
1256 &tunnel->allocated_up, in tb_usb3_release_unused_bandwidth()
1257 &tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1262 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1272 ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
1278 ret = usb4_usb3_port_max_link_rate(tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
1292 if (tunnel->allocated_up >= max_rate && in tb_usb3_reclaim_available_bandwidth()
1293 tunnel->allocated_down >= max_rate) in tb_usb3_reclaim_available_bandwidth()
1298 if (allocate_up < tunnel->allocated_up) in tb_usb3_reclaim_available_bandwidth()
1299 allocate_up = tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
1302 if (allocate_down < tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
1303 allocate_down = tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
1306 if (allocate_up == tunnel->allocated_up && in tb_usb3_reclaim_available_bandwidth()
1307 allocate_down == tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
1310 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, in tb_usb3_reclaim_available_bandwidth()
1317 tunnel->allocated_up = allocate_up; in tb_usb3_reclaim_available_bandwidth()
1318 *available_up -= tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
1320 tunnel->allocated_down = allocate_down; in tb_usb3_reclaim_available_bandwidth()
1321 *available_down -= tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
1324 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_reclaim_available_bandwidth()
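
The reclaim path grows each direction toward the link maximum out of whatever is available, never shrinking an existing allocation, and skips the port call entirely when nothing would change. One direction of that logic in standalone form; here *available is taken to already include the tunnel's current allocation, which is how the listing's final subtraction makes sense:

static void usb3_reclaim(int max_rate, int *allocated, int *available)
{
	int allocate = *available < max_rate ? *available : max_rate;

	if (allocate < *allocated)
		allocate = *allocated;	/* never shrink an allocation */
	if (allocate == *allocated)
		return;			/* nothing to do, skip the port op */

	/* the driver calls usb4_usb3_port_allocate_bandwidth() here */
	*allocated = allocate;
	*available -= allocate;
}
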
1329 struct tb_port *port = hop->in_port; in tb_usb3_init_credits()
1330 struct tb_switch *sw = port->sw; in tb_usb3_init_credits() local
1334 credits = sw->max_usb3_credits; in tb_usb3_init_credits()
1337 credits = port->bonded ? 32 : 16; in tb_usb3_init_credits()
1342 hop->initial_credits = credits; in tb_usb3_init_credits()
1349 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_usb3_init_path()
1350 path->egress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
1351 path->ingress_fc_enable = TB_PATH_ALL; in tb_usb3_init_path()
1352 path->ingress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
1353 path->priority = 3; in tb_usb3_init_path()
1354 path->weight = 3; in tb_usb3_init_path()
1355 path->drop_packages = 0; in tb_usb3_init_path()
1362 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1384 tunnel->activate = tb_usb3_activate; in tb_tunnel_discover_usb3()
1385 tunnel->src_port = down; in tb_tunnel_discover_usb3()
1392 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, in tb_tunnel_discover_usb3()
1393 &tunnel->dst_port, "USB3 Down", alloc_hopid); in tb_tunnel_discover_usb3()
1399 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_discover_usb3()
1400 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); in tb_tunnel_discover_usb3()
1402 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, in tb_tunnel_discover_usb3()
1406 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_discover_usb3()
1407 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); in tb_tunnel_discover_usb3()
1410 if (!tb_port_is_usb3_up(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
1411 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_usb3()
1416 if (down != tunnel->src_port) { in tb_tunnel_discover_usb3()
1421 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
1427 if (!tb_route(down->sw)) { in tb_tunnel_discover_usb3()
1435 &tunnel->allocated_up, &tunnel->allocated_down); in tb_tunnel_discover_usb3()
1440 tunnel->allocated_up, tunnel->allocated_down); in tb_tunnel_discover_usb3()
1442 tunnel->init = tb_usb3_init; in tb_tunnel_discover_usb3()
1443 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_discover_usb3()
1444 tunnel->release_unused_bandwidth = in tb_tunnel_discover_usb3()
1446 tunnel->reclaim_available_bandwidth = in tb_tunnel_discover_usb3()
1462 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1508 tunnel->activate = tb_usb3_activate; in tb_tunnel_alloc_usb3()
1509 tunnel->src_port = down; in tb_tunnel_alloc_usb3()
1510 tunnel->dst_port = up; in tb_tunnel_alloc_usb3()
1511 tunnel->max_up = max_up; in tb_tunnel_alloc_usb3()
1512 tunnel->max_down = max_down; in tb_tunnel_alloc_usb3()
1521 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_alloc_usb3()
1530 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_alloc_usb3()
1532 if (!tb_route(down->sw)) { in tb_tunnel_alloc_usb3()
1533 tunnel->allocated_up = max_rate; in tb_tunnel_alloc_usb3()
1534 tunnel->allocated_down = max_rate; in tb_tunnel_alloc_usb3()
1536 tunnel->init = tb_usb3_init; in tb_tunnel_alloc_usb3()
1537 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_alloc_usb3()
1538 tunnel->release_unused_bandwidth = in tb_tunnel_alloc_usb3()
1540 tunnel->reclaim_available_bandwidth = in tb_tunnel_alloc_usb3()
1548 * tb_tunnel_free() - free a tunnel
1560 if (tunnel->deinit) in tb_tunnel_free()
1561 tunnel->deinit(tunnel); in tb_tunnel_free()
1563 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_free()
1564 if (tunnel->paths[i]) in tb_tunnel_free()
1565 tb_path_free(tunnel->paths[i]); in tb_tunnel_free()
1568 kfree(tunnel->paths); in tb_tunnel_free()
1573 * tb_tunnel_is_invalid() - check whether an activated path is still valid in tb_tunnel_is_invalid()
1580 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_invalid()
1581 WARN_ON(!tunnel->paths[i]->activated); in tb_tunnel_is_invalid()
1582 if (tb_path_is_invalid(tunnel->paths[i])) in tb_tunnel_is_invalid()
1590 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1605 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_restart()
1606 if (tunnel->paths[i]->activated) { in tb_tunnel_restart()
1607 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_restart()
1608 tunnel->paths[i]->activated = false; in tb_tunnel_restart()
1612 if (tunnel->init) { in tb_tunnel_restart()
1613 res = tunnel->init(tunnel); in tb_tunnel_restart()
1618 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_restart()
1619 res = tb_path_activate(tunnel->paths[i]); in tb_tunnel_restart()
1624 if (tunnel->activate) { in tb_tunnel_restart()
1625 res = tunnel->activate(tunnel, true); in tb_tunnel_restart()
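
tb_tunnel_restart() runs a fixed sequence: deactivate any path still marked active, re-run the tunnel's init hook, re-activate every path, then fire the activate hook. A compressed standalone rendering of that order, with the types pared down to what the sequence needs:

#include <stdbool.h>
#include <stddef.h>

struct path { bool activated; };

struct tunnel {
	size_t npaths;
	struct path **paths;
	int (*init)(struct tunnel *);
	int (*activate)(struct tunnel *, bool enable);
};

static int tunnel_restart(struct tunnel *t)
{
	size_t i;
	int res;

	/* 1. Make sure nothing is left half-activated */
	for (i = 0; i < t->npaths; i++) {
		if (t->paths[i]->activated)
			t->paths[i]->activated = false;	/* tb_path_deactivate() */
	}

	/* 2. Re-run per-type initialization (e.g. tb_dp_xchg_caps) */
	if (t->init) {
		res = t->init(t);
		if (res)
			return res;
	}

	/* 3. Re-program every path, then 4. enable the adapters */
	for (i = 0; i < t->npaths; i++)
		t->paths[i]->activated = true;	/* tb_path_activate() */

	return t->activate ? t->activate(t, true) : 0;
}
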
1639 * tb_tunnel_activate() - activate a tunnel
1648 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
1649 if (tunnel->paths[i]->activated) { in tb_tunnel_activate()
1652 return -EINVAL; in tb_tunnel_activate()
1660 * tb_tunnel_deactivate() - deactivate a tunnel
1669 if (tunnel->activate) in tb_tunnel_deactivate()
1670 tunnel->activate(tunnel, false); in tb_tunnel_deactivate()
1672 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_deactivate()
1673 if (tunnel->paths[i] && tunnel->paths[i]->activated) in tb_tunnel_deactivate()
1674 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_deactivate()
1679 * tb_tunnel_port_on_path() - Does the tunnel go through port
1691 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_port_on_path()
1692 if (!tunnel->paths[i]) in tb_tunnel_port_on_path()
1695 if (tb_path_port_on_path(tunnel->paths[i], port)) in tb_tunnel_port_on_path()
1706 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_active()
1707 if (!tunnel->paths[i]) in tb_tunnel_is_active()
1709 if (!tunnel->paths[i]->activated) in tb_tunnel_is_active()
1717 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1736 if (tunnel->consumed_bandwidth) { in tb_tunnel_consumed_bandwidth()
1739 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); in tb_tunnel_consumed_bandwidth()
1757 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1770 if (tunnel->release_unused_bandwidth) { in tb_tunnel_release_unused_bandwidth()
1773 ret = tunnel->release_unused_bandwidth(tunnel); in tb_tunnel_release_unused_bandwidth()
1782 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1799 if (tunnel->reclaim_available_bandwidth) in tb_tunnel_reclaim_available_bandwidth()
1800 tunnel->reclaim_available_bandwidth(tunnel, available_up, in tb_tunnel_reclaim_available_bandwidth()