Lines Matching +full:usb3 +full:- +full:if
1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - Tunneling support
22 /* USB3 adapters always use HopID 8 for both directions */
40 * Number of credits we try to allocate for each DMA path if not limited
47 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
52 level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
53 tb_route(__tunnel->src_port->sw), \
54 __tunnel->src_port->port, \
55 tb_route(__tunnel->dst_port->sw), \
56 __tunnel->dst_port->port, \
57 tb_tunnel_names[__tunnel->type], \
72 return port->total_credits - port->ctl_credits; in tb_usable_credits()
76 * tb_available_credits() - Available credits for PCIe and DMA
78 * @max_dp_streams: If non-%NULL, stores the maximum number of simultaneous DP
84 const struct tb_switch *sw = port->sw; in tb_available_credits()
85 int credits, usb3, pcie, spare; in tb_available_credits() local
88 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; in tb_available_credits()
89 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; in tb_available_credits()
91 if (tb_acpi_is_xdomain_allowed()) { in tb_available_credits()
92 spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS); in tb_available_credits()
100 if (tb_acpi_may_tunnel_dp()) { in tb_available_credits()
105 ndp = (credits - (usb3 + pcie + spare)) / in tb_available_credits()
106 (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
110 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
111 credits -= usb3; in tb_available_credits()
113 if (max_dp_streams) in tb_available_credits()
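
The ndp computation above is plain integer arithmetic: whatever buffer space is left after the USB3, PCIe and DMA reservations is divided by the cost of one DP stream (one AUX plus one main path). A standalone restatement with invented credit figures:

#include <stdio.h>

int main(void)
{
	/* All figures invented for illustration */
	int credits = 60;			/* usable credits on the port */
	int usb3 = 14, pcie = 6, spare = 14;	/* static reservations */
	int min_dp_aux = 1, min_dp_main = 12;	/* cost of one DP stream */

	/* Same division as tb_available_credits() */
	int ndp = (credits - (usb3 + pcie + spare)) /
		  (min_dp_aux + min_dp_main);

	printf("max simultaneous DP streams: %d\n", ndp);	/* prints 2 */
	return 0;
}
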
125 if (!tunnel) in tb_tunnel_alloc()
128 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); in tb_tunnel_alloc()
129 if (!tunnel->paths) { in tb_tunnel_alloc()
134 INIT_LIST_HEAD(&tunnel->list); in tb_tunnel_alloc()
135 tunnel->tb = tb; in tb_tunnel_alloc()
136 tunnel->npaths = npaths; in tb_tunnel_alloc()
137 tunnel->type = type; in tb_tunnel_alloc()
146 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
147 if (res) in tb_pci_activate()
150 if (tb_port_is_pcie_up(tunnel->dst_port)) in tb_pci_activate()
151 return tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
158 struct tb_port *port = hop->in_port; in tb_pci_init_credits()
159 struct tb_switch *sw = port->sw; in tb_pci_init_credits()
162 if (tb_port_use_credit_allocation(port)) { in tb_pci_init_credits()
166 credits = min(sw->max_pcie_credits, available); in tb_pci_init_credits()
168 if (credits < TB_MIN_PCIE_CREDITS) in tb_pci_init_credits()
169 return -ENOSPC; in tb_pci_init_credits()
173 if (tb_port_is_null(port)) in tb_pci_init_credits()
174 credits = port->bonded ? 32 : 16; in tb_pci_init_credits()
179 hop->initial_credits = credits; in tb_pci_init_credits()
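
A hedged restatement of the credit choice above: ports that support buffer (credit) allocation take what is available up to the per-switch PCIe maximum and fail below a floor, while legacy ports fall back to the fixed 32/16 values. The helper below is illustrative, not the kernel function:

#include <stdio.h>

/* Illustrative only; mirrors the branches of tb_pci_init_credits() */
static int pick_pcie_credits(int use_allocation, int max_pcie_credits,
			     int available, int bonded, int min_credits)
{
	if (use_allocation) {
		int credits = available < max_pcie_credits ?
			      available : max_pcie_credits;
		return credits < min_credits ? -1 : credits; /* -1 ~ -ENOSPC */
	}
	return bonded ? 32 : 16;	/* fixed legacy (non-USB4) values */
}

int main(void)
{
	printf("%d\n", pick_pcie_credits(1, 32, 20, 0, 6));	/* 20 */
	printf("%d\n", pick_pcie_credits(1, 32, 4, 0, 6));	/* -1 */
	printf("%d\n", pick_pcie_credits(0, 0, 0, 1, 0));	/* 32 */
	return 0;
}
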
187 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_pci_init_path()
188 path->egress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
189 path->ingress_fc_enable = TB_PATH_ALL; in tb_pci_init_path()
190 path->ingress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
191 path->priority = 3; in tb_pci_init_path()
192 path->weight = 1; in tb_pci_init_path()
193 path->drop_packages = 0; in tb_pci_init_path()
199 if (ret) in tb_pci_init_path()
207 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
211 * If @down adapter is active, follows the tunnel to the PCIe upstream
212 * adapter and back. Returns the discovered tunnel or %NULL if there was
220 if (!tb_pci_port_is_enabled(down)) in tb_tunnel_discover_pci()
224 if (!tunnel) in tb_tunnel_discover_pci()
227 tunnel->activate = tb_pci_activate; in tb_tunnel_discover_pci()
228 tunnel->src_port = down; in tb_tunnel_discover_pci()
231 * Discover both paths even if they are not complete. We will in tb_tunnel_discover_pci()
235 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, in tb_tunnel_discover_pci()
236 &tunnel->dst_port, "PCIe Up"); in tb_tunnel_discover_pci()
237 if (!path) { in tb_tunnel_discover_pci()
242 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_discover_pci()
243 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) in tb_tunnel_discover_pci()
246 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, in tb_tunnel_discover_pci()
248 if (!path) in tb_tunnel_discover_pci()
250 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_discover_pci()
251 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) in tb_tunnel_discover_pci()
255 if (!tb_port_is_pcie_up(tunnel->dst_port)) { in tb_tunnel_discover_pci()
256 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_pci()
261 if (down != tunnel->src_port) { in tb_tunnel_discover_pci()
266 if (!tb_pci_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_pci()
284 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
301 if (!tunnel) in tb_tunnel_alloc_pci()
304 tunnel->activate = tb_pci_activate; in tb_tunnel_alloc_pci()
305 tunnel->src_port = down; in tb_tunnel_alloc_pci()
306 tunnel->dst_port = up; in tb_tunnel_alloc_pci()
310 if (!path) in tb_tunnel_alloc_pci()
312 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_alloc_pci()
313 if (tb_pci_init_path(path)) in tb_tunnel_alloc_pci()
318 if (!path) in tb_tunnel_alloc_pci()
320 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_alloc_pci()
321 if (tb_pci_init_path(path)) in tb_tunnel_alloc_pci()
344 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) in tb_dp_cm_handshake()
348 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
349 if (ret) in tb_dp_cm_handshake()
355 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
356 if (ret) in tb_dp_cm_handshake()
361 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
362 if (ret) in tb_dp_cm_handshake()
364 if (!(val & DP_STATUS_CTRL_CMHS)) in tb_dp_cm_handshake()
367 } while (timeout--); in tb_dp_cm_handshake()
369 return -ETIMEDOUT; in tb_dp_cm_handshake()
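
The handshake above is a bounded set-then-poll loop: write DP_STATUS_CTRL_CMHS, then re-read until the hardware clears it or the retries run out. A standalone miniature of the same pattern against a fake register (none of this is the real register interface):

#include <stdio.h>

#define CMHS_BIT 0x1u

static unsigned int fake_reg = CMHS_BIT;	/* pretend adapter register */

static unsigned int read_reg(void)
{
	static int reads;

	if (++reads >= 3)			/* "hardware" finishes */
		fake_reg &= ~CMHS_BIT;
	return fake_reg;
}

int main(void)
{
	int timeout = 10;

	do {
		if (!(read_reg() & CMHS_BIT)) {
			puts("handshake completed");
			return 0;
		}
		/* the driver sleeps between polls here */
	} while (timeout--);

	puts("timed out");			/* ~ -ETIMEDOUT */
	return 1;
}
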
483 if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes) in tb_dp_reduce_bandwidth()
486 if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes) in tb_dp_reduce_bandwidth()
489 if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) { in tb_dp_reduce_bandwidth()
496 return -ENOSR; in tb_dp_reduce_bandwidth()
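
tb_dp_reduce_bandwidth() scans a rate/lane table from fastest to slowest and returns the first entry both adapters support that fits under max_bw. A runnable miniature; the table entries are common DisplayPort rate/lane pairs used purely as sample data, and the 8/10 factor models 8b/10b coding overhead:

#include <stdio.h>

static int dp_bandwidth(int rate, int lanes)
{
	return rate * lanes * 8 / 10;	/* Mb/s after 8b/10b overhead */
}

int main(void)
{
	static const int dp_bw[][2] = {	/* {rate Mb/s, lanes}, fastest first */
		{ 8100, 4 }, { 5400, 4 }, { 8100, 2 },
		{ 2700, 4 }, { 5400, 2 }, { 2700, 2 },
	};
	int in_rate = 8100, in_lanes = 4;	/* DP IN capabilities */
	int out_rate = 5400, out_lanes = 2;	/* DP OUT capabilities */
	int max_bw = 10000;			/* tunnel cap, Mb/s */
	unsigned int i;

	for (i = 0; i < sizeof(dp_bw) / sizeof(dp_bw[0]); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;	/* sink cannot do this mode */
		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;	/* source cannot do this mode */
		if (dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			printf("reduced to %dx%d = %d Mb/s\n", dp_bw[i][0],
			       dp_bw[i][1],
			       dp_bandwidth(dp_bw[i][0], dp_bw[i][1]));
			return 0;
		}
	}
	puts("no fit");			/* the driver returns -ENOSR */
	return 1;
}
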
502 struct tb_port *out = tunnel->dst_port; in tb_dp_xchg_caps()
503 struct tb_port *in = tunnel->src_port; in tb_dp_xchg_caps()
510 if (in->sw->generation < 2 || out->sw->generation < 2) in tb_dp_xchg_caps()
518 if (ret) in tb_dp_xchg_caps()
523 in->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
524 if (ret) in tb_dp_xchg_caps()
528 out->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
529 if (ret) in tb_dp_xchg_caps()
534 out->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
535 if (ret) in tb_dp_xchg_caps()
544 * If the tunnel bandwidth is limited (max_bw is set) then see in tb_dp_xchg_caps()
545 * if we need to reduce bandwidth to fit there. in tb_dp_xchg_caps()
553 if (in->sw->config.depth < out->sw->config.depth) in tb_dp_xchg_caps()
554 max_bw = tunnel->max_down; in tb_dp_xchg_caps()
556 max_bw = tunnel->max_up; in tb_dp_xchg_caps()
558 if (max_bw && bw > max_bw) { in tb_dp_xchg_caps()
564 if (ret) { in tb_dp_xchg_caps()
582 in->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
589 if (active) { in tb_dp_activate()
593 paths = tunnel->paths; in tb_dp_activate()
594 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1; in tb_dp_activate()
596 tb_dp_port_set_hops(tunnel->src_port, in tb_dp_activate()
597 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
598 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
599 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index); in tb_dp_activate()
601 tb_dp_port_set_hops(tunnel->dst_port, in tb_dp_activate()
602 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index, in tb_dp_activate()
603 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, in tb_dp_activate()
604 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); in tb_dp_activate()
606 tb_dp_port_hpd_clear(tunnel->src_port); in tb_dp_activate()
607 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); in tb_dp_activate()
608 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
609 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0); in tb_dp_activate()
612 ret = tb_dp_port_enable(tunnel->src_port, active); in tb_dp_activate()
613 if (ret) in tb_dp_activate()
616 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
617 return tb_dp_port_enable(tunnel->dst_port, active); in tb_dp_activate()
625 struct tb_port *in = tunnel->src_port; in tb_dp_consumed_bandwidth()
626 const struct tb_switch *sw = in->sw; in tb_dp_consumed_bandwidth()
630 if (tb_dp_is_usb4(sw)) { in tb_dp_consumed_bandwidth()
639 in->cap_adap + DP_COMMON_CAP, 1); in tb_dp_consumed_bandwidth()
640 if (ret) in tb_dp_consumed_bandwidth()
643 if (val & DP_COMMON_CAP_DPRX_DONE) { in tb_dp_consumed_bandwidth()
649 } while (timeout--); in tb_dp_consumed_bandwidth()
651 if (timeout < 0) in tb_dp_consumed_bandwidth()
652 return -ETIMEDOUT; in tb_dp_consumed_bandwidth()
653 } else if (sw->generation >= 2) { in tb_dp_consumed_bandwidth()
656 * account if capabilities were reduced during exchange. in tb_dp_consumed_bandwidth()
659 in->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_consumed_bandwidth()
660 if (ret) in tb_dp_consumed_bandwidth()
672 if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { in tb_dp_consumed_bandwidth()
685 struct tb_port *port = hop->in_port; in tb_dp_init_aux_credits()
686 struct tb_switch *sw = port->sw; in tb_dp_init_aux_credits()
688 if (tb_port_use_credit_allocation(port)) in tb_dp_init_aux_credits()
689 hop->initial_credits = sw->min_dp_aux_credits; in tb_dp_init_aux_credits()
691 hop->initial_credits = 1; in tb_dp_init_aux_credits()
698 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dp_init_aux_path()
699 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
700 path->ingress_fc_enable = TB_PATH_ALL; in tb_dp_init_aux_path()
701 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
702 path->priority = 2; in tb_dp_init_aux_path()
703 path->weight = 1; in tb_dp_init_aux_path()
711 struct tb_port *port = hop->in_port; in tb_dp_init_video_credits()
712 struct tb_switch *sw = port->sw; in tb_dp_init_video_credits()
714 if (tb_port_use_credit_allocation(port)) { in tb_dp_init_video_credits()
725 nfc_credits = port->config.nfc_credits & in tb_dp_init_video_credits()
727 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) in tb_dp_init_video_credits()
728 return -ENOSPC; in tb_dp_init_video_credits()
730 hop->nfc_credits = sw->min_dp_main_credits; in tb_dp_init_video_credits()
732 hop->nfc_credits = min(port->total_credits - 2, 12U); in tb_dp_init_video_credits()
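
The refusal test above can be read as budget bookkeeping: once the NFC credits already claimed on the port cover max_dp_streams worth of main paths, another video path gets -ENOSPC. With invented numbers:

#include <stdio.h>

int main(void)
{
	/* Invented port state */
	unsigned int nfc_credits = 36;		/* already claimed */
	unsigned int min_dp_main = 12;		/* per-stream main-path cost */
	unsigned int max_dp_streams = 2;	/* from tb_available_credits() */

	if (nfc_credits / min_dp_main > max_dp_streams)
		puts("-ENOSPC: DP stream budget exhausted"); /* 36/12 = 3 > 2 */
	else
		printf("reserve %u NFC credits\n", min_dp_main);
	return 0;
}
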
742 path->egress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
743 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
744 path->ingress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
745 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
746 path->priority = 1; in tb_dp_init_video_path()
747 path->weight = 1; in tb_dp_init_video_path()
753 if (ret) in tb_dp_init_video_path()
761 * tb_tunnel_discover_dp() - Discover existing DisplayPort tunnels
765 * If @in adapter is active, follows the tunnel to the DP out adapter
766 * and back. Returns the discovered tunnel or %NULL if there was no
769 * Return: DP tunnel or %NULL if no tunnel found.
777 if (!tb_dp_port_is_enabled(in)) in tb_tunnel_discover_dp()
781 if (!tunnel) in tb_tunnel_discover_dp()
784 tunnel->init = tb_dp_xchg_caps; in tb_tunnel_discover_dp()
785 tunnel->activate = tb_dp_activate; in tb_tunnel_discover_dp()
786 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_discover_dp()
787 tunnel->src_port = in; in tb_tunnel_discover_dp()
789 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, in tb_tunnel_discover_dp()
790 &tunnel->dst_port, "Video"); in tb_tunnel_discover_dp()
791 if (!path) { in tb_tunnel_discover_dp()
796 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; in tb_tunnel_discover_dp()
797 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT])) in tb_tunnel_discover_dp()
800 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX"); in tb_tunnel_discover_dp()
801 if (!path) in tb_tunnel_discover_dp()
803 tunnel->paths[TB_DP_AUX_PATH_OUT] = path; in tb_tunnel_discover_dp()
804 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]); in tb_tunnel_discover_dp()
806 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, in tb_tunnel_discover_dp()
808 if (!path) in tb_tunnel_discover_dp()
810 tunnel->paths[TB_DP_AUX_PATH_IN] = path; in tb_tunnel_discover_dp()
811 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]); in tb_tunnel_discover_dp()
814 if (!tb_port_is_dpout(tunnel->dst_port)) { in tb_tunnel_discover_dp()
819 if (!tb_dp_port_is_enabled(tunnel->dst_port)) in tb_tunnel_discover_dp()
822 if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) in tb_tunnel_discover_dp()
825 if (port != tunnel->src_port) { in tb_tunnel_discover_dp()
842 * tb_tunnel_alloc_dp() - allocate a DisplayPort tunnel
847 * if not limited)
849 * (%0 if not limited)
864 if (WARN_ON(!in->cap_adap || !out->cap_adap)) in tb_tunnel_alloc_dp()
868 if (!tunnel) in tb_tunnel_alloc_dp()
871 tunnel->init = tb_dp_xchg_caps; in tb_tunnel_alloc_dp()
872 tunnel->activate = tb_dp_activate; in tb_tunnel_alloc_dp()
873 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_alloc_dp()
874 tunnel->src_port = in; in tb_tunnel_alloc_dp()
875 tunnel->dst_port = out; in tb_tunnel_alloc_dp()
876 tunnel->max_up = max_up; in tb_tunnel_alloc_dp()
877 tunnel->max_down = max_down; in tb_tunnel_alloc_dp()
879 paths = tunnel->paths; in tb_tunnel_alloc_dp()
883 if (!path) in tb_tunnel_alloc_dp()
890 if (!path) in tb_tunnel_alloc_dp()
897 if (!path) in tb_tunnel_alloc_dp()
911 const struct tb_switch *sw = port->sw; in tb_dma_available_credits()
915 if (tb_acpi_may_tunnel_pcie()) in tb_dma_available_credits()
916 credits -= sw->max_pcie_credits; in tb_dma_available_credits()
917 credits -= port->dma_credits; in tb_dma_available_credits()
924 struct tb_port *port = hop->in_port; in tb_dma_reserve_credits()
926 if (tb_port_use_credit_allocation(port)) { in tb_dma_reserve_credits()
933 if (available < TB_MIN_DMA_CREDITS) in tb_dma_reserve_credits()
934 return -ENOSPC; in tb_dma_reserve_credits()
937 credits--; in tb_dma_reserve_credits()
942 port->dma_credits += credits; in tb_dma_reserve_credits()
944 if (tb_port_is_null(port)) in tb_dma_reserve_credits()
945 credits = port->bonded ? 14 : 6; in tb_dma_reserve_credits()
947 credits = min(port->total_credits, credits); in tb_dma_reserve_credits()
950 hop->initial_credits = credits; in tb_dma_reserve_credits()
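
On the legacy (non-USB4) branch above the DMA hop gets a fixed value by link width, clamped to what the port physically has. A one-screen restatement with an invented port total:

#include <stdio.h>

int main(void)
{
	unsigned int total_credits = 10;	/* invented port total */
	int bonded = 1;				/* two lanes bonded */

	unsigned int credits = bonded ? 14 : 6;
	if (credits > total_credits)
		credits = total_credits;	/* min(total, credits) */

	printf("DMA hop initial credits: %u\n", credits);	/* prints 10 */
	return 0;
}
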
960 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dma_init_rx_path()
961 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_rx_path()
962 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
963 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
964 path->priority = 5; in tb_dma_init_rx_path()
965 path->weight = 1; in tb_dma_init_rx_path()
966 path->clear_fc = true; in tb_dma_init_rx_path()
973 hop = &path->hops[0]; in tb_dma_init_rx_path()
974 tmp = min(tb_usable_credits(hop->in_port), credits); in tb_dma_init_rx_path()
975 hop->initial_credits = tmp; in tb_dma_init_rx_path()
976 hop->in_port->dma_credits += tmp; in tb_dma_init_rx_path()
978 for (i = 1; i < path->path_length; i++) { in tb_dma_init_rx_path()
981 ret = tb_dma_reserve_credits(&path->hops[i], credits); in tb_dma_init_rx_path()
982 if (ret) in tb_dma_init_rx_path()
994 path->egress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
995 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
996 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
997 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
998 path->priority = 5; in tb_dma_init_tx_path()
999 path->weight = 1; in tb_dma_init_tx_path()
1000 path->clear_fc = true; in tb_dma_init_tx_path()
1006 if (ret) in tb_dma_init_tx_path()
1015 struct tb_port *port = hop->in_port; in tb_dma_release_credits()
1017 if (tb_port_use_credit_allocation(port)) { in tb_dma_release_credits()
1018 port->dma_credits -= hop->initial_credits; in tb_dma_release_credits()
1021 hop->initial_credits); in tb_dma_release_credits()
1037 for (i = 0; i < tunnel->npaths; i++) { in tb_dma_deinit()
1038 if (!tunnel->paths[i]) in tb_dma_deinit()
1040 tb_dma_deinit_path(tunnel->paths[i]); in tb_dma_deinit()
1045 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1051 * other domain. Set to %-1 if TX path is not needed.
1054 * other domain. Set to %-1 if RX path is not needed.
1068 if (receive_ring > 0) in tb_tunnel_alloc_dma()
1070 if (transmit_ring > 0) in tb_tunnel_alloc_dma()
1073 if (WARN_ON(!npaths)) in tb_tunnel_alloc_dma()
1077 if (!tunnel) in tb_tunnel_alloc_dma()
1080 tunnel->src_port = nhi; in tb_tunnel_alloc_dma()
1081 tunnel->dst_port = dst; in tb_tunnel_alloc_dma()
1082 tunnel->deinit = tb_dma_deinit; in tb_tunnel_alloc_dma()
1084 credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits); in tb_tunnel_alloc_dma()
1086 if (receive_ring > 0) { in tb_tunnel_alloc_dma()
1089 if (!path) in tb_tunnel_alloc_dma()
1091 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1092 if (tb_dma_init_rx_path(path, credits)) { in tb_tunnel_alloc_dma()
1098 if (transmit_ring > 0) { in tb_tunnel_alloc_dma()
1101 if (!path) in tb_tunnel_alloc_dma()
1103 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1104 if (tb_dma_init_tx_path(path, credits)) { in tb_tunnel_alloc_dma()
1118 * tb_tunnel_match_dma() - Match DMA tunnel
1120 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1122 * other domain. Pass %-1 to ignore.
1123 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1125 * other domain. Pass %-1 to ignore.
1127 * This function can be used to match a specific DMA tunnel, if there are in tb_tunnel_match_dma()
1129 * Returns true if there is a match and false otherwise. in tb_tunnel_match_dma()
1137 if (!receive_ring || !transmit_ring) in tb_tunnel_match_dma()
1140 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_match_dma()
1141 const struct tb_path *path = tunnel->paths[i]; in tb_tunnel_match_dma()
1143 if (!path) in tb_tunnel_match_dma()
1146 if (tb_port_is_nhi(path->hops[0].in_port)) in tb_tunnel_match_dma()
1148 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port)) in tb_tunnel_match_dma()
1152 if (transmit_ring > 0 || transmit_path > 0) { in tb_tunnel_match_dma()
1153 if (!tx_path) in tb_tunnel_match_dma()
1155 if (transmit_ring > 0 && in tb_tunnel_match_dma()
1156 (tx_path->hops[0].in_hop_index != transmit_ring)) in tb_tunnel_match_dma()
1158 if (transmit_path > 0 && in tb_tunnel_match_dma()
1159 (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path)) in tb_tunnel_match_dma()
1163 if (receive_ring > 0 || receive_path > 0) { in tb_tunnel_match_dma()
1164 if (!rx_path) in tb_tunnel_match_dma()
1166 if (receive_path > 0 && in tb_tunnel_match_dma()
1167 (rx_path->hops[0].in_hop_index != receive_path)) in tb_tunnel_match_dma()
1169 if (receive_ring > 0 && in tb_tunnel_match_dma()
1170 (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring)) in tb_tunnel_match_dma()
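
The matcher above follows one convention throughout: 0 means "no such ring/path" and fails immediately, -1 wildcards a field, and positive values must match the tunnel's HopIDs exactly. A tiny standalone demonstration of that three-way rule (names invented):

#include <stdbool.h>
#include <stdio.h>

static bool field_matches(int wanted, int actual)
{
	if (wanted < 0)
		return true;		/* -1: caller ignores this field */
	return wanted == actual;	/* positive: must match exactly */
}

int main(void)
{
	int tunnel_tx_ring = 2;

	printf("%d\n", field_matches(-1, tunnel_tx_ring));	/* 1 */
	printf("%d\n", field_matches(2, tunnel_tx_ring));	/* 1 */
	printf("%d\n", field_matches(3, tunnel_tx_ring));	/* 0 */
	return 0;
}
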
1182 if (ret < 0) in tb_usb3_max_link_rate()
1187 if (ret < 0) in tb_usb3_max_link_rate()
1197 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_init()
1199 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port, in tb_usb3_init()
1200 &tunnel->allocated_up, in tb_usb3_init()
1201 &tunnel->allocated_down); in tb_usb3_init()
1208 res = tb_usb3_port_enable(tunnel->src_port, activate); in tb_usb3_activate()
1209 if (res) in tb_usb3_activate()
1212 if (tb_port_is_usb3_up(tunnel->dst_port)) in tb_usb3_activate()
1213 return tb_usb3_port_enable(tunnel->dst_port, activate); in tb_usb3_activate()
1224 * PCIe tunneling, if enabled, affects the USB3 bandwidth so in tb_usb3_consumed_bandwidth()
1227 *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; in tb_usb3_consumed_bandwidth()
1228 *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; in tb_usb3_consumed_bandwidth()
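
The scaling above charges the USB3 tunnel an extra third of its allocation whenever PCIe shares the link. Worked through with invented allocations:

#include <stdio.h>

int main(void)
{
	int allocated_up = 3000, allocated_down = 9000;	/* Mb/s, invented */
	int pcie_enabled = 1;		/* PCIe tunneling is active */

	int consumed_up = allocated_up * (3 + pcie_enabled) / 3;
	int consumed_down = allocated_down * (3 + pcie_enabled) / 3;

	printf("up %d Mb/s, down %d Mb/s\n", consumed_up, consumed_down);
	/* prints "up 4000 Mb/s, down 12000 Mb/s" */
	return 0;
}
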
1236 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, in tb_usb3_release_unused_bandwidth()
1237 &tunnel->allocated_up, in tb_usb3_release_unused_bandwidth()
1238 &tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1239 if (ret) in tb_usb3_release_unused_bandwidth()
1243 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1253 ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
1254 if (ret < 0) { in tb_usb3_reclaim_available_bandwidth()
1257 } else if (!ret) { in tb_usb3_reclaim_available_bandwidth()
1258 /* Use the maximum link rate if the link valid bit is not set */ in tb_usb3_reclaim_available_bandwidth()
1259 ret = usb4_usb3_port_max_link_rate(tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
1260 if (ret < 0) { in tb_usb3_reclaim_available_bandwidth()
1272 /* No need to reclaim if already at maximum */ in tb_usb3_reclaim_available_bandwidth()
1273 if (tunnel->allocated_up >= max_rate && in tb_usb3_reclaim_available_bandwidth()
1274 tunnel->allocated_down >= max_rate) in tb_usb3_reclaim_available_bandwidth()
1279 if (allocate_up < tunnel->allocated_up) in tb_usb3_reclaim_available_bandwidth()
1280 allocate_up = tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
1283 if (allocate_down < tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
1284 allocate_down = tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
1286 /* If there are no changes, no need to do more */ in tb_usb3_reclaim_available_bandwidth()
1287 if (allocate_up == tunnel->allocated_up && in tb_usb3_reclaim_available_bandwidth()
1288 allocate_down == tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
1291 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, in tb_usb3_reclaim_available_bandwidth()
1293 if (ret) { in tb_usb3_reclaim_available_bandwidth()
1298 tunnel->allocated_up = allocate_up; in tb_usb3_reclaim_available_bandwidth()
1299 *available_up -= tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
1301 tunnel->allocated_down = allocate_down; in tb_usb3_reclaim_available_bandwidth()
1302 *available_down -= tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
1305 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_reclaim_available_bandwidth()
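
The reclaim logic above grows each direction toward the link's maximum but never below the current grant. A runnable miniature of the clamping (figures invented; the bookkeeping against *available_up/down follows the code above):

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_rate = 9000;				/* Mb/s, invented */
	int allocated_up = 1000, available_up = 5000;

	int allocate_up = min_i(max_rate, allocated_up + available_up);
	if (allocate_up < allocated_up)
		allocate_up = allocated_up;	/* never shrink the grant */

	printf("new upstream allocation: %d Mb/s\n", allocate_up); /* 6000 */
	return 0;
}
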
1310 struct tb_port *port = hop->in_port; in tb_usb3_init_credits()
1311 struct tb_switch *sw = port->sw; in tb_usb3_init_credits()
1314 if (tb_port_use_credit_allocation(port)) { in tb_usb3_init_credits()
1315 credits = sw->max_usb3_credits; in tb_usb3_init_credits()
1317 if (tb_port_is_null(port)) in tb_usb3_init_credits()
1318 credits = port->bonded ? 32 : 16; in tb_usb3_init_credits()
1323 hop->initial_credits = credits; in tb_usb3_init_credits()
1330 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_usb3_init_path()
1331 path->egress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
1332 path->ingress_fc_enable = TB_PATH_ALL; in tb_usb3_init_path()
1333 path->ingress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
1334 path->priority = 3; in tb_usb3_init_path()
1335 path->weight = 3; in tb_usb3_init_path()
1336 path->drop_packages = 0; in tb_usb3_init_path()
1343 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1345 * @down: USB3 downstream adapter
1347 * If @down adapter is active, follows the tunnel to the USB3 upstream
1348 * adapter and back. Returns the discovered tunnel or %NULL if there was
1356 if (!tb_usb3_port_is_enabled(down)) in tb_tunnel_discover_usb3()
1360 if (!tunnel) in tb_tunnel_discover_usb3()
1363 tunnel->activate = tb_usb3_activate; in tb_tunnel_discover_usb3()
1364 tunnel->src_port = down; in tb_tunnel_discover_usb3()
1367 * Discover both paths even if they are not complete. We will in tb_tunnel_discover_usb3()
1371 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, in tb_tunnel_discover_usb3()
1372 &tunnel->dst_port, "USB3 Down"); in tb_tunnel_discover_usb3()
1373 if (!path) { in tb_tunnel_discover_usb3()
1378 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_discover_usb3()
1379 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); in tb_tunnel_discover_usb3()
1381 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, in tb_tunnel_discover_usb3()
1382 "USB3 Up"); in tb_tunnel_discover_usb3()
1383 if (!path) in tb_tunnel_discover_usb3()
1385 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_discover_usb3()
1386 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); in tb_tunnel_discover_usb3()
1389 if (!tb_port_is_usb3_up(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
1390 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_usb3()
1391 "path does not end on an USB3 adapter, cleaning up\n"); in tb_tunnel_discover_usb3()
1395 if (down != tunnel->src_port) { in tb_tunnel_discover_usb3()
1400 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
1406 if (!tb_route(down->sw)) { in tb_tunnel_discover_usb3()
1414 &tunnel->allocated_up, &tunnel->allocated_down); in tb_tunnel_discover_usb3()
1415 if (ret) in tb_tunnel_discover_usb3()
1419 tunnel->allocated_up, tunnel->allocated_down); in tb_tunnel_discover_usb3()
1421 tunnel->init = tb_usb3_init; in tb_tunnel_discover_usb3()
1422 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_discover_usb3()
1423 tunnel->release_unused_bandwidth = in tb_tunnel_discover_usb3()
1425 tunnel->reclaim_available_bandwidth = in tb_tunnel_discover_usb3()
1441 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1443 * @up: USB3 upstream adapter port
1444 * @down: USB3 downstream adapter port
1445 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
1446 * if not limited).
1447 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
1448 * (%0 if not limited).
1450 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
1465 * USB3 tunnel. in tb_tunnel_alloc_usb3()
1467 if (max_up > 0 || max_down > 0) { in tb_tunnel_alloc_usb3()
1469 if (max_rate < 0) in tb_tunnel_alloc_usb3()
1472 /* Only 90% can be allocated for USB3 isochronous transfers */ in tb_tunnel_alloc_usb3()
1474 tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n", in tb_tunnel_alloc_usb3()
1477 if (max_rate > max_up || max_rate > max_down) { in tb_tunnel_alloc_usb3()
1478 tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n"); in tb_tunnel_alloc_usb3()
1484 if (!tunnel) in tb_tunnel_alloc_usb3()
1487 tunnel->activate = tb_usb3_activate; in tb_tunnel_alloc_usb3()
1488 tunnel->src_port = down; in tb_tunnel_alloc_usb3()
1489 tunnel->dst_port = up; in tb_tunnel_alloc_usb3()
1490 tunnel->max_up = max_up; in tb_tunnel_alloc_usb3()
1491 tunnel->max_down = max_down; in tb_tunnel_alloc_usb3()
1494 "USB3 Down"); in tb_tunnel_alloc_usb3()
1495 if (!path) { in tb_tunnel_alloc_usb3()
1500 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_alloc_usb3()
1503 "USB3 Up"); in tb_tunnel_alloc_usb3()
1504 if (!path) { in tb_tunnel_alloc_usb3()
1509 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_alloc_usb3()
1511 if (!tb_route(down->sw)) { in tb_tunnel_alloc_usb3()
1512 tunnel->allocated_up = max_rate; in tb_tunnel_alloc_usb3()
1513 tunnel->allocated_down = max_rate; in tb_tunnel_alloc_usb3()
1515 tunnel->init = tb_usb3_init; in tb_tunnel_alloc_usb3()
1516 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_alloc_usb3()
1517 tunnel->release_unused_bandwidth = in tb_tunnel_alloc_usb3()
1519 tunnel->reclaim_available_bandwidth = in tb_tunnel_alloc_usb3()
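
Per the 90% comment earlier in this function, only nine tenths of the negotiated link rate may be claimed for USB3 isochronous traffic; the exact integer expression below is an assumption consistent with that comment:

#include <stdio.h>

int main(void)
{
	int link_rate = 10000;	/* Mb/s: hypothetical 10 Gb/s USB3 link */

	int max_rate = link_rate * 90 / 100;	/* isochronous ceiling */

	printf("USB3 tunnel bandwidth: %d Mb/s\n", max_rate);	/* 9000 */
	return 0;
}
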
1527 * tb_tunnel_free() - free a tunnel
1536 if (!tunnel) in tb_tunnel_free()
1539 if (tunnel->deinit) in tb_tunnel_free()
1540 tunnel->deinit(tunnel); in tb_tunnel_free()
1542 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_free()
1543 if (tunnel->paths[i]) in tb_tunnel_free()
1544 tb_path_free(tunnel->paths[i]); in tb_tunnel_free()
1547 kfree(tunnel->paths); in tb_tunnel_free()
1552 * tb_tunnel_is_invalid() - check whether an activated path is still valid
1559 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_invalid()
1560 WARN_ON(!tunnel->paths[i]->activated); in tb_tunnel_is_invalid()
1561 if (tb_path_is_invalid(tunnel->paths[i])) in tb_tunnel_is_invalid()
1569 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1572 * Return: 0 on success and negative errno in case of failure
1584 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_restart()
1585 if (tunnel->paths[i]->activated) { in tb_tunnel_restart()
1586 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_restart()
1587 tunnel->paths[i]->activated = false; in tb_tunnel_restart()
1591 if (tunnel->init) { in tb_tunnel_restart()
1592 res = tunnel->init(tunnel); in tb_tunnel_restart()
1593 if (res) in tb_tunnel_restart()
1597 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_restart()
1598 res = tb_path_activate(tunnel->paths[i]); in tb_tunnel_restart()
1599 if (res) in tb_tunnel_restart()
1603 if (tunnel->activate) { in tb_tunnel_restart()
1604 res = tunnel->activate(tunnel, true); in tb_tunnel_restart()
1605 if (res) in tb_tunnel_restart()
1618 * tb_tunnel_activate() - activate a tunnel
1627 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
1628 if (tunnel->paths[i]->activated) { in tb_tunnel_activate()
1631 return -EINVAL; in tb_tunnel_activate()
1639 * tb_tunnel_deactivate() - deactivate a tunnel
1648 if (tunnel->activate) in tb_tunnel_deactivate()
1649 tunnel->activate(tunnel, false); in tb_tunnel_deactivate()
1651 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_deactivate()
1652 if (tunnel->paths[i] && tunnel->paths[i]->activated) in tb_tunnel_deactivate()
1653 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_deactivate()
1658 * tb_tunnel_port_on_path() - Does the tunnel go through port
1662 * Returns true if @tunnel goes through @port (direction does not matter),
1670 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_port_on_path()
1671 if (!tunnel->paths[i]) in tb_tunnel_port_on_path()
1674 if (tb_path_port_on_path(tunnel->paths[i], port)) in tb_tunnel_port_on_path()
1685 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_active()
1686 if (!tunnel->paths[i]) in tb_tunnel_is_active()
1688 if (!tunnel->paths[i]->activated) in tb_tunnel_is_active()
1696 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1712 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_consumed_bandwidth()
1715 if (tunnel->consumed_bandwidth) { in tb_tunnel_consumed_bandwidth()
1718 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); in tb_tunnel_consumed_bandwidth()
1719 if (ret) in tb_tunnel_consumed_bandwidth()
1727 if (consumed_up) in tb_tunnel_consumed_bandwidth()
1729 if (consumed_down) in tb_tunnel_consumed_bandwidth()
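
The wrapper above is a null-safe callback dispatch: inactive tunnels and tunnel types without a consumed_bandwidth hook (PCIe, for instance) report zero. A compact standalone model of that shape (struct and figures invented):

#include <stdio.h>

struct tunnel {
	int active;
	int (*consumed_bandwidth)(struct tunnel *, int *up, int *down);
};

static int dp_consumed(struct tunnel *t, int *up, int *down)
{
	(void)t;
	*up = 0;
	*down = 17280;	/* invented: 5400 Mb/s x 4 lanes x 8/10 */
	return 0;
}

static int consumed_bandwidth(struct tunnel *t, int *up, int *down)
{
	*up = *down = 0;	/* default when there is nothing to ask */
	if (t->active && t->consumed_bandwidth)
		return t->consumed_bandwidth(t, up, down);
	return 0;
}

int main(void)
{
	struct tunnel dp = { 1, dp_consumed };
	int up, down;

	consumed_bandwidth(&dp, &up, &down);
	printf("up %d Mb/s, down %d Mb/s\n", up, down);
	return 0;
}
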
1736 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1739 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
1746 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_release_unused_bandwidth()
1749 if (tunnel->release_unused_bandwidth) { in tb_tunnel_release_unused_bandwidth()
1752 ret = tunnel->release_unused_bandwidth(tunnel); in tb_tunnel_release_unused_bandwidth()
1753 if (ret) in tb_tunnel_release_unused_bandwidth()
1761 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1768 * reclaimed by the tunnel). If nothing was reclaimed, the values are
1775 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_reclaim_available_bandwidth()
1778 if (tunnel->reclaim_available_bandwidth) in tb_tunnel_reclaim_available_bandwidth()
1779 tunnel->reclaim_available_bandwidth(tunnel, available_up, in tb_tunnel_reclaim_available_bandwidth()