// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS			0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

enum nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

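/*
 * Validate the cached NVM image (FARB pointer, digital section size and
 * alignment, device ID) and write it to the non-active flash region. On
 * generation 1 and 2 hardware the CSS headers are written out separately
 * first.
 */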
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout here is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and, if it is supported, assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

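/* Dispatch NVM authentication to the USB4 or DMA port implementation */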
static int nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

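/*
 * NVMem read callback. The domain lock is taken with mutex_trylock() so
 * that a concurrent operation makes the syscall restart instead of
 * blocking the reader.
 */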
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so
	 * currently restrict NVM upgrade for Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_nvm;

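		/*
		 * The low bits of NVM_FLASH_SIZE appear to encode the
		 * flash size as a power-of-two number of Mbits; convert
		 * to bytes and split off one of the two (active and
		 * non-active) NVM regions that follow the header.
		 */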
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits, pass a negative amount.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

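/* Set or clear the Lane Disable bit to bring the lane adapter up or down */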
static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable the lane.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable the lane.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;
}

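/*
 * Allocate a HopID in the given direction from the port's IDA, clamping
 * the requested range to what the adapter supports.
 */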
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

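/*
 * Each hop in a route string takes 8 bits, so masking both routes with
 * (1 << depth * 8) - 1 compares the route prefixes up to the parent's
 * depth: @sw is reachable from @parent if the prefixes match.
 */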
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

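/* Current link width from LANE_ADP_CS_1: 1 for single lane, 2 for dual */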
static int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

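/* Check the supported-widths bitfield in LANE_ADP_CS_0 against @width */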
static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

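/* Program the target link width and the Lane Bonding bit in LANE_ADP_CS_1 */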
static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

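/*
 * Bond both lanes of the link by setting dual width on the primary and
 * secondary lane adapters; on failure the primary is rolled back.
 */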
static int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

static void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64)regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *)&sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

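	/*
	 * Bits 2-6 of this register control plug event delivery. When
	 * activating, clear them (mask 0xFFFFFF83) and set bit 2 back
	 * on everything except the oldest (Light Ridge, Eagle Ridge,
	 * Port Ridge) controllers; when deactivating, set bits 2-6
	 * (0x7c). The meaning of the individual bits is not documented
	 * here.
	 */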
	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

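/*
 * Authorize the switch according to @val: 1 approves the switch (using
 * the stored key if there is one), 2 challenges it with the stored key.
 * Once authorized, the value cannot be changed again here.
 */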
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently link has same amount of lanes both directions (1 or 2) but
 * expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

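/*
 * Writing 1 (WRITE_AND_AUTHENTICATE) flushes the cached image to the
 * non-active NVM and starts authentication; 2 (WRITE_ONLY) only flushes
 * the image. With @disconnect set, authentication happens via link
 * controller force power once the switch is disconnected.
 */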
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (!sw->nvm->flushed) {
			if (!sw->nvm->buf) {
				ret = -EINVAL;
				goto exit_unlock;
			}

			ret = nvm_validate_and_write(sw);
			if (ret || val == WRITE_ONLY)
				goto exit_unlock;
		}
		if (val == WRITE_AND_AUTHENTICATE) {
			if (disconnect) {
				ret = tb_lc_force_power(sw);
			} else {
				sw->nvm->authenticating = true;
				ret = nvm_authenticate(sw);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled) {
			ida_destroy(&port->in_hopids);
			ida_destroy(&port->out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

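/* Map the (Intel) device ID to a Thunderbolt hardware generation */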
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

tb_switch_exceeds_max_depth(const struct tb_switch * sw,int depth)1862 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1863 {
1864 int max_depth;
1865
1866 if (tb_switch_is_usb4(sw) ||
1867 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1868 max_depth = USB4_SWITCH_MAX_DEPTH;
1869 else
1870 max_depth = TB_SWITCH_MAX_DEPTH;
1871
1872 return depth > max_depth;
1873 }

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
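
/*
 * Illustrative usage sketch (not part of the driver): a connection
 * manager that has discovered a router behind @route would typically
 * pair tb_switch_alloc() with tb_switch_configure(), tb_switch_add()
 * and tb_switch_put(); parent_sw here is a hypothetical name:
 *
 *	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return;
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);
 */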

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. In safe mode the switch lacks
 * pretty much all capabilities except the DMA configuration port,
 * until it is flashed with valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}
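
/*
 * Worked example (illustrative only): for a router with
 * sw->uid == 0x0123456789abcdef the expansion above produces
 *
 *	uuid[0] = 0x89abcdef;	// low 32 bits of the UID
 *	uuid[1] = 0x01234567;	// high 32 bits of the UID
 *	uuid[2] = 0xffffffff;
 *	uuid[3] = 0xffffffff;
 */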

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	if (sw->no_nvm_upgrade)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check the status of the previous flash authentication. If
	 * there is one we need to power cycle the switch in any case
	 * to make it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes adding the switch to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}
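
/*
 * Informal summary of the flow above (no additional driver logic): a
 * cached status means authentication already failed and the power
 * cycle already happened, so only runtime PM is unblocked; a freshly
 * read status is cached, the switch is power cycled, and -ESHUTDOWN
 * makes the add fail until the switch re-enumerates.
 */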

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}
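
/*
 * Example (derived from the loop above): on a router whose DROM does
 * not describe dual-link ports, adapters are paired as (1,2), (3,4),
 * and so on, the odd port getting link_nr 0 and its even subordinate
 * link_nr 1.
 */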

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}
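
/*
 * Illustrative pairing (not part of the driver): a connection manager
 * typically enables bonding right after the router has been added and
 * then marks the link configured so it survives sleep:
 *
 *	tb_switch_lane_bonding_enable(sw);
 *	tb_switch_configure_link(sw);
 *
 * Both calls are safe for any switch; on teardown the inverse pair
 * tb_switch_unconfigure_link() and tb_switch_lane_bonding_disable()
 * can likewise be called unconditionally.
 */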

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM in NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;
}
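
/*
 * Illustrative error handling (a sketch, assuming the caller obtained
 * @sw from tb_switch_alloc()): if tb_switch_add() fails the caller
 * still holds the allocation reference and must drop it:
 *
 *	if (tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return;
 *	}
 */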

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark as unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of connected switches, except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
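
/*
 * Summary of the wake configuration above (informal): runtime suspend
 * enables TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT in addition to
 * the protocol wakes (TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 |
 * TB_WAKE_ON_PCIE), while system sleep enables only the protocol
 * wakes, and only if device_may_wakeup() allows it.
 */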

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}
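
/*
 * Illustrative usage (not part of the driver): before tunneling
 * DisplayPort through @in a connection manager would typically do
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		...	// establish the DP tunnel
 *	}
 *
 * and call tb_switch_dealloc_dp_resource(sw, in) once the tunnel is
 * torn down.
 */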

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
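
/*
 * Illustrative usage (not part of the driver): all of the lookup
 * helpers return a referenced switch that the caller must release:
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...	// use sw
 *		tb_switch_put(sw);
 *	}
 */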

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
