Lines matching "dma", "router" in drivers/thunderbolt/ctl.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - control channel and configuration commands
23 * struct tb_ctl - Thunderbolt control channel
27 * @frame_pool: DMA pool for control messages
32 * @timeout_msec: Default timeout for non-raw control messages
54 dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
57 dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
60 dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
63 dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
66 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
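The five matched macro bodies above all route through the NHI's PCI device, so control-channel messages are attributable in dmesg. A plausible reconstruction of one full macro from its visible expansion, plus a usage line grounded in this listing (it appears verbatim in tb_ctl_rx_callback() below):

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

/* Example call: */
tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", frame->size);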
73 * tb_cfg_request_alloc() - Allocate a new config request
86 kref_init(&req->kref); in tb_cfg_request_alloc()
92 * tb_cfg_request_get() - Increase refcount of a request
98 kref_get(&req->kref); in tb_cfg_request_get()
110 * tb_cfg_request_put() - Decrease refcount and possibly release the request
119 kref_put(&req->kref, tb_cfg_request_destroy); in tb_cfg_request_put()
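Request lifetime is kref-managed: tb_cfg_request_get()/tb_cfg_request_put() bracket every user, and the final put invokes tb_cfg_request_destroy(). That destructor's body is not among the matched lines; a minimal sketch, assuming the request owns no resources beyond its own allocation:

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, struct tb_cfg_request,
						  kref);

	kfree(req);	/* assumed: nothing else to release at this point */
}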
126 WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)); in tb_cfg_request_enqueue()
127 WARN_ON(req->ctl); in tb_cfg_request_enqueue()
129 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
130 if (!ctl->running) { in tb_cfg_request_enqueue()
131 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
132 return -ENOTCONN; in tb_cfg_request_enqueue()
134 req->ctl = ctl; in tb_cfg_request_enqueue()
135 list_add_tail(&req->list, &ctl->request_queue); in tb_cfg_request_enqueue()
136 set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); in tb_cfg_request_enqueue()
137 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
143 struct tb_ctl *ctl = req->ctl; in tb_cfg_request_dequeue()
145 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
146 list_del(&req->list); in tb_cfg_request_dequeue()
147 clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); in tb_cfg_request_dequeue()
148 if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) in tb_cfg_request_dequeue()
150 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
155 return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); in tb_cfg_request_is_active()
163 mutex_lock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
164 list_for_each_entry(iter, &pkg->ctl->request_queue, list) { in tb_cfg_request_find()
166 if (iter->match(iter, pkg)) { in tb_cfg_request_find()
172 mutex_unlock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
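tb_cfg_request_find() appears here only in fragments. Reading them together, the lookup most plausibly takes a temporary reference on each candidate under the queue lock, so the winning request stays alive until the RX path drops it; a hedged reconstruction of the whole function:

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);	/* pin before matching */
		if (iter->match(iter, pkg)) {
			req = iter;		/* caller must put this ref */
			break;
		}
		tb_cfg_request_put(iter);	/* not ours, unpin */
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}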
183 struct tb_cfg_header *header = pkg->buffer; in check_header()
186 if (WARN(len != pkg->frame.size, in check_header()
188 len, pkg->frame.size)) in check_header()
189 return -EIO; in check_header()
190 if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n", in check_header()
191 type, pkg->frame.eof)) in check_header()
192 return -EIO; in check_header()
193 if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n", in check_header()
194 pkg->frame.sof)) in check_header()
195 return -EIO; in check_header()
198 if (WARN(header->unknown != 1 << 9, in check_header()
199 "header->unknown is %#x\n", header->unknown)) in check_header()
200 return -EIO; in check_header()
204 return -EIO; in check_header()
213 return -EIO; in check_config_address()
216 return -EIO; in check_config_address()
219 return -EIO; in check_config_address()
222 return -EIO; in check_config_address()
224 * We cannot check addr->port as it is set to the upstream port of the in check_config_address()
232 struct cfg_error_pkg *pkg = response->buffer; in decode_error()
233 struct tb_ctl *ctl = response->ctl; in decode_error()
235 res.response_route = tb_cfg_get_route(&pkg->header); in decode_error()
238 tb_cfg_get_route(&pkg->header)); in decode_error()
242 if (pkg->zero1) in decode_error()
243 tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1); in decode_error()
244 if (pkg->zero2) in decode_error()
245 tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2); in decode_error()
246 if (pkg->zero3) in decode_error()
247 tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3); in decode_error()
250 res.tb_error = pkg->error; in decode_error()
251 res.response_port = pkg->port; in decode_error()
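decode_error() translates a cfg_error_pkg into the driver's generic result type. struct tb_cfg_result itself is outside the match set; roughly, as the driver's ctl.h defines it (the field comments are my gloss):

struct tb_cfg_result {
	u64 response_route;		/* route the response came from */
	u32 response_port;		/* port that generated the response */
	int err;			/* negative errno, 0 = ok, 1 = TB error */
	enum tb_cfg_error tb_error;	/* valid only when err == 1 */
};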
259 struct tb_cfg_header *header = pkg->buffer; in parse_header()
262 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in parse_header()
274 WARN_ON(res->err != 1); in tb_cfg_print_error()
275 switch (res->tb_error) { in tb_cfg_print_error()
286 res->response_route, res->response_port); in tb_cfg_print_error()
290 * - The route contains a non-existent port. in tb_cfg_print_error()
291 * - The route contains a non-PHY port (e.g. PCIe). in tb_cfg_print_error()
292 * - The port in cfg_read/cfg_write does not exist. in tb_cfg_print_error()
295 res->response_route, res->response_port); in tb_cfg_print_error()
299 res->response_route, res->response_port); in tb_cfg_print_error()
303 res->response_route, res->response_port); in tb_cfg_print_error()
308 res->response_route, res->response_port); in tb_cfg_print_error()
321 dma_pool_free(pkg->ctl->frame_pool, in tb_ctl_pkg_free()
322 pkg->buffer, pkg->frame.buffer_phy); in tb_ctl_pkg_free()
332 pkg->ctl = ctl; in tb_ctl_pkg_alloc()
333 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, in tb_ctl_pkg_alloc()
334 &pkg->frame.buffer_phy); in tb_ctl_pkg_alloc()
335 if (!pkg->buffer) { in tb_ctl_pkg_alloc()
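Joining the allocation fragments: the packet wrapper comes from kzalloc() and its frame buffer from the control channel's DMA pool, with the buffer's DMA address stored directly in the ring frame. A hedged fill-in of the elided lines:

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);	/* undo the wrapper on pool failure */
		return NULL;
	}
	return pkg;
}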
353 * tb_ctl_tx() - transmit a packet on the control channel
364 if (len % 4 != 0) { /* required for le->be conversion */ in tb_ctl_tx()
366 return -EINVAL; in tb_ctl_tx()
368 if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */ in tb_ctl_tx()
370 len, TB_FRAME_SIZE - 4); in tb_ctl_tx()
371 return -EINVAL; in tb_ctl_tx()
375 return -ENOMEM; in tb_ctl_tx()
376 pkg->frame.callback = tb_ctl_tx_callback; in tb_ctl_tx()
377 pkg->frame.size = len + 4; in tb_ctl_tx()
378 pkg->frame.sof = type; in tb_ctl_tx()
379 pkg->frame.eof = type; in tb_ctl_tx()
380 cpu_to_be32_array(pkg->buffer, data, len / 4); in tb_ctl_tx()
381 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); in tb_ctl_tx()
383 res = tb_ring_tx(ctl->tx, &pkg->frame); in tb_ctl_tx()
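tb_ctl_tx() thus byte-swaps the payload to big-endian in 4-byte words and appends a 4-byte checksum before queueing the frame on the TX ring. tb_crc() is not in the match set; in this driver it is plausibly an inverted CRC32C over the buffer (via <linux/crc32.h>), along these lines:

static __be32 tb_crc(const void *data, size_t len)
{
	/* Assumed implementation: bit-inverted CRC32C, returned big-endian
	 * so it can be stored into the frame tail as-is.
	 */
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}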
390 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
395 return ctl->callback(ctl->callback_data, type, pkg->buffer, size); in tb_ctl_handle_event()
400 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* in tb_ctl_rx_submit()
403 * from ctl->rx_packets, so we do in tb_ctl_rx_submit()
410 const struct cfg_error_pkg *error = pkg->buffer; in tb_async_error()
412 if (pkg->frame.eof != TB_CFG_PKG_ERROR) in tb_async_error()
415 switch (error->error) { in tb_async_error()
436 * ctl->rx_packets. in tb_ctl_rx_callback()
439 if (frame->size < 4 || frame->size % 4 != 0) { in tb_ctl_rx_callback()
440 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", in tb_ctl_rx_callback()
441 frame->size); in tb_ctl_rx_callback()
445 frame->size -= 4; /* remove checksum */ in tb_ctl_rx_callback()
446 crc32 = tb_crc(pkg->buffer, frame->size); in tb_ctl_rx_callback()
447 be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); in tb_ctl_rx_callback()
449 switch (frame->eof) { in tb_ctl_rx_callback()
455 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
456 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
461 tb_ctl_handle_event(pkg->ctl, frame->eof, in tb_ctl_rx_callback()
462 pkg, frame->size); in tb_ctl_rx_callback()
470 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
471 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
477 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) in tb_ctl_rx_callback()
491 req = tb_cfg_request_find(pkg->ctl, pkg); in tb_ctl_rx_callback()
493 if (req->copy(req, pkg)) in tb_ctl_rx_callback()
494 schedule_work(&req->work); in tb_ctl_rx_callback()
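For reply packets the RX callback hands off to the matching in-flight request. Stitching the surrounding fragments together, the handoff plausibly reads:

	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))	/* response fully captured? */
			schedule_work(&req->work);	/* callback in process ctx */
		tb_cfg_request_put(req);	/* drop the find() reference */
	}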
506 if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) in tb_cfg_request_work()
507 req->callback(req->callback_data); in tb_cfg_request_work()
514 * tb_cfg_request() - Start control request without waiting for it to complete
528 req->flags = 0; in tb_cfg_request()
529 req->callback = callback; in tb_cfg_request()
530 req->callback_data = callback_data; in tb_cfg_request()
531 INIT_WORK(&req->work, tb_cfg_request_work); in tb_cfg_request()
532 INIT_LIST_HEAD(&req->list); in tb_cfg_request()
539 ret = tb_ctl_tx(ctl, req->request, req->request_size, in tb_cfg_request()
540 req->request_type); in tb_cfg_request()
544 if (!req->response) in tb_cfg_request()
545 schedule_work(&req->work); in tb_cfg_request()
558 * tb_cfg_request_cancel() - Cancel a control request
567 set_bit(TB_CFG_REQUEST_CANCELED, &req->flags); in tb_cfg_request_cancel()
568 schedule_work(&req->work); in tb_cfg_request_cancel()
570 req->result.err = err; in tb_cfg_request_cancel()
579 * tb_cfg_request_sync() - Start control request and wait until it completes
605 tb_cfg_request_cancel(req, -ETIMEDOUT); in tb_cfg_request_sync()
607 flush_work(&req->work); in tb_cfg_request_sync()
609 return req->result; in tb_cfg_request_sync()
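The visible fragments (the -ETIMEDOUT cancel, the flush_work(), returning req->result) suggest the synchronous wrapper is a completion layered on the async tb_cfg_request(). A reconstruction under that assumption; tb_cfg_request_complete() is inferred, not among the matched lines:

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);	/* callback has finished after this */

	return req->result;
}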
615 * tb_ctl_alloc() - allocate a control channel
617 * @timeout_msec: Default timeout used with non-raw control messages
632 ctl->nhi = nhi; in tb_ctl_alloc()
633 ctl->timeout_msec = timeout_msec; in tb_ctl_alloc()
634 ctl->callback = cb; in tb_ctl_alloc()
635 ctl->callback_data = cb_data; in tb_ctl_alloc()
637 mutex_init(&ctl->request_queue_lock); in tb_ctl_alloc()
638 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_alloc()
639 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, in tb_ctl_alloc()
641 if (!ctl->frame_pool) in tb_ctl_alloc()
644 ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND); in tb_ctl_alloc()
645 if (!ctl->tx) in tb_ctl_alloc()
648 ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, in tb_ctl_alloc()
650 if (!ctl->rx) in tb_ctl_alloc()
654 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); in tb_ctl_alloc()
655 if (!ctl->rx_packets[i]) in tb_ctl_alloc()
657 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; in tb_ctl_alloc()
668 * tb_ctl_free() - free a control channel
673 * Must NOT be called from ctl->callback.
682 if (ctl->rx) in tb_ctl_free()
683 tb_ring_free(ctl->rx); in tb_ctl_free()
684 if (ctl->tx) in tb_ctl_free()
685 tb_ring_free(ctl->tx); in tb_ctl_free()
689 tb_ctl_pkg_free(ctl->rx_packets[i]); in tb_ctl_free()
692 dma_pool_destroy(ctl->frame_pool); in tb_ctl_free()
697 * tb_ctl_start() - start/resume the control channel
704 tb_ring_start(ctl->tx); /* TX is used to ack hotplug packets, start it first */ in tb_ctl_start()
705 tb_ring_start(ctl->rx); in tb_ctl_start()
707 tb_ctl_rx_submit(ctl->rx_packets[i]); in tb_ctl_start()
709 ctl->running = true; in tb_ctl_start()
713 * tb_ctl_stop() - pause the control channel
716 * All invocations of ctl->callback will have finished after this method
719 * Must NOT be called from ctl->callback.
723 mutex_lock(&ctl->request_queue_lock); in tb_ctl_stop()
724 ctl->running = false; in tb_ctl_stop()
725 mutex_unlock(&ctl->request_queue_lock); in tb_ctl_stop()
727 tb_ring_stop(ctl->rx); in tb_ctl_stop()
728 tb_ring_stop(ctl->tx); in tb_ctl_stop()
730 if (!list_empty(&ctl->request_queue)) in tb_ctl_stop()
732 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_stop()
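Putting the lifecycle lines together, a hypothetical caller-side sketch; my_event_cb and tb (the callback cookie) are illustrative names and the 1000 ms timeout is arbitrary:

struct tb_ctl *ctl;

ctl = tb_ctl_alloc(nhi, 1000, my_event_cb, tb);
if (!ctl)
	return -ENOMEM;
tb_ctl_start(ctl);	/* TX first (acks hotplug), then RX, then running */
/* ... issue config reads/writes, receive events ... */
tb_ctl_stop(ctl);	/* quiesce; never call from ctl->callback */
tb_ctl_free(ctl);	/* likewise never from ctl->callback */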
739 * tb_cfg_ack_plug() - Ack hot plug/unplug event
741 * @route: Router that originated the event
765 u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); in tb_cfg_match()
767 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in tb_cfg_match()
770 if (pkg->frame.eof != req->response_type) in tb_cfg_match()
772 if (route != tb_cfg_get_route(req->request)) in tb_cfg_match()
774 if (pkg->frame.size != req->response_size) in tb_cfg_match()
777 if (pkg->frame.eof == TB_CFG_PKG_READ || in tb_cfg_match()
778 pkg->frame.eof == TB_CFG_PKG_WRITE) { in tb_cfg_match()
779 const struct cfg_read_pkg *req_hdr = req->request; in tb_cfg_match()
780 const struct cfg_read_pkg *res_hdr = pkg->buffer; in tb_cfg_match()
782 if (req_hdr->addr.seq != res_hdr->addr.seq) in tb_cfg_match()
794 res = parse_header(pkg, req->response_size, req->response_type, in tb_cfg_copy()
795 tb_cfg_get_route(req->request)); in tb_cfg_copy()
797 memcpy(req->response, pkg->buffer, req->response_size); in tb_cfg_copy()
799 req->result = res; in tb_cfg_copy()
806 * tb_cfg_reset() - send a reset packet and wait for a response
808 * @route: Route string of the router to send the reset to
812 * -ETIMEDOUT and attempt to reconfigure the switch.
823 res.err = -ENOMEM; in tb_cfg_reset()
827 req->match = tb_cfg_match; in tb_cfg_reset()
828 req->copy = tb_cfg_copy; in tb_cfg_reset()
829 req->request = &request; in tb_cfg_reset()
830 req->request_size = sizeof(request); in tb_cfg_reset()
831 req->request_type = TB_CFG_PKG_RESET; in tb_cfg_reset()
832 req->response = &reply; in tb_cfg_reset()
833 req->response_size = sizeof(reply); in tb_cfg_reset()
834 req->response_type = TB_CFG_PKG_RESET; in tb_cfg_reset()
836 res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec); in tb_cfg_reset()
844 * tb_cfg_read_raw() - read from config space into buffer
847 * @route: Route string of the router
854 * Reads from router config space without translating the possible error.
878 res.err = -ENOMEM; in tb_cfg_read_raw()
884 req->match = tb_cfg_match; in tb_cfg_read_raw()
885 req->copy = tb_cfg_copy; in tb_cfg_read_raw()
886 req->request = &request; in tb_cfg_read_raw()
887 req->request_size = sizeof(request); in tb_cfg_read_raw()
888 req->request_type = TB_CFG_PKG_READ; in tb_cfg_read_raw()
889 req->response = &reply; in tb_cfg_read_raw()
890 req->response_size = 12 + 4 * length; in tb_cfg_read_raw()
891 req->response_type = TB_CFG_PKG_READ; in tb_cfg_read_raw()
897 if (res.err != -ETIMEDOUT) in tb_cfg_read_raw()
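The bare `if (res.err != -ETIMEDOUT)` implies a resend loop around the synchronous request. A hedged, abbreviated sketch of that loop; TB_CTL_RETRIES and the msleep() backoff are assumptions about the elided code, and per-attempt request allocation (suggested by the -ENOMEM fragment above) is omitted:

	while (retries < TB_CTL_RETRIES) {
		request.addr.seq = retries++;	/* stale replies then fail
						 * tb_cfg_match()'s seq check */

		res = tb_cfg_request_sync(ctl, req, timeout_msec);
		if (res.err != -ETIMEDOUT)
			break;

		msleep(10);	/* brief pause before resending */
	}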
915 * tb_cfg_write_raw() - write from buffer into config space
918 * @route: Route string of the router
925 * Writes to router config space without translating the possible error.
951 res.err = -ENOMEM; in tb_cfg_write_raw()
957 req->match = tb_cfg_match; in tb_cfg_write_raw()
958 req->copy = tb_cfg_copy; in tb_cfg_write_raw()
959 req->request = &request; in tb_cfg_write_raw()
960 req->request_size = 12 + 4 * length; in tb_cfg_write_raw()
961 req->request_type = TB_CFG_PKG_WRITE; in tb_cfg_write_raw()
962 req->response = &reply; in tb_cfg_write_raw()
963 req->response_size = sizeof(reply); in tb_cfg_write_raw()
964 req->response_type = TB_CFG_PKG_WRITE; in tb_cfg_write_raw()
970 if (res.err != -ETIMEDOUT) in tb_cfg_write_raw()
991 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so in tb_cfg_get_error()
995 res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE) in tb_cfg_get_error()
996 return -ENODEV; in tb_cfg_get_error()
1000 if (res->tb_error == TB_CFG_ERROR_LOCK) in tb_cfg_get_error()
1001 return -EACCES; in tb_cfg_get_error()
1002 else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED) in tb_cfg_get_error()
1003 return -ENOTCONN; in tb_cfg_get_error()
1005 return -EIO; in tb_cfg_get_error()
1012 space, offset, length, ctl->timeout_msec); in tb_cfg_read()
1022 case -ETIMEDOUT: in tb_cfg_read()
1038 space, offset, length, ctl->timeout_msec); in tb_cfg_write()
1048 case -ETIMEDOUT: in tb_cfg_write()
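Both wrappers special-case timeouts before falling back to tb_cfg_get_error(). A hedged usage sketch of the pair, assuming the conventional signatures (buffer, route, port, config space, dword offset, dword count); route, port and offset are placeholders:

u32 val;
int ret;

ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
if (ret)
	return ret;

val |= BIT(0);		/* illustrative modification */
ret = tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
if (ret)
	return ret;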
1061 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
1063 * @route: Route string of the router
1076 ctl->timeout_msec); in tb_cfg_get_upstream_port()
1078 return -EIO; in tb_cfg_get_upstream_port()
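The two fragments above are consistent with probing the router via a one-dword raw read and returning the responder's port. A hedged reconstruction of the whole helper (the TB_CFG_SWITCH space and the dummy read are assumptions):

int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;	/* TB-level error: map to generic I/O error */
	if (res.err)
		return res.err;
	return res.response_port;	/* upstream port of the router */
}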