// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"


#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000


static void gb_connection_kref_release(struct kref *kref);


static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);


/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
/*
 * Returns a reference-counted pointer to the connection if found; the
 * caller must drop the reference using gb_connection_put().
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
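
/*
 * Illustrative sketch (not part of this file): a host driver's receive
 * path, e.g. a USB completion handler, would parse the destination
 * cport id out of the incoming message and forward the payload here.
 * The names below are hypothetical.
 *
 *	static void example_rx_complete(struct gb_host_device *hd,
 *					u16 cport_id, u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 */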

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}
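
	/* Results in names like "8/1:17": host cport 8 to interface 1, cport 17. */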
	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
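
/*
 * Illustrative sketch (not part of this file): a bundle driver would
 * typically pair gb_connection_create() with gb_connection_enable()
 * and tear down in the reverse order; the handler name is hypothetical.
 *
 *	connection = gb_connection_create(bundle, cport_id, my_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 *
 *	...
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */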

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

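/*
 * Offloaded connections have no request handler in greybus core; their
 * message traffic is expected to be handled directly by the host
 * device (e.g. in hardware or firmware).
 */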
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is
	 * requested; CSV is always disabled (the _N flags negate the
	 * corresponding feature).
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform the interface about active CPorts. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

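/*
 * CPort shutdown is two-phase: phase 1 is sent before the host cport
 * is quiesced and phase 2 afterwards (see gb_connection_disable()
 * below).
 */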
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
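		/* Take a reference and drop the lock; cancelling may sleep. */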
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

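/*
 * Mode-switch support: gb_connection_mode_switch_prepare() marks the
 * connection so that gb_connection_disable() defers the host-side
 * teardown, and gb_connection_mode_switch_complete() performs that
 * deferred teardown once the mode switch is done.
 */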
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* Control-connection tear down is deferred when mode switching. */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);