1 /*
2 * Texas Instruments System Control Interface Driver
3 * Based on Linux and U-Boot implementation
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 */
9
10 #include <errno.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <string.h>
14
15 #include <platform_def.h>
16 #include <lib/bakery_lock.h>
17
18 #include <common/debug.h>
19 #include <sec_proxy.h>
20
21 #include "ti_sci_protocol.h"
22 #include "ti_sci.h"
23
24 #if USE_COHERENT_MEM
25 __section(".tzfw_coherent_mem")
26 #endif
27 static uint8_t message_sequence;
28
29 DEFINE_BAKERY_LOCK(ti_sci_xfer_lock);
30
31 /**
32 * struct ti_sci_xfer - Structure representing a message flow
33 * @tx_message: Transmit message
34 * @rx_message: Receive message
35 */
36 struct ti_sci_xfer {
37 struct k3_sec_proxy_msg tx_message;
38 struct k3_sec_proxy_msg rx_message;
39 };
40
41 /**
42 * ti_sci_setup_one_xfer() - Setup one message type
43 *
44 * @msg_type: Message type
45 * @msg_flags: Flag to set for the message
46 * @tx_buf: Buffer to be sent to mailbox channel
 * @tx_message_size: Transmit message size
 * @rx_buf: Buffer to be received from mailbox channel
 * @rx_message_size: Receive message size
 * @xfer: Transfer message structure to initialize
50 *
51 * Helper function which is used by various command functions that are
52 * exposed to clients of this driver for allocating a message traffic event.
53 *
54 * Return: 0 if all goes well, else appropriate error message
55 */
static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
57 void *tx_buf,
58 size_t tx_message_size,
59 void *rx_buf,
60 size_t rx_message_size,
61 struct ti_sci_xfer *xfer)
62 {
63 struct ti_sci_msg_hdr *hdr;
64
65 /* Ensure we have sane transfer sizes */
66 if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
67 tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
68 tx_message_size < sizeof(*hdr))
69 return -ERANGE;
70
71 hdr = (struct ti_sci_msg_hdr *)tx_buf;
72 hdr->seq = ++message_sequence;
73 hdr->type = msg_type;
74 hdr->host = TI_SCI_HOST_ID;
75 hdr->flags = msg_flags;
76 /* Request a response if rx_message_size is non-zero */
77 if (rx_message_size != 0U) {
78 hdr->flags |= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
79 }
80
81 xfer->tx_message.buf = tx_buf;
82 xfer->tx_message.len = tx_message_size;
83
84 xfer->rx_message.buf = rx_buf;
85 xfer->rx_message.len = rx_message_size;
86
87 return 0;
88 }
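
/*
 * Illustrative sketch, not part of the driver: every transmit buffer handed
 * to ti_sci_setup_one_xfer() is expected to start with a struct
 * ti_sci_msg_hdr, since the helper writes the sequence number, type, host
 * and flags through that header. The message type and request layout below
 * are hypothetical placeholders.
 *
 *        struct example_req {
 *                struct ti_sci_msg_hdr hdr;
 *                uint32_t payload;
 *        } req;
 *        struct ti_sci_msg_hdr resp;
 *        struct ti_sci_xfer xfer;
 *
 *        ret = ti_sci_setup_one_xfer(EXAMPLE_MSG_TYPE, 0,
 *                                    &req, sizeof(req),
 *                                    &resp, sizeof(resp), &xfer);
 */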
89
90 /**
91 * ti_sci_get_response() - Receive response from mailbox channel
92 *
 * @msg: Pointer to the message buffer that receives the response
 * @chan: Channel to receive the response on
95 *
96 * Return: 0 if all goes well, else appropriate error message
97 */
static int ti_sci_get_response(struct k3_sec_proxy_msg *msg,
99 enum k3_sec_proxy_chan_id chan)
100 {
101 struct ti_sci_msg_hdr *hdr;
102 unsigned int retry = 5;
103 int ret;
104
105 for (; retry > 0; retry--) {
106 /* Receive the response */
107 ret = k3_sec_proxy_recv(chan, msg);
108 if (ret) {
109 ERROR("Message receive failed (%d)\n", ret);
110 return ret;
111 }
112
113 /* msg is updated by Secure Proxy driver */
114 hdr = (struct ti_sci_msg_hdr *)msg->buf;
115
116 /* Sanity check for message response */
117 if (hdr->seq == message_sequence)
118 break;
119 else
120 WARN("Message with sequence ID %u is not expected\n", hdr->seq);
121 }
122 if (!retry) {
123 ERROR("Timed out waiting for message\n");
124 return -EINVAL;
125 }
126
127 if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
128 ERROR("Unable to handle %lu xfer (max %d)\n",
129 msg->len, TI_SCI_MAX_MESSAGE_SIZE);
130 return -EINVAL;
131 }
132
133 if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
134 return -ENODEV;
135
136 return 0;
137 }
138
139 /**
140 * ti_sci_do_xfer() - Do one transfer
141 *
142 * @xfer: Transfer to initiate and wait for response
143 *
144 * Return: 0 if all goes well, else appropriate error message
145 */
static int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
147 {
148 struct k3_sec_proxy_msg *tx_msg = &xfer->tx_message;
149 struct k3_sec_proxy_msg *rx_msg = &xfer->rx_message;
150 int ret;
151
152 bakery_lock_get(&ti_sci_xfer_lock);
153
154 /* Clear any spurious messages in receive queue */
155 ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE);
156 if (ret) {
157 ERROR("Could not clear response queue (%d)\n", ret);
158 goto unlock;
159 }
160
161 /* Send the message */
162 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, tx_msg);
163 if (ret) {
164 ERROR("Message sending failed (%d)\n", ret);
165 goto unlock;
166 }
167
168 /* Get the response if requested */
169 if (rx_msg->len != 0U) {
170 ret = ti_sci_get_response(rx_msg, SP_RESPONSE);
171 if (ret != 0U) {
172 ERROR("Failed to get response (%d)\n", ret);
173 goto unlock;
174 }
175 }
176
177 unlock:
178 bakery_lock_release(&ti_sci_xfer_lock);
179
180 return ret;
181 }
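
/*
 * Illustrative sketch, not part of the driver: a zero-length receive buffer
 * passed to ti_sci_setup_one_xfer() turns the exchange into fire-and-forget;
 * no ACK flag is set in the header and ti_sci_do_xfer() skips the response
 * read. This is the pattern the *_no_wait helpers below rely on. The message
 * type is a hypothetical placeholder.
 *
 *        ret = ti_sci_setup_one_xfer(EXAMPLE_MSG_TYPE, 0,
 *                                    &req, sizeof(req),
 *                                    NULL, 0, &xfer);
 *        if (ret == 0)
 *                ret = ti_sci_do_xfer(&xfer);
 */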
182
183 /**
184 * ti_sci_get_revision() - Get the revision of the SCI entity
185 *
186 * Updates the SCI information in the internal data structure.
187 *
188 * Return: 0 if all goes well, else appropriate error message
189 */
int ti_sci_get_revision(struct ti_sci_msg_resp_version *rev_info)
191 {
192 struct ti_sci_msg_hdr hdr;
193 struct ti_sci_xfer xfer;
194 int ret;
195
196 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
197 &hdr, sizeof(hdr),
198 rev_info, sizeof(*rev_info),
199 &xfer);
200 if (ret) {
201 ERROR("Message alloc failed (%d)\n", ret);
202 return ret;
203 }
204
205 ret = ti_sci_do_xfer(&xfer);
206 if (ret) {
207 ERROR("Transfer send failed (%d)\n", ret);
208 return ret;
209 }
210
211 return 0;
212 }
213
214 /**
215 * ti_sci_query_fw_caps() - Get the FW/SoC capabilities
 * @fw_caps: Pointer to the capability mask; each bit in fw_caps
 * indicates one FW/SoC capability
218 *
219 * Return: 0 if all went well, else returns appropriate error value.
220 */
int ti_sci_query_fw_caps(uint64_t *fw_caps)
222 {
223 struct ti_sci_msg_hdr req;
224 struct ti_sci_msg_resp_query_fw_caps resp;
225
226 struct ti_sci_xfer xfer;
227 int ret;
228
229 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_FW_CAPS, 0,
230 &req, sizeof(req),
231 &resp, sizeof(resp),
232 &xfer);
233 if (ret != 0U) {
234 ERROR("Message alloc failed (%d)\n", ret);
235 return ret;
236 }
237
238 ret = ti_sci_do_xfer(&xfer);
239 if (ret != 0U) {
240 ERROR("Transfer send failed (%d)\n", ret);
241 return ret;
242 }
243
244 if (fw_caps)
245 *fw_caps = resp.fw_caps;
246
247 return 0;
248 }
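
/*
 * Illustrative sketch, not part of the driver: callers typically test
 * individual bits of the returned mask; the flag and helper names below are
 * hypothetical placeholders for whatever the protocol header and platform
 * code define.
 *
 *        uint64_t fw_caps;
 *
 *        if (ti_sci_query_fw_caps(&fw_caps) == 0 &&
 *            (fw_caps & EXAMPLE_CAPS_BIT) != 0U) {
 *                example_enable_feature();
 *        }
 */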
249
250 /**
251 * ti_sci_device_set_state() - Set device state
252 *
253 * @id: Device identifier
 * @flags: Flags to set up for the device
255 * @state: State to move the device to
256 *
257 * Return: 0 if all goes well, else appropriate error message
258 */
static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
260 {
261 struct ti_sci_msg_req_set_device_state req;
262 struct ti_sci_msg_hdr resp;
263
264 struct ti_sci_xfer xfer;
265 int ret;
266
267 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
268 &req, sizeof(req),
269 &resp, sizeof(resp),
270 &xfer);
271 if (ret) {
272 ERROR("Message alloc failed (%d)\n", ret);
273 return ret;
274 }
275
276 req.id = id;
277 req.state = state;
278
279 ret = ti_sci_do_xfer(&xfer);
280 if (ret) {
281 ERROR("Transfer send failed (%d)\n", ret);
282 return ret;
283 }
284
285 return 0;
286 }
287
288 /**
289 * ti_sci_device_get_state() - Get device state
290 *
291 * @id: Device Identifier
 * @clcnt: Pointer to Context Loss Count
 * @resets: Pointer to resets
 * @p_state: Pointer to the programmed (requested) state
 * @c_state: Pointer to the current state
296 *
297 * Return: 0 if all goes well, else appropriate error message
298 */
static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
300 uint32_t *resets, uint8_t *p_state,
301 uint8_t *c_state)
302 {
303 struct ti_sci_msg_req_get_device_state req;
304 struct ti_sci_msg_resp_get_device_state resp;
305
306 struct ti_sci_xfer xfer;
307 int ret;
308
309 if (!clcnt && !resets && !p_state && !c_state)
310 return -EINVAL;
311
312 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
313 &req, sizeof(req),
314 &resp, sizeof(resp),
315 &xfer);
316 if (ret) {
317 ERROR("Message alloc failed (%d)\n", ret);
318 return ret;
319 }
320
321 req.id = id;
322
323 ret = ti_sci_do_xfer(&xfer);
324 if (ret) {
325 ERROR("Transfer send failed (%d)\n", ret);
326 return ret;
327 }
328
329 if (clcnt)
330 *clcnt = resp.context_loss_count;
331 if (resets)
332 *resets = resp.resets;
333 if (p_state)
334 *p_state = resp.programmed_state;
335 if (c_state)
336 *c_state = resp.current_state;
337
338 return 0;
339 }
340
341 /**
342 * ti_sci_device_get() - Request for device managed by TISCI
343 *
344 * @id: Device Identifier
345 *
346 * Request for the device - NOTE: the client MUST maintain integrity of
347 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
349 *
350 * Return: 0 if all goes well, else appropriate error message
351 */
int ti_sci_device_get(uint32_t id)
353 {
354 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
355 }
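
/*
 * Illustrative sketch, not part of the driver: the get/put pair must be kept
 * balanced by the caller, since no reference counting is done here. The
 * device ID below is a hypothetical placeholder.
 *
 *        ret = ti_sci_device_get(EXAMPLE_DEV_ID);
 *        (... use the peripheral ...)
 *        ret = ti_sci_device_put(EXAMPLE_DEV_ID);
 */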
356
357 /**
358 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
359 *
360 * @id: Device Identifier
361 *
362 * Request for the device - NOTE: the client MUST maintain integrity of
363 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
365 *
366 * NOTE: This _exclusive version of the get API is for exclusive access to the
367 * device. Any other host in the system will fail to get this device after this
368 * call until exclusive access is released with device_put or a non-exclusive
369 * set call.
370 *
371 * Return: 0 if all goes well, else appropriate error message
372 */
int ti_sci_device_get_exclusive(uint32_t id)
374 {
375 return ti_sci_device_set_state(id,
376 MSG_FLAG_DEVICE_EXCLUSIVE,
377 MSG_DEVICE_SW_STATE_ON);
378 }
379
380 /**
381 * ti_sci_device_idle() - Idle a device managed by TISCI
382 *
383 * @id: Device Identifier
384 *
385 * Request for the device - NOTE: the client MUST maintain integrity of
386 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
388 *
389 * Return: 0 if all goes well, else appropriate error message
390 */
int ti_sci_device_idle(uint32_t id)
392 {
393 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
394 }
395
396 /**
397 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
398 *
399 * @id: Device Identifier
400 *
401 * Request for the device - NOTE: the client MUST maintain integrity of
402 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
404 *
405 * NOTE: This _exclusive version of the idle API is for exclusive access to
406 * the device. Any other host in the system will fail to get this device after
407 * this call until exclusive access is released with device_put or a
408 * non-exclusive set call.
409 *
410 * Return: 0 if all goes well, else appropriate error message
411 */
int ti_sci_device_idle_exclusive(uint32_t id)
413 {
414 return ti_sci_device_set_state(id,
415 MSG_FLAG_DEVICE_EXCLUSIVE,
416 MSG_DEVICE_SW_STATE_RETENTION);
417 }
418
419 /**
420 * ti_sci_device_put() - Release a device managed by TISCI
421 *
422 * @id: Device Identifier
423 *
424 * Request for the device - NOTE: the client MUST maintain integrity of
425 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
427 *
428 * Return: 0 if all goes well, else appropriate error message
429 */
int ti_sci_device_put(uint32_t id)
431 {
432 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
433 }
434
435 /**
436 * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
437 * for a response.
438 *
439 * @id: Device Identifier
440 *
441 * Request for the device - NOTE: the client MUST maintain integrity of
442 * usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
444 *
445 * Return: 0 if all goes well, else appropriate error message
446 */
int ti_sci_device_put_no_wait(uint32_t id)
448 {
449 struct ti_sci_msg_req_set_device_state req;
450 struct ti_sci_xfer xfer;
451 int ret;
452
453 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, 0,
454 &req, sizeof(req),
455 NULL, 0,
456 &xfer);
457 if (ret != 0U) {
458 ERROR("Message alloc failed (%d)\n", ret);
459 return ret;
460 }
461
462 req.id = id;
463 req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
464
465 ret = ti_sci_do_xfer(&xfer);
466 if (ret != 0U) {
467 ERROR("Transfer send failed (%d)\n", ret);
468 return ret;
469 }
470
471 return 0;
472 }
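
/*
 * Illustrative note, not part of the driver: the no_wait variant above exists
 * for callers that are about to lose the ability to receive a reply, for
 * example a core powering itself down that cannot stay around to wait for
 * the response.
 */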
473
474 /**
475 * ti_sci_device_is_valid() - Is the device valid
476 *
477 * @id: Device Identifier
478 *
479 * Return: 0 if all goes well and the device ID is valid, else return
480 * appropriate error
481 */
int ti_sci_device_is_valid(uint32_t id)
483 {
484 uint8_t unused;
485
486 /* check the device state which will also tell us if the ID is valid */
487 return ti_sci_device_get_state(id, NULL, NULL, NULL, &unused);
488 }
489
490 /**
491 * ti_sci_device_get_clcnt() - Get context loss counter
492 *
493 * @id: Device Identifier
494 * @count: Pointer to Context Loss counter to populate
495 *
496 * Return: 0 if all goes well, else appropriate error message
497 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
499 {
500 return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
501 }
502
503 /**
504 * ti_sci_device_is_idle() - Check if the device is requested to be idle
505 *
506 * @id: Device Identifier
507 * @r_state: true if requested to be idle
508 *
509 * Return: 0 if all goes well, else appropriate error message
510 */
int ti_sci_device_is_idle(uint32_t id, bool *r_state)
512 {
513 int ret;
514 uint8_t state;
515
516 if (!r_state)
517 return -EINVAL;
518
519 ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
520 if (ret)
521 return ret;
522
523 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
524
525 return 0;
526 }
527
528 /**
529 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
530 *
531 * @id: Device Identifier
532 * @r_state: true if requested to be stopped
533 * @curr_state: true if currently stopped
534 *
535 * Return: 0 if all goes well, else appropriate error message
536 */
int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
538 {
539 int ret;
540 uint8_t p_state, c_state;
541
542 if (!r_state && !curr_state)
543 return -EINVAL;
544
545 ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
546 if (ret)
547 return ret;
548
549 if (r_state)
550 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
551 if (curr_state)
552 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
553
554 return 0;
555 }
556
557 /**
558 * ti_sci_device_is_on() - Check if the device is requested to be ON
559 *
560 * @id: Device Identifier
561 * @r_state: true if requested to be ON
562 * @curr_state: true if currently ON and active
563 *
564 * Return: 0 if all goes well, else appropriate error message
565 */
int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
567 {
568 int ret;
569 uint8_t p_state, c_state;
570
571 if (!r_state && !curr_state)
572 return -EINVAL;
573
574 ret =
575 ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
576 if (ret)
577 return ret;
578
579 if (r_state)
580 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
581 if (curr_state)
582 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
583
584 return 0;
585 }
586
587 /**
588 * ti_sci_device_is_trans() - Check if the device is currently transitioning
589 *
590 * @id: Device Identifier
591 * @curr_state: true if currently transitioning
592 *
593 * Return: 0 if all goes well, else appropriate error message
594 */
int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
596 {
597 int ret;
598 uint8_t state;
599
600 if (!curr_state)
601 return -EINVAL;
602
603 ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
604 if (ret)
605 return ret;
606
607 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
608
609 return 0;
610 }
611
612 /**
613 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
614 *
615 * @id: Device Identifier
616 * @reset_state: Device specific reset bit field
617 *
618 * Return: 0 if all goes well, else appropriate error message
619 */
int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
621 {
622 struct ti_sci_msg_req_set_device_resets req;
623 struct ti_sci_msg_hdr resp;
624
625 struct ti_sci_xfer xfer;
626 int ret;
627
628 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
629 &req, sizeof(req),
630 &resp, sizeof(resp),
631 &xfer);
632 if (ret) {
633 ERROR("Message alloc failed (%d)\n", ret);
634 return ret;
635 }
636
637 req.id = id;
638 req.resets = reset_state;
639
640 ret = ti_sci_do_xfer(&xfer);
641 if (ret) {
642 ERROR("Transfer send failed (%d)\n", ret);
643 return ret;
644 }
645
646 return 0;
647 }
648
649 /**
650 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
651 *
652 * @id: Device Identifier
653 * @reset_state: Pointer to reset state to populate
654 *
655 * Return: 0 if all goes well, else appropriate error message
656 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
658 {
659 return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
660 }
661
662 /**
663 * ti_sci_clock_set_state() - Set clock state helper
664 *
665 * @dev_id: Device identifier this request is for
666 * @clk_id: Clock identifier for the device for this request,
 * Each device has its own set of clock inputs. This indexes
668 * which clock input to modify
669 * @flags: Header flags as needed
670 * @state: State to request for the clock
671 *
672 * Return: 0 if all goes well, else appropriate error message
673 */
int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
675 uint32_t flags, uint8_t state)
676 {
677 struct ti_sci_msg_req_set_clock_state req;
678 struct ti_sci_msg_hdr resp;
679
680 struct ti_sci_xfer xfer;
681 int ret;
682
683 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
684 &req, sizeof(req),
685 &resp, sizeof(resp),
686 &xfer);
687 if (ret) {
688 ERROR("Message alloc failed (%d)\n", ret);
689 return ret;
690 }
691
692 req.dev_id = dev_id;
693 req.clk_id = clk_id;
694 req.request_state = state;
695
696 ret = ti_sci_do_xfer(&xfer);
697 if (ret) {
698 ERROR("Transfer send failed (%d)\n", ret);
699 return ret;
700 }
701
702 return 0;
703 }
704
705 /**
706 * ti_sci_clock_get_state() - Get clock state helper
707 *
708 * @dev_id: Device identifier this request is for
709 * @clk_id: Clock identifier for the device for this request.
710 * Each device has its own set of clock inputs. This indexes
711 * which clock input to modify.
712 * @programmed_state: State requested for clock to move to
713 * @current_state: State that the clock is currently in
714 *
715 * Return: 0 if all goes well, else appropriate error message
716 */
int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
718 uint8_t *programmed_state,
719 uint8_t *current_state)
720 {
721 struct ti_sci_msg_req_get_clock_state req;
722 struct ti_sci_msg_resp_get_clock_state resp;
723
724 struct ti_sci_xfer xfer;
725 int ret;
726
727 if (!programmed_state && !current_state)
728 return -EINVAL;
729
730 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
731 &req, sizeof(req),
732 &resp, sizeof(resp),
733 &xfer);
734 if (ret) {
735 ERROR("Message alloc failed (%d)\n", ret);
736 return ret;
737 }
738
739 req.dev_id = dev_id;
740 req.clk_id = clk_id;
741
742 ret = ti_sci_do_xfer(&xfer);
743 if (ret) {
744 ERROR("Transfer send failed (%d)\n", ret);
745 return ret;
746 }
747
748 if (programmed_state)
749 *programmed_state = resp.programmed_state;
750 if (current_state)
751 *current_state = resp.current_state;
752
753 return 0;
754 }
755
756 /**
757 * ti_sci_clock_get() - Get control of a clock from TI SCI
 *
759 * @dev_id: Device identifier this request is for
760 * @clk_id: Clock identifier for the device for this request.
761 * Each device has its own set of clock inputs. This indexes
762 * which clock input to modify.
763 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
764 * @can_change_freq: 'true' iff frequency change is desired
765 * @enable_input_term: 'true' iff input termination is desired
766 *
767 * Return: 0 if all goes well, else appropriate error message
768 */
int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
770 bool needs_ssc, bool can_change_freq,
771 bool enable_input_term)
772 {
773 uint32_t flags = 0;
774
775 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
776 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
777 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
778
779 return ti_sci_clock_set_state(dev_id, clk_id, flags,
780 MSG_CLOCK_SW_STATE_REQ);
781 }
782
783 /**
784 * ti_sci_clock_idle() - Idle a clock which is in our control
 *
786 * @dev_id: Device identifier this request is for
787 * @clk_id: Clock identifier for the device for this request.
788 * Each device has its own set of clock inputs. This indexes
789 * which clock input to modify.
790 *
791 * NOTE: This clock must have been requested by get_clock previously.
792 *
793 * Return: 0 if all goes well, else appropriate error message
794 */
int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
796 {
797 return ti_sci_clock_set_state(dev_id, clk_id, 0,
798 MSG_CLOCK_SW_STATE_UNREQ);
799 }
800
801 /**
802 * ti_sci_clock_put() - Release a clock from our control
803 *
804 * @dev_id: Device identifier this request is for
805 * @clk_id: Clock identifier for the device for this request.
806 * Each device has its own set of clock inputs. This indexes
807 * which clock input to modify.
808 *
809 * NOTE: This clock must have been requested by get_clock previously.
810 *
811 * Return: 0 if all goes well, else appropriate error message
812 */
int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
814 {
815 return ti_sci_clock_set_state(dev_id, clk_id, 0,
816 MSG_CLOCK_SW_STATE_AUTO);
817 }
818
819 /**
820 * ti_sci_clock_is_auto() - Is the clock being auto managed
821 *
822 * @dev_id: Device identifier this request is for
823 * @clk_id: Clock identifier for the device for this request.
824 * Each device has its own set of clock inputs. This indexes
825 * which clock input to modify.
826 * @req_state: state indicating if the clock is auto managed
827 *
828 * Return: 0 if all goes well, else appropriate error message
829 */
int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
831 {
832 uint8_t state = 0;
833 int ret;
834
835 if (!req_state)
836 return -EINVAL;
837
838 ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
839 if (ret)
840 return ret;
841
842 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
843
844 return 0;
845 }
846
847 /**
848 * ti_sci_clock_is_on() - Is the clock ON
849 *
850 * @dev_id: Device identifier this request is for
851 * @clk_id: Clock identifier for the device for this request.
852 * Each device has its own set of clock inputs. This indexes
853 * which clock input to modify.
854 * @req_state: state indicating if the clock is managed by us and enabled
855 * @curr_state: state indicating if the clock is ready for operation
856 *
857 * Return: 0 if all goes well, else appropriate error message
858 */
int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
860 bool *req_state, bool *curr_state)
861 {
862 uint8_t c_state = 0, r_state = 0;
863 int ret;
864
865 if (!req_state && !curr_state)
866 return -EINVAL;
867
868 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
869 if (ret)
870 return ret;
871
872 if (req_state)
873 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
874 if (curr_state)
875 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
876
877 return 0;
878 }
879
880 /**
881 * ti_sci_clock_is_off() - Is the clock OFF
882 *
883 * @dev_id: Device identifier this request is for
884 * @clk_id: Clock identifier for the device for this request.
885 * Each device has its own set of clock inputs. This indexes
886 * which clock input to modify.
887 * @req_state: state indicating if the clock is managed by us and disabled
888 * @curr_state: state indicating if the clock is NOT ready for operation
889 *
890 * Return: 0 if all goes well, else appropriate error message
891 */
int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
893 bool *req_state, bool *curr_state)
894 {
895 uint8_t c_state = 0, r_state = 0;
896 int ret;
897
898 if (!req_state && !curr_state)
899 return -EINVAL;
900
901 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
902 if (ret)
903 return ret;
904
905 if (req_state)
906 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
907 if (curr_state)
908 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
909
910 return 0;
911 }
912
913 /**
914 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
915 *
916 * @dev_id: Device identifier this request is for
917 * @clk_id: Clock identifier for the device for this request.
918 * Each device has its own set of clock inputs. This indexes
919 * which clock input to modify.
920 * @parent_id: Parent clock identifier to set
921 *
922 * Return: 0 if all goes well, else appropriate error message
923 */
int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
925 {
926 struct ti_sci_msg_req_set_clock_parent req;
927 struct ti_sci_msg_hdr resp;
928
929 struct ti_sci_xfer xfer;
930 int ret;
931
932 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
933 &req, sizeof(req),
934 &resp, sizeof(resp),
935 &xfer);
936 if (ret) {
937 ERROR("Message alloc failed (%d)\n", ret);
938 return ret;
939 }
940
941 req.dev_id = dev_id;
942 req.clk_id = clk_id;
943 req.parent_id = parent_id;
944
945 ret = ti_sci_do_xfer(&xfer);
946 if (ret) {
947 ERROR("Transfer send failed (%d)\n", ret);
948 return ret;
949 }
950
951 return 0;
952 }
953
954 /**
955 * ti_sci_clock_get_parent() - Get current parent clock source
956 *
957 * @dev_id: Device identifier this request is for
958 * @clk_id: Clock identifier for the device for this request.
959 * Each device has its own set of clock inputs. This indexes
960 * which clock input to modify.
961 * @parent_id: Current clock parent
962 *
963 * Return: 0 if all goes well, else appropriate error message
964 */
int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
966 {
967 struct ti_sci_msg_req_get_clock_parent req;
968 struct ti_sci_msg_resp_get_clock_parent resp;
969
970 struct ti_sci_xfer xfer;
971 int ret;
972
973 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
974 &req, sizeof(req),
975 &resp, sizeof(resp),
976 &xfer);
977 if (ret) {
978 ERROR("Message alloc failed (%d)\n", ret);
979 return ret;
980 }
981
982 req.dev_id = dev_id;
983 req.clk_id = clk_id;
984
985 ret = ti_sci_do_xfer(&xfer);
986 if (ret) {
987 ERROR("Transfer send failed (%d)\n", ret);
988 return ret;
989 }
990
991 *parent_id = resp.parent_id;
992
993 return 0;
994 }
995
996 /**
997 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
998 *
999 * @dev_id: Device identifier this request is for
1000 * @clk_id: Clock identifier for the device for this request.
1001 * Each device has its own set of clock inputs. This indexes
1002 * which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
1004 *
1005 * Return: 0 if all goes well, else appropriate error message
1006 */
int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
1008 uint8_t *num_parents)
1009 {
1010 struct ti_sci_msg_req_get_clock_num_parents req;
1011 struct ti_sci_msg_resp_get_clock_num_parents resp;
1012
1013 struct ti_sci_xfer xfer;
1014 int ret;
1015
1016 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
1017 &req, sizeof(req),
1018 &resp, sizeof(resp),
1019 &xfer);
1020 if (ret) {
1021 ERROR("Message alloc failed (%d)\n", ret);
1022 return ret;
1023 }
1024
1025 req.dev_id = dev_id;
1026 req.clk_id = clk_id;
1027
1028 ret = ti_sci_do_xfer(&xfer);
1029 if (ret) {
1030 ERROR("Transfer send failed (%d)\n", ret);
1031 return ret;
1032 }
1033
1034 *num_parents = resp.num_parents;
1035
1036 return 0;
1037 }
1038
1039 /**
1040 * ti_sci_clock_get_match_freq() - Find a good match for frequency
1041 *
1042 * @dev_id: Device identifier this request is for
1043 * @clk_id: Clock identifier for the device for this request.
1044 * Each device has its own set of clock inputs. This indexes
1045 * which clock input to modify.
1046 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1047 * allowable programmed frequency and does not account for clock
1048 * tolerances and jitter.
1049 * @target_freq: The target clock frequency in Hz. A frequency will be
 * selected as close to this target frequency as possible.
1051 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1052 * allowable programmed frequency and does not account for clock
1053 * tolerances and jitter.
1054 * @match_freq: Frequency match in Hz response.
1055 *
1056 * Return: 0 if all goes well, else appropriate error message
1057 */
int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1059 uint64_t min_freq, uint64_t target_freq,
1060 uint64_t max_freq, uint64_t *match_freq)
1061 {
1062 struct ti_sci_msg_req_query_clock_freq req;
1063 struct ti_sci_msg_resp_query_clock_freq resp;
1064
1065 struct ti_sci_xfer xfer;
1066 int ret;
1067
1068 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
1069 &req, sizeof(req),
1070 &resp, sizeof(resp),
1071 &xfer);
1072 if (ret) {
1073 ERROR("Message alloc failed (%d)\n", ret);
1074 return ret;
1075 }
1076
1077 req.dev_id = dev_id;
1078 req.clk_id = clk_id;
1079 req.min_freq_hz = min_freq;
1080 req.target_freq_hz = target_freq;
1081 req.max_freq_hz = max_freq;
1082
1083 ret = ti_sci_do_xfer(&xfer);
1084 if (ret) {
1085 ERROR("Transfer send failed (%d)\n", ret);
1086 return ret;
1087 }
1088
1089 *match_freq = resp.freq_hz;
1090
1091 return 0;
1092 }
1093
1094 /**
1095 * ti_sci_clock_set_freq() - Set a frequency for clock
1096 *
1097 * @dev_id: Device identifier this request is for
1098 * @clk_id: Clock identifier for the device for this request.
1099 * Each device has its own set of clock inputs. This indexes
1100 * which clock input to modify.
1101 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1102 * allowable programmed frequency and does not account for clock
1103 * tolerances and jitter.
1104 * @target_freq: The target clock frequency in Hz. A frequency will be
 * selected as close to this target frequency as possible.
1106 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1107 * allowable programmed frequency and does not account for clock
1108 * tolerances and jitter.
1109 *
1110 * Return: 0 if all goes well, else appropriate error message
1111 */
int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1113 uint64_t target_freq, uint64_t max_freq)
1114 {
1115 struct ti_sci_msg_req_set_clock_freq req;
1116 struct ti_sci_msg_hdr resp;
1117
1118 struct ti_sci_xfer xfer;
1119 int ret;
1120
1121 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
1122 &req, sizeof(req),
1123 &resp, sizeof(resp),
1124 &xfer);
1125 if (ret) {
1126 ERROR("Message alloc failed (%d)\n", ret);
1127 return ret;
1128 }
1129 req.dev_id = dev_id;
1130 req.clk_id = clk_id;
1131 req.min_freq_hz = min_freq;
1132 req.target_freq_hz = target_freq;
1133 req.max_freq_hz = max_freq;
1134
1135 ret = ti_sci_do_xfer(&xfer);
1136 if (ret) {
1137 ERROR("Transfer send failed (%d)\n", ret);
1138 return ret;
1139 }
1140
1141 return 0;
1142 }
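
/*
 * Illustrative sketch, not part of the driver: a caller that tolerates a
 * small deviation from a nominal rate can derive the min/max window from the
 * target. The values below are hypothetical placeholders (100 MHz +/- 1%).
 *
 *        uint64_t target = 100000000U;
 *
 *        ret = ti_sci_clock_set_freq(dev_id, clk_id,
 *                                    target - (target / 100U),
 *                                    target,
 *                                    target + (target / 100U));
 */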
1143
1144 /**
1145 * ti_sci_clock_get_freq() - Get current frequency
1146 *
1147 * @dev_id: Device identifier this request is for
1148 * @clk_id: Clock identifier for the device for this request.
1149 * Each device has its own set of clock inputs. This indexes
1150 * which clock input to modify.
 * @freq: Current frequency in Hz
1152 *
1153 * Return: 0 if all goes well, else appropriate error message
1154 */
int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1156 {
1157 struct ti_sci_msg_req_get_clock_freq req;
1158 struct ti_sci_msg_resp_get_clock_freq resp;
1159
1160 struct ti_sci_xfer xfer;
1161 int ret;
1162
1163 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
1164 &req, sizeof(req),
1165 &resp, sizeof(resp),
1166 &xfer);
1167 if (ret) {
1168 ERROR("Message alloc failed (%d)\n", ret);
1169 return ret;
1170 }
1171
1172 req.dev_id = dev_id;
1173 req.clk_id = clk_id;
1174
1175 ret = ti_sci_do_xfer(&xfer);
1176 if (ret) {
1177 ERROR("Transfer send failed (%d)\n", ret);
1178 return ret;
1179 }
1180
1181 *freq = resp.freq_hz;
1182
1183 return 0;
1184 }
1185
1186 /**
1187 * ti_sci_core_reboot() - Command to request system reset
1188 *
1189 * Return: 0 if all goes well, else appropriate error message
1190 */
int ti_sci_core_reboot(void)
1192 {
1193 struct ti_sci_msg_req_reboot req;
1194 struct ti_sci_msg_hdr resp;
1195
1196 struct ti_sci_xfer xfer;
1197 int ret;
1198
1199 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
1200 &req, sizeof(req),
1201 &resp, sizeof(resp),
1202 &xfer);
1203 if (ret) {
1204 ERROR("Message alloc failed (%d)\n", ret);
1205 return ret;
1206 }
1207 req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET;
1208
1209 ret = ti_sci_do_xfer(&xfer);
1210 if (ret) {
1211 ERROR("Transfer send failed (%d)\n", ret);
1212 return ret;
1213 }
1214
1215 return 0;
1216 }
1217
1218 /**
 * ti_sci_proc_request() - Request control of a physical processor
1220 *
1221 * @proc_id: Processor ID this request is for
1222 *
1223 * Return: 0 if all goes well, else appropriate error message
1224 */
int ti_sci_proc_request(uint8_t proc_id)
1226 {
1227 struct ti_sci_msg_req_proc_request req;
1228 struct ti_sci_msg_hdr resp;
1229
1230 struct ti_sci_xfer xfer;
1231 int ret;
1232
1233 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
1234 &req, sizeof(req),
1235 &resp, sizeof(resp),
1236 &xfer);
1237 if (ret) {
1238 ERROR("Message alloc failed (%d)\n", ret);
1239 return ret;
1240 }
1241
1242 req.processor_id = proc_id;
1243
1244 ret = ti_sci_do_xfer(&xfer);
1245 if (ret) {
1246 ERROR("Transfer send failed (%d)\n", ret);
1247 return ret;
1248 }
1249
1250 return 0;
1251 }
1252
1253 /**
 * ti_sci_proc_release() - Release control of a physical processor
1255 *
1256 * @proc_id: Processor ID this request is for
1257 *
1258 * Return: 0 if all goes well, else appropriate error message
1259 */
int ti_sci_proc_release(uint8_t proc_id)
1261 {
1262 struct ti_sci_msg_req_proc_release req;
1263 struct ti_sci_msg_hdr resp;
1264
1265 struct ti_sci_xfer xfer;
1266 int ret;
1267
1268 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
1269 &req, sizeof(req),
1270 &resp, sizeof(resp),
1271 &xfer);
1272 if (ret) {
1273 ERROR("Message alloc failed (%d)\n", ret);
1274 return ret;
1275 }
1276
1277 req.processor_id = proc_id;
1278
1279 ret = ti_sci_do_xfer(&xfer);
1280 if (ret) {
1281 ERROR("Transfer send failed (%d)\n", ret);
1282 return ret;
1283 }
1284
1285 return 0;
1286 }
1287
1288 /**
 * ti_sci_proc_handover() - Hand over control of a physical processor to a host in
1290 * the processor's access control list.
1291 *
1292 * @proc_id: Processor ID this request is for
1293 * @host_id: Host ID to get the control of the processor
1294 *
1295 * Return: 0 if all goes well, else appropriate error message
1296 */
int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1298 {
1299 struct ti_sci_msg_req_proc_handover req;
1300 struct ti_sci_msg_hdr resp;
1301
1302 struct ti_sci_xfer xfer;
1303 int ret;
1304
1305 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
1306 &req, sizeof(req),
1307 &resp, sizeof(resp),
1308 &xfer);
1309 if (ret) {
1310 ERROR("Message alloc failed (%d)\n", ret);
1311 return ret;
1312 }
1313
1314 req.processor_id = proc_id;
1315 req.host_id = host_id;
1316
1317 ret = ti_sci_do_xfer(&xfer);
1318 if (ret) {
1319 ERROR("Transfer send failed (%d)\n", ret);
1320 return ret;
1321 }
1322
1323 return 0;
1324 }
1325
1326 /**
1327 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1328 *
 * @proc_id: Processor ID this request is for
 * @bootvector: Boot vector address to program for the processor
1330 * @config_flags_set: Configuration flags to be set
1331 * @config_flags_clear: Configuration flags to be cleared
1332 *
1333 * Return: 0 if all goes well, else appropriate error message
1334 */
int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1336 uint32_t config_flags_set,
1337 uint32_t config_flags_clear)
1338 {
1339 struct ti_sci_msg_req_set_proc_boot_config req;
1340 struct ti_sci_msg_hdr resp;
1341
1342 struct ti_sci_xfer xfer;
1343 int ret;
1344
1345 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
1346 &req, sizeof(req),
1347 &resp, sizeof(resp),
1348 &xfer);
1349 if (ret) {
1350 ERROR("Message alloc failed (%d)\n", ret);
1351 return ret;
1352 }
1353
1354 req.processor_id = proc_id;
1355 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1356 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1357 TISCI_ADDR_HIGH_SHIFT;
1358 req.config_flags_set = config_flags_set;
1359 req.config_flags_clear = config_flags_clear;
1360
1361 ret = ti_sci_do_xfer(&xfer);
1362 if (ret) {
1363 ERROR("Transfer send failed (%d)\n", ret);
1364 return ret;
1365 }
1366
1367 return 0;
1368 }
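
/*
 * Illustrative note, not part of the driver: bootvector is carried in two
 * 32-bit message fields. Assuming the usual 32-bit low/high split defined in
 * ti_sci_protocol.h, a boot vector of 0x880000000 is sent as
 * bootvector_low = 0x80000000 and bootvector_high = 0x8.
 */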
1369
1370 /**
1371 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1372 *
1373 * @proc_id: Processor ID this request is for
1374 * @control_flags_set: Control flags to be set
1375 * @control_flags_clear: Control flags to be cleared
1376 *
1377 * Return: 0 if all goes well, else appropriate error message
1378 */
int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1380 uint32_t control_flags_clear)
1381 {
1382 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1383 struct ti_sci_msg_hdr resp;
1384
1385 struct ti_sci_xfer xfer;
1386 int ret;
1387
1388 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1389 &req, sizeof(req),
1390 &resp, sizeof(resp),
1391 &xfer);
1392 if (ret) {
1393 ERROR("Message alloc failed (%d)\n", ret);
1394 return ret;
1395 }
1396
1397 req.processor_id = proc_id;
1398 req.control_flags_set = control_flags_set;
1399 req.control_flags_clear = control_flags_clear;
1400
1401 ret = ti_sci_do_xfer(&xfer);
1402 if (ret) {
1403 ERROR("Transfer send failed (%d)\n", ret);
1404 return ret;
1405 }
1406
1407 return 0;
1408 }
1409
1410 /**
1411 * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
1412 * without requesting or waiting for a
1413 * response.
1414 *
1415 * @proc_id: Processor ID this request is for
1416 * @control_flags_set: Control flags to be set
1417 * @control_flags_clear: Control flags to be cleared
1418 *
1419 * Return: 0 if all goes well, else appropriate error message
1420 */
int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
1422 uint32_t control_flags_set,
1423 uint32_t control_flags_clear)
1424 {
1425 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1426 struct ti_sci_xfer xfer;
1427 int ret;
1428
1429 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1430 &req, sizeof(req),
1431 NULL, 0,
1432 &xfer);
1433 if (ret != 0U) {
1434 ERROR("Message alloc failed (%d)\n", ret);
1435 return ret;
1436 }
1437
1438 req.processor_id = proc_id;
1439 req.control_flags_set = control_flags_set;
1440 req.control_flags_clear = control_flags_clear;
1441
1442 ret = ti_sci_do_xfer(&xfer);
1443 if (ret != 0U) {
1444 ERROR("Transfer send failed (%d)\n", ret);
1445 return ret;
1446 }
1447
1448 return 0;
1449 }
1450
1451 /**
1452 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
1453 * processor configuration flags
1454 *
1455 * @proc_id: Processor ID this request is for
1456 * @cert_addr: Memory address at which payload image certificate is located
1457 *
1458 * Return: 0 if all goes well, else appropriate error message
1459 */
int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
1461 {
1462 struct ti_sci_msg_req_proc_auth_boot_image req;
1463 struct ti_sci_msg_hdr resp;
1464
1465 struct ti_sci_xfer xfer;
1466 int ret;
1467
1468 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMAGE, 0,
1469 &req, sizeof(req),
1470 &resp, sizeof(resp),
1471 &xfer);
1472 if (ret) {
1473 ERROR("Message alloc failed (%d)\n", ret);
1474 return ret;
1475 }
1476
1477 req.processor_id = proc_id;
1478 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1479 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1480 TISCI_ADDR_HIGH_SHIFT;
1481
1482 ret = ti_sci_do_xfer(&xfer);
1483 if (ret) {
1484 ERROR("Transfer send failed (%d)\n", ret);
1485 return ret;
1486 }
1487
1488 return 0;
1489 }
1490
1491 /**
1492 * ti_sci_proc_get_boot_status() - Get the processor boot status
1493 *
 * @proc_id: Processor ID this request is for
 * @bv: Pointer to populate with the processor boot vector
 * @cfg_flags: Pointer to populate with the configuration flags
 * @ctrl_flags: Pointer to populate with the control flags
 * @sts_flags: Pointer to populate with the status flags
1495 *
1496 * Return: 0 if all goes well, else appropriate error message
1497 */
int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1499 uint32_t *cfg_flags,
1500 uint32_t *ctrl_flags,
1501 uint32_t *sts_flags)
1502 {
1503 struct ti_sci_msg_req_get_proc_boot_status req;
1504 struct ti_sci_msg_resp_get_proc_boot_status resp;
1505
1506 struct ti_sci_xfer xfer;
1507 int ret;
1508
1509 ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
1510 &req, sizeof(req),
1511 &resp, sizeof(resp),
1512 &xfer);
1513 if (ret) {
1514 ERROR("Message alloc failed (%d)\n", ret);
1515 return ret;
1516 }
1517
1518 req.processor_id = proc_id;
1519
1520 ret = ti_sci_do_xfer(&xfer);
1521 if (ret) {
1522 ERROR("Transfer send failed (%d)\n", ret);
1523 return ret;
1524 }
1525
1526 *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1527 (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1528 TISCI_ADDR_HIGH_MASK);
1529 *cfg_flags = resp.config_flags;
1530 *ctrl_flags = resp.control_flags;
1531 *sts_flags = resp.status_flags;
1532
1533 return 0;
1534 }
1535
1536 /**
1537 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1538 *
1539 * @proc_id: Processor ID this request is for
 * @num_wait_iterations: Total number of iterations to check before
 * timing out and giving up
 * @num_match_iterations: Number of consecutive iterations for which the
 * status must match, to account for status bits glitching. This
 * implies that, in the worst case, the stable time may be up to
 * num_wait_iterations * num_match_iterations delay periods before
 * a timeout is reported.
 * @delay_per_iteration_us: Specifies how long to wait (in microseconds)
 * between status checks. This is the minimum duration; the
 * overhead of register reads and checks is on top of this and can
 * vary based on conditions.
 * @delay_before_iterations_us: Specifies how long to wait (in microseconds)
 * before the very first check in the first iteration of the status
 * check loop. This is the minimum duration; the overhead of
 * register reads and checks is on top of this.
 * @status_flags_1_set_all_wait: If non-zero, specifies that all bits of the
 * status matching this field MUST be 1.
 * @status_flags_1_set_any_wait: If non-zero, specifies that at least one of
 * the bits matching this field MUST be 1.
 * @status_flags_1_clr_all_wait: If non-zero, specifies that all bits of the
 * status matching this field MUST be 0.
 * @status_flags_1_clr_any_wait: If non-zero, specifies that at least one of
 * the bits matching this field MUST be 0.
1567 *
1568 * Return: 0 if all goes well, else appropriate error message
1569 */
int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1571 uint8_t num_match_iterations,
1572 uint8_t delay_per_iteration_us,
1573 uint8_t delay_before_iterations_us,
1574 uint32_t status_flags_1_set_all_wait,
1575 uint32_t status_flags_1_set_any_wait,
1576 uint32_t status_flags_1_clr_all_wait,
1577 uint32_t status_flags_1_clr_any_wait)
1578 {
1579 struct ti_sci_msg_req_wait_proc_boot_status req;
1580 struct ti_sci_msg_hdr resp;
1581
1582 struct ti_sci_xfer xfer;
1583 int ret;
1584
1585 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1586 &req, sizeof(req),
1587 &resp, sizeof(resp),
1588 &xfer);
1589 if (ret) {
1590 ERROR("Message alloc failed (%d)\n", ret);
1591 return ret;
1592 }
1593
1594 req.processor_id = proc_id;
1595 req.num_wait_iterations = num_wait_iterations;
1596 req.num_match_iterations = num_match_iterations;
1597 req.delay_per_iteration_us = delay_per_iteration_us;
1598 req.delay_before_iterations_us = delay_before_iterations_us;
1599 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1600 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1601 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1602 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1603
1604 ret = ti_sci_do_xfer(&xfer);
1605 if (ret) {
1606 ERROR("Transfer send failed (%d)\n", ret);
1607 return ret;
1608 }
1609
1610 return 0;
1611 }
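
/*
 * Illustrative note, not part of the driver: with the parameters interpreted
 * as described above, the worst-case wait before the firmware reports a
 * timeout is roughly delay_before_iterations_us +
 * num_wait_iterations * delay_per_iteration_us, plus register access
 * overhead; e.g. 10 + (100 * 10) = 1010 microseconds for 100 iterations of
 * 10 us each after a 10 us initial delay.
 */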
1612
1613 /**
1614 * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
1615 * without requesting or waiting for
1616 * a response.
1617 *
1618 * @proc_id: Processor ID this request is for
 * @num_wait_iterations: Total number of iterations to check before
 * timing out and giving up
 * @num_match_iterations: Number of consecutive iterations for which the
 * status must match, to account for status bits glitching. This
 * implies that, in the worst case, the stable time may be up to
 * num_wait_iterations * num_match_iterations delay periods before
 * a timeout is reported.
 * @delay_per_iteration_us: Specifies how long to wait (in microseconds)
 * between status checks. This is the minimum duration; the
 * overhead of register reads and checks is on top of this and can
 * vary based on conditions.
 * @delay_before_iterations_us: Specifies how long to wait (in microseconds)
 * before the very first check in the first iteration of the status
 * check loop. This is the minimum duration; the overhead of
 * register reads and checks is on top of this.
 * @status_flags_1_set_all_wait: If non-zero, specifies that all bits of the
 * status matching this field MUST be 1.
 * @status_flags_1_set_any_wait: If non-zero, specifies that at least one of
 * the bits matching this field MUST be 1.
 * @status_flags_1_clr_all_wait: If non-zero, specifies that all bits of the
 * status matching this field MUST be 0.
 * @status_flags_1_clr_any_wait: If non-zero, specifies that at least one of
 * the bits matching this field MUST be 0.
1646 *
1647 * Return: 0 if all goes well, else appropriate error message
1648 */
int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
1650 uint8_t num_wait_iterations,
1651 uint8_t num_match_iterations,
1652 uint8_t delay_per_iteration_us,
1653 uint8_t delay_before_iterations_us,
1654 uint32_t status_flags_1_set_all_wait,
1655 uint32_t status_flags_1_set_any_wait,
1656 uint32_t status_flags_1_clr_all_wait,
1657 uint32_t status_flags_1_clr_any_wait)
1658 {
1659 struct ti_sci_msg_req_wait_proc_boot_status req;
1660 struct ti_sci_xfer xfer;
1661 int ret;
1662
1663 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1664 &req, sizeof(req),
1665 NULL, 0,
1666 &xfer);
1667 if (ret != 0U) {
1668 ERROR("Message alloc failed (%d)\n", ret);
1669 return ret;
1670 }
1671
1672 req.processor_id = proc_id;
1673 req.num_wait_iterations = num_wait_iterations;
1674 req.num_match_iterations = num_match_iterations;
1675 req.delay_per_iteration_us = delay_per_iteration_us;
1676 req.delay_before_iterations_us = delay_before_iterations_us;
1677 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1678 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1679 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1680 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1681
1682 ret = ti_sci_do_xfer(&xfer);
1683 if (ret != 0U) {
1684 ERROR("Transfer send failed (%d)\n", ret);
1685 return ret;
1686 }
1687
1688 return 0;
1689 }
1690
1691 /**
 * ti_sci_enter_sleep() - Command to initiate system transition into suspend.
1693 *
1694 * @proc_id: Processor ID.
1695 * @mode: Low power mode to enter.
 * @core_resume_addr: Address that the core should be
 * resumed from after the low power transition.
1698 *
1699 * Return: 0 if all goes well, else appropriate error message
1700 */
int ti_sci_enter_sleep(uint8_t proc_id,
1702 uint8_t mode,
1703 uint64_t core_resume_addr)
1704 {
1705 struct ti_sci_msg_req_enter_sleep req;
1706 struct ti_sci_xfer xfer;
1707 int ret;
1708
1709 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_ENTER_SLEEP, 0,
1710 &req, sizeof(req),
1711 NULL, 0,
1712 &xfer);
1713 if (ret != 0U) {
1714 ERROR("Message alloc failed (%d)\n", ret);
1715 return ret;
1716 }
1717
1718 req.processor_id = proc_id;
1719 req.mode = mode;
1720 req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK;
1721 req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >>
1722 TISCI_ADDR_HIGH_SHIFT;
1723
1724 ret = ti_sci_do_xfer(&xfer);
1725 if (ret != 0U) {
1726 ERROR("Transfer send failed (%d)\n", ret);
1727 return ret;
1728 }
1729
1730 return 0;
1731 }
1732
1733 /**
1734 * ti_sci_init() - Basic initialization
1735 *
1736 * Return: 0 if all goes well, else appropriate error message
1737 */
int ti_sci_init(void)
1739 {
1740 struct ti_sci_msg_resp_version rev_info;
1741 int ret;
1742
1743 ret = ti_sci_get_revision(&rev_info);
1744 if (ret) {
1745 ERROR("Unable to communicate with control firmware (%d)\n", ret);
1746 return ret;
1747 }
1748
1749 INFO("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
1750 rev_info.abi_major, rev_info.abi_minor,
1751 rev_info.firmware_revision,
1752 rev_info.firmware_description);
1753
1754 return 0;
1755 }
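
/*
 * Illustrative note, not part of the driver: platform setup code is expected
 * to call ti_sci_init() once, early in boot, before any of the device, clock
 * or processor helpers above are used, so that communication with the system
 * firmware is known to work.
 */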
1756