1 /*
2  * Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <errno.h>
8 
9 #include <zephyr/drivers/i3c.h>
10 #include <zephyr/init.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/byteorder.h>
14 #include <zephyr/sys/sys_io.h>
15 #include <zephyr/sys/util.h>
16 
17 #define DEV_ID            0x0
18 #define DEV_ID_I3C_MASTER 0x5034
19 
20 #define CONF_STATUS0                      0x4
21 #define CONF_STATUS0_CMDR_DEPTH(x)        (4 << (((x) & GENMASK(31, 29)) >> 29))
22 #define CONF_STATUS0_ECC_CHK              BIT(28)
23 #define CONF_STATUS0_INTEG_CHK            BIT(27)
24 #define CONF_STATUS0_CSR_DAP_CHK          BIT(26)
25 #define CONF_STATUS0_TRANS_TOUT_CHK       BIT(25)
26 #define CONF_STATUS0_PROT_FAULTS_CHK      BIT(24)
27 #define CONF_STATUS0_GPO_NUM(x)           (((x) & GENMASK(23, 16)) >> 16)
28 #define CONF_STATUS0_GPI_NUM(x)           (((x) & GENMASK(15, 8)) >> 8)
29 #define CONF_STATUS0_IBIR_DEPTH(x)        (4 << (((x) & GENMASK(7, 6)) >> 7))
30 /* CONF_STATUS0_SUPPORTS_DDR moved to CONF_STATUS1 in rev >= 1p7 */
31 #define CONF_STATUS0_SUPPORTS_DDR         BIT(5)
32 #define CONF_STATUS0_SEC_MASTER           BIT(4)
33 /* And it was replaced with a Dev Role mask */
34 #define CONF_STATUS0_DEV_ROLE(x)          (((x) & GENMASK(5, 4)) >> 4)
35 #define CONF_STATUS0_DEV_ROLE_MAIN_MASTER 0
36 #define CONF_STATUS0_DEV_ROLE_SEC_MASTER  1
37 #define CONF_STATUS0_DEV_ROLE_SLAVE       2
38 #define CONF_STATUS0_DEVS_NUM(x)          ((x) & GENMASK(3, 0))
39 
40 #define CONF_STATUS1                     0x8
41 #define CONF_STATUS1_IBI_HW_RES(x)       ((((x) & GENMASK(31, 28)) >> 28) + 1)
42 #define CONF_STATUS1_CMD_DEPTH(x)        (4 << (((x) & GENMASK(27, 26)) >> 26))
43 #define CONF_STATUS1_SLV_DDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
44 #define CONF_STATUS1_SLV_DDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
45 #define CONF_STATUS1_SUPPORTS_DDR        BIT(14)
46 #define CONF_STATUS1_ALT_MODE            BIT(13)
47 #define CONF_STATUS1_IBI_DEPTH(x)        (2 << (((x) & GENMASK(12, 10)) >> 10))
48 #define CONF_STATUS1_RX_DEPTH(x)         (8 << (((x) & GENMASK(9, 5)) >> 5))
49 #define CONF_STATUS1_TX_DEPTH(x)         (8 << ((x) & GENMASK(4, 0)))
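/*
 * Note: the CONF_STATUS1 depth fields encode powers of two. As an illustrative
 * example (the register value is hypothetical), a TX_DEPTH field of 2 decodes
 * to 8 << 2 = 32 words of the 32-bit-wide TX FIFO.
 */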
50 
51 #define REV_ID               0xc
52 #define REV_ID_VID(id)       (((id) & GENMASK(31, 20)) >> 20)
53 #define REV_ID_PID(id)       (((id) & GENMASK(19, 8)) >> 8)
54 #define REV_ID_REV(id)       ((id) & GENMASK(7, 0))
55 #define REV_ID_VERSION(m, n) (((m) << 5) | (n))
56 #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 5)) >> 5)
57 #define REV_ID_REV_MINOR(id) ((id) & GENMASK(4, 0))
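/*
 * The packed major/minor revision is compared directly; e.g. the 1p7 checks
 * below compare REV_ID_REV(rev_id) against REV_ID_VERSION(1, 7), which packs
 * to (1 << 5) | 7 = 0x27.
 */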
58 
59 #define CTRL                     0x10
60 #define CTRL_DEV_EN              BIT(31)
61 #define CTRL_HALT_EN             BIT(30)
62 #define CTRL_MCS                 BIT(29)
63 #define CTRL_MCS_EN              BIT(28)
64 #define CTRL_I3C_11_SUPP         BIT(26)
65 #define CTRL_THD_DELAY(x)        (((x) << 24) & GENMASK(25, 24))
66 #define CTRL_TC_EN               BIT(9)
67 #define CTRL_HJ_DISEC            BIT(8)
68 #define CTRL_MST_ACK             BIT(7)
69 #define CTRL_HJ_ACK              BIT(6)
70 #define CTRL_HJ_INIT             BIT(5)
71 #define CTRL_MST_INIT            BIT(4)
72 #define CTRL_AHDR_OPT            BIT(3)
73 #define CTRL_PURE_BUS_MODE       0
74 #define CTRL_MIXED_FAST_BUS_MODE 2
75 #define CTRL_MIXED_SLOW_BUS_MODE 3
76 #define CTRL_BUS_MODE_MASK       GENMASK(1, 0)
77 #define THD_DELAY_MAX            3
78 
79 #define PRESCL_CTRL0         0x14
80 #define PRESCL_CTRL0_I2C(x)  ((x) << 16)
81 #define PRESCL_CTRL0_I3C(x)  (x)
82 #define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
83 #define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
84 
85 #define PRESCL_CTRL1             0x18
86 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
87 #define PRESCL_CTRL1_PP_LOW(x)   ((x) << 8)
88 #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
89 #define PRESCL_CTRL1_OD_LOW(x)   (x)
90 
91 #define SLV_STATUS4                 0x1C
92 #define SLV_STATUS4_BUSCON_FILL_LVL GENMASK(16, 8)
93 #define SLV_STATUS5_BUSCON_DATA     GENMASK(7, 0)
94 
95 #define MST_IER           0x20
96 #define MST_IDR           0x24
97 #define MST_IMR           0x28
98 #define MST_ICR           0x2c
99 #define MST_ISR           0x30
100 #define MST_INT_HALTED    BIT(18)
101 #define MST_INT_MR_DONE   BIT(17)
102 #define MST_INT_IMM_COMP  BIT(16)
103 #define MST_INT_TX_THR    BIT(15)
104 #define MST_INT_TX_OVF    BIT(14)
105 #define MST_INT_C_REF_ROV BIT(13)
106 #define MST_INT_IBID_THR  BIT(12)
107 #define MST_INT_IBID_UNF  BIT(11)
108 #define MST_INT_IBIR_THR  BIT(10)
109 #define MST_INT_IBIR_UNF  BIT(9)
110 #define MST_INT_IBIR_OVF  BIT(8)
111 #define MST_INT_RX_THR    BIT(7)
112 #define MST_INT_RX_UNF    BIT(6)
113 #define MST_INT_CMDD_EMP  BIT(5)
114 #define MST_INT_CMDD_THR  BIT(4)
115 #define MST_INT_CMDD_OVF  BIT(3)
116 #define MST_INT_CMDR_THR  BIT(2)
117 #define MST_INT_CMDR_UNF  BIT(1)
118 #define MST_INT_CMDR_OVF  BIT(0)
119 #define MST_INT_MASK      GENMASK(18, 0)
120 
121 #define MST_STATUS0             0x34
122 #define MST_STATUS0_IDLE        BIT(18)
123 #define MST_STATUS0_HALTED      BIT(17)
124 #define MST_STATUS0_MASTER_MODE BIT(16)
125 #define MST_STATUS0_TX_FULL     BIT(13)
126 #define MST_STATUS0_IBID_FULL   BIT(12)
127 #define MST_STATUS0_IBIR_FULL   BIT(11)
128 #define MST_STATUS0_RX_FULL     BIT(10)
129 #define MST_STATUS0_CMDD_FULL   BIT(9)
130 #define MST_STATUS0_CMDR_FULL   BIT(8)
131 #define MST_STATUS0_TX_EMP      BIT(5)
132 #define MST_STATUS0_IBID_EMP    BIT(4)
133 #define MST_STATUS0_IBIR_EMP    BIT(3)
134 #define MST_STATUS0_RX_EMP      BIT(2)
135 #define MST_STATUS0_CMDD_EMP    BIT(1)
136 #define MST_STATUS0_CMDR_EMP    BIT(0)
137 
138 #define CMDR                    0x38
139 #define CMDR_NO_ERROR           0
140 #define CMDR_DDR_PREAMBLE_ERROR 1
141 #define CMDR_DDR_PARITY_ERROR   2
142 #define CMDR_DDR_RX_FIFO_OVF    3
143 #define CMDR_DDR_TX_FIFO_UNF    4
144 #define CMDR_M0_ERROR           5
145 #define CMDR_M1_ERROR           6
146 #define CMDR_M2_ERROR           7
147 #define CMDR_MST_ABORT          8
148 #define CMDR_NACK_RESP          9
149 #define CMDR_INVALID_DA         10
150 #define CMDR_DDR_DROPPED        11
151 #define CMDR_ERROR(x)           (((x) & GENMASK(27, 24)) >> 24)
152 #define CMDR_XFER_BYTES(x)      (((x) & GENMASK(19, 8)) >> 8)
153 #define CMDR_CMDID_HJACK_DISEC  0xfe
154 #define CMDR_CMDID_HJACK_ENTDAA 0xff
155 #define CMDR_CMDID(x)           ((x) & GENMASK(7, 0))
156 
157 #define IBIR               0x3c
158 #define IBIR_ACKED         BIT(12)
159 #define IBIR_SLVID(x)      (((x) & GENMASK(11, 8)) >> 8)
160 #define IBIR_SLVID_INV     0xF
161 #define IBIR_ERROR         BIT(7)
162 #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
163 #define IBIR_TYPE_IBI      0
164 #define IBIR_TYPE_HJ       1
165 #define IBIR_TYPE_MR       2
166 #define IBIR_TYPE(x)       ((x) & GENMASK(1, 0))
167 
168 #define SLV_IER             0x40
169 #define SLV_IDR             0x44
170 #define SLV_IMR             0x48
171 #define SLV_ICR             0x4c
172 #define SLV_ISR             0x50
173 #define SLV_INT_CHIP_RST    BIT(31)
174 #define SLV_INT_PERIPH_RST  BIT(30)
175 #define SLV_INT_FLUSH_DONE  BIT(29)
176 #define SLV_INT_RST_DAA     BIT(28)
177 #define SLV_INT_BUSCON_UP   BIT(26)
178 #define SLV_INT_MRL_UP      BIT(25)
179 #define SLV_INT_MWL_UP      BIT(24)
180 #define SLV_INT_IBI_THR     BIT(23)
181 #define SLV_INT_IBI_DONE    BIT(22)
182 #define SLV_INT_DEFSLVS     BIT(21)
183 #define SLV_INT_TM          BIT(20)
184 #define SLV_INT_ERROR       BIT(19)
185 #define SLV_INT_EVENT_UP    BIT(18)
186 #define SLV_INT_HJ_DONE     BIT(17)
187 #define SLV_INT_MR_DONE     BIT(16)
188 #define SLV_INT_DA_UPD      BIT(15)
189 #define SLV_INT_SDR_FAIL    BIT(14)
190 #define SLV_INT_DDR_FAIL    BIT(13)
191 #define SLV_INT_M_RD_ABORT  BIT(12)
192 #define SLV_INT_DDR_RX_THR  BIT(11)
193 #define SLV_INT_DDR_TX_THR  BIT(10)
194 #define SLV_INT_SDR_RX_THR  BIT(9)
195 #define SLV_INT_SDR_TX_THR  BIT(8)
196 #define SLV_INT_DDR_RX_UNF  BIT(7)
197 #define SLV_INT_DDR_TX_OVF  BIT(6)
198 #define SLV_INT_SDR_RX_UNF  BIT(5)
199 #define SLV_INT_SDR_TX_OVF  BIT(4)
200 #define SLV_INT_DDR_RD_COMP BIT(3)
201 #define SLV_INT_DDR_WR_COMP BIT(2)
202 #define SLV_INT_SDR_RD_COMP BIT(1)
203 #define SLV_INT_SDR_WR_COMP BIT(0)
204 
205 #define SLV_STATUS0                   0x54
206 #define SLV_STATUS0_IBI_XFRD_BYTEs(s) (((s) & GENMASK(31, 24)) >> 24)
207 #define SLV_STATUS0_REG_ADDR(s)       (((s) & GENMASK(23, 16)) >> 16)
208 #define SLV_STATUS0_XFRD_BYTES(s)     ((s) & GENMASK(15, 0))
209 
210 #define SLV_STATUS1              0x58
211 #define SLV_STATUS1_SCL_IN_RST   BIT(31)
212 #define SLV_STATUS1_HJ_IN_USE    BIT(30)
213 #define SLV_STATUS1_NACK_NXT_PW  BIT(29)
214 #define SLV_STATUS1_NACK_NXT_PR  BIT(28)
215 #define SLV_STATUS1_MR_PEND      BIT(27)
216 #define SLV_STATUS1_HJ_PEND      BIT(26)
217 #define SLV_STATUS1_IBI_PEND     BIT(25)
218 #define SLV_STATUS1_IBI_DIS      BIT(24)
219 #define SLV_STATUS1_BUS_VAR      BIT(23)
220 #define SLV_STATUS1_TCAM0_DIS    BIT(22)
221 #define SLV_STATUS1_AS(s)        (((s) & GENMASK(21, 20)) >> 20)
222 #define SLV_STATUS1_VEN_TM       BIT(19)
223 #define SLV_STATUS1_HJ_DIS       BIT(18)
224 #define SLV_STATUS1_MR_DIS       BIT(17)
225 #define SLV_STATUS1_PROT_ERR     BIT(16)
226 #define SLV_STATUS1_DA(s)        (((s) & GENMASK(15, 9)) >> 9)
227 #define SLV_STATUS1_HAS_DA       BIT(8)
228 #define SLV_STATUS1_DDR_RX_FULL  BIT(7)
229 #define SLV_STATUS1_DDR_TX_FULL  BIT(6)
230 #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
231 #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
232 #define SLV_STATUS1_SDR_RX_FULL  BIT(3)
233 #define SLV_STATUS1_SDR_TX_FULL  BIT(2)
234 #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
235 #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
236 
237 #define SLV_IBI_CTRL               0x5c
238 #define SLV_IBI_TCAM_EVNT(x)       ((x) << 27)
239 #define SLV_IBI_PL(x)              ((x) << 16)
240 #define SLV_IBI_TCAM0              BIT(9)
241 #define SLV_IBI_REQ                BIT(8)
242 #define SLV_IBI_AUTO_CLR_IBI       1
243 #define SLV_IBI_AUTO_CLR_PR        2
244 #define SLV_IBI_AUTO_CLR_IBI_OR_PR 3
245 #define SLV_IBI_CLEAR_TRIGGER(x)   ((x) << 4)
246 
247 #define CMD0_FIFO                   0x60
248 #define CMD0_FIFO_IS_DDR            BIT(31)
249 #define CMD0_FIFO_IS_CCC            BIT(30)
250 #define CMD0_FIFO_BCH               BIT(29)
251 #define XMIT_BURST_STATIC_SUBADDR   0
252 #define XMIT_SINGLE_INC_SUBADDR     1
253 #define XMIT_SINGLE_STATIC_SUBADDR  2
254 #define XMIT_BURST_WITHOUT_SUBADDR  3
255 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
256 #define CMD0_FIFO_SBCA              BIT(26)
257 #define CMD0_FIFO_RSBC              BIT(25)
258 #define CMD0_FIFO_IS_10B            BIT(24)
259 #define CMD0_FIFO_PL_LEN(l)         ((l) << 12)
260 #define CMD0_FIFO_IS_DB             BIT(11)
261 #define CMD0_FIFO_PL_LEN_MAX        4095
262 #define CMD0_FIFO_DEV_ADDR(a)       ((a) << 1)
263 #define CMD0_FIFO_RNW               BIT(0)
264 
265 #define CMD1_FIFO            0x64
266 #define CMD1_FIFO_CMDID(id)  ((id) << 24)
267 #define CMD1_FIFO_DB(db)     (((db) << 8) & GENMASK(15, 8))
268 #define CMD1_FIFO_CSRADDR(a) (a)
269 #define CMD1_FIFO_CCC(id)    (id)
270 
271 #define TX_FIFO 0x68
272 
273 #define TX_FIFO_STATUS 0x6C
274 
275 #define IMD_CMD0             0x70
276 #define IMD_CMD0_PL_LEN(l)   ((l) << 12)
277 #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
278 #define IMD_CMD0_RNW         BIT(0)
279 
280 #define IMD_CMD1         0x74
281 #define IMD_CMD1_CCC(id) (id)
282 
283 #define IMD_DATA                    0x78
284 #define RX_FIFO                     0x80
285 #define IBI_DATA_FIFO               0x84
286 #define SLV_DDR_TX_FIFO             0x88
287 #define SLV_DDR_RX_FIFO             0x8c
288 #define DDR_PREAMBLE_MASK           GENMASK(19, 18)
289 #define DDR_PREAMBLE_CMD_CRC        (0x1 << 18)
290 #define DDR_PREAMBLE_DATA_ABORT     (0x2 << 18)
291 #define DDR_PREAMBLE_DATA_ABORT_ALT (0x3 << 18)
292 #define DDR_DATA(x)                 (((x) & GENMASK(17, 2)) >> 2)
293 #define DDR_EVEN_PARITY             BIT(0)
294 #define DDR_ODD_PARITY              BIT(1)
295 #define DDR_CRC_AND_HEADER_SIZE     0x4
296 #define DDR_CONVERT_BUF_LEN(x)      (4 * (x))
297 
298 #define HDR_CMD_RD         BIT(15)
299 #define HDR_CMD_CODE(c)    (((c) & GENMASK(6, 0)) << 8)
300 #define DDR_CRC_TOKEN      (0xC << 14)
301 #define DDR_CRC_TOKEN_MASK GENMASK(17, 14)
302 #define DDR_CRC(t)         (((t) & (GENMASK(13, 9))) >> 9)
303 #define DDR_CRC_WR_SETUP   BIT(8)
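/*
 * For reference, the macros above describe the 20-bit HDR-DDR words as the
 * FIFOs expose them: a 2-bit preamble in [19:18], 16 payload bits in [17:2]
 * and the two parity bits in [1:0]; the closing CRC word carries the 0xC
 * token in [17:14] and the 5-bit CRC in [13:9].
 */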
304 
305 #define CMD_IBI_THR_CTRL 0x90
306 #define IBIR_THR(t)      ((t) << 24)
307 #define CMDR_THR(t)      ((t) << 16)
308 #define CMDR_THR_MASK    (GENMASK(20, 16))
309 #define IBI_THR(t)       ((t) << 8)
310 #define CMD_THR(t)       (t)
311 
312 #define TX_RX_THR_CTRL 0x94
313 #define RX_THR(t)      ((t) << 16)
314 #define RX_THR_MASK    (GENMASK(31, 16))
315 #define TX_THR(t)      (t)
316 #define TX_THR_MASK    (GENMASK(15, 0))
317 
318 #define SLV_DDR_TX_RX_THR_CTRL 0x98
319 #define SLV_DDR_RX_THR(t)      ((t) << 16)
320 #define SLV_DDR_TX_THR(t)      (t)
321 
322 #define FLUSH_CTRL            0x9c
323 #define FLUSH_IBI_RESP        BIT(24)
324 #define FLUSH_CMD_RESP        BIT(23)
325 #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
326 #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
327 #define FLUSH_IMM_FIFO        BIT(20)
328 #define FLUSH_IBI_FIFO        BIT(19)
329 #define FLUSH_RX_FIFO         BIT(18)
330 #define FLUSH_TX_FIFO         BIT(17)
331 #define FLUSH_CMD_FIFO        BIT(16)
332 
333 #define SLV_CTRL 0xA0
334 
335 #define SLV_PROT_ERR_TYPE 0xA4
336 #define SLV_ERR6_IBI      BIT(9)
337 #define SLV_ERR6_PR       BIT(8)
338 #define SLV_ERR_GETCCC    BIT(7)
339 #define SLV_ERR5          BIT(6)
340 #define SLV_ERR4          BIT(5)
341 #define SLV_ERR3          BIT(4)
342 #define SLV_ERR2_PW       BIT(3)
343 #define SLV_ERR2_SETCCC   BIT(2)
344 #define SLV_ERR1          BIT(1)
345 #define SLV_ERR0          BIT(0)
346 
347 #define SLV_STATUS2 0xA8
348 
349 #define SLV_STATUS3           0xAC
350 #define SLV_STATUS3_BC_FSM(s) (((s) & GENMASK(26, 16)) >> 16)
351 #define SLV_STATUS3_MWL(s)    ((s) & GENMASK(15, 0))
352 
353 #define TTO_PRESCL_CTRL0               0xb0
354 #define TTO_PRESCL_CTRL0_PRESCL_I2C(x) ((x) << 16)
355 #define TTO_PRESCL_CTRL0_PRESCL_I3C(x) (x)
356 
357 #define TTO_PRESCL_CTRL1           0xb4
358 #define TTO_PRESCL_CTRL1_DIVB(x)   ((x) << 16)
359 #define TTO_PRESCL_CTRL1_DIVA(x)   (x)
360 #define TTO_PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
361 #define TTO_PRESCL_CTRL1_OD_LOW(x) (x)
362 
363 #define DEVS_CTRL                  0xb8
364 #define DEVS_CTRL_DEV_CLR_SHIFT    16
365 #define DEVS_CTRL_DEV_CLR_ALL      GENMASK(31, 16)
366 #define DEVS_CTRL_DEV_CLR(dev)     BIT(16 + (dev))
367 #define DEVS_CTRL_DEV_ACTIVE(dev)  BIT(dev)
368 #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
369 #define MAX_DEVS                   16
370 
371 #define DEV_ID_RR0(d)              (0xc0 + ((d) * 0x10))
372 #define DEV_ID_RR0_LVR_EXT_ADDR    BIT(11)
373 #define DEV_ID_RR0_HDR_CAP         BIT(10)
374 #define DEV_ID_RR0_IS_I3C          BIT(9)
375 #define DEV_ID_RR0_DEV_ADDR_MASK   (GENMASK(7, 1) | GENMASK(15, 13))
376 #define DEV_ID_RR0_SET_DEV_ADDR(a) ((((a) << 1) & GENMASK(7, 1)) | (((a) & GENMASK(9, 7)) << 6))
377 #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | (((x) >> 6) & GENMASK(9, 7)))
378 
379 #define DEV_ID_RR1(d)           (0xc4 + ((d) * 0x10))
380 #define DEV_ID_RR1_PID_MSB(pid) (pid)
381 
382 #define DEV_ID_RR2(d)           (0xc8 + ((d) * 0x10))
383 #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
384 #define DEV_ID_RR2_BCR(bcr)     ((bcr) << 8)
385 #define DEV_ID_RR2_DCR(dcr)     (dcr)
386 #define DEV_ID_RR2_LVR(lvr)     (lvr)
387 
388 #define SIR_MAP(x)               (0x180 + ((x) * 4))
389 #define SIR_MAP_DEV_REG(d)       SIR_MAP((d) / 2)
390 #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
391 #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
392 #define SIR_MAP_DEV_CONF(d, c)   ((c) << (((d) % 2) ? 16 : 0))
393 #define DEV_ROLE_SLAVE           0
394 #define DEV_ROLE_MASTER          1
395 #define SIR_MAP_DEV_ROLE(role)   ((role) << 14)
396 #define SIR_MAP_DEV_SLOW         BIT(13)
397 #define SIR_MAP_DEV_PL(l)        ((l) << 8)
398 #define SIR_MAP_PL_MAX           GENMASK(4, 0)
399 #define SIR_MAP_DEV_DA(a)        ((a) << 1)
400 #define SIR_MAP_DEV_ACK          BIT(0)
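/*
 * Each SIR_MAP register holds two 16-bit IBI handling entries, one per target.
 * A minimal sketch of composing and installing one entry, mirroring
 * cdns_i3c_controller_ibi_enable() below (role, dyn_addr, max_ibi_payload,
 * idx and base are illustrative variables):
 *
 *   uint32_t cfg = SIR_MAP_DEV_ROLE(role) | SIR_MAP_DEV_DA(dyn_addr) |
 *                  SIR_MAP_DEV_PL(max_ibi_payload) | SIR_MAP_DEV_ACK;
 *   uint32_t map = sys_read32(base + SIR_MAP_DEV_REG(idx));
 *
 *   map &= ~SIR_MAP_DEV_CONF_MASK(idx);
 *   map |= SIR_MAP_DEV_CONF(idx, cfg);
 *   sys_write32(map, base + SIR_MAP_DEV_REG(idx));
 */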
401 
402 #define GRPADDR_LIST 0x198
403 
404 #define GRPADDR_CS 0x19C
405 
406 #define GPIR_WORD(x)     (0x200 + ((x) * 4))
407 #define GPI_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
408 
409 #define GPOR_WORD(x)     (0x220 + ((x) * 4))
410 #define GPO_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
411 
412 #define ASF_INT_STATUS        0x300
413 #define ASF_INT_RAW_STATUS    0x304
414 #define ASF_INT_MASK          0x308
415 #define ASF_INT_TEST          0x30c
416 #define ASF_INT_FATAL_SELECT  0x310
417 #define ASF_INTEGRITY_ERR     BIT(6)
418 #define ASF_PROTOCOL_ERR      BIT(5)
419 #define ASF_TRANS_TIMEOUT_ERR BIT(4)
420 #define ASF_CSR_ERR           BIT(3)
421 #define ASF_DAP_ERR           BIT(2)
422 #define ASF_SRAM_UNCORR_ERR   BIT(1)
423 #define ASF_SRAM_CORR_ERR     BIT(0)
424 
425 #define ASF_SRAM_CORR_FAULT_STATUS      0x320
426 #define ASF_SRAM_UNCORR_FAULT_STATUS    0x324
427 #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
428 #define ASF_SRAM_CORR_FAULT_ADDR(x)     ((x) & GENMASK(23, 0))
429 
430 #define ASF_SRAM_FAULT_STATS           0x328
431 #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
432 #define ASF_SRAM_FAULT_CORR_STATS(x)   ((x) & GENMASK(15, 0))
433 
434 #define ASF_TRANS_TOUT_CTRL   0x330
435 #define ASF_TRANS_TOUT_EN     BIT(31)
436 #define ASF_TRANS_TOUT_VAL(x) (x)
437 
438 #define ASF_TRANS_TOUT_FAULT_MASK      0x334
439 #define ASF_TRANS_TOUT_FAULT_STATUS    0x338
440 #define ASF_TRANS_TOUT_FAULT_APB       BIT(3)
441 #define ASF_TRANS_TOUT_FAULT_SCL_LOW   BIT(2)
442 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH  BIT(1)
443 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
444 
445 #define ASF_PROTO_FAULT_MASK            0x340
446 #define ASF_PROTO_FAULT_STATUS          0x344
447 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
448 #define ASF_PROTO_FAULT_SLVDDR_FAIL     BIT(30)
449 #define ASF_PROTO_FAULT_S(x)            BIT(16 + (x))
450 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
451 #define ASF_PROTO_FAULT_MSTDDR_FAIL     BIT(14)
452 #define ASF_PROTO_FAULT_M(x)            BIT(x)
453 
454 /*******************************************************************************
455  * Local Constants Definition
456  ******************************************************************************/
457 
458 /* TODO: this needs to be configurable in the dts...somehow */
459 #define I3C_CONTROLLER_ADDR 0x08
460 
461 /* Maximum i3c devices that the IP can be built with */
462 #define I3C_MAX_DEVS                     11
463 #define I3C_MAX_MSGS                     10
464 #define I3C_SIR_DEFAULT_DA               0x7F
465 #define I3C_MAX_IDLE_CANCEL_WAIT_RETRIES 50
466 #define I3C_PRESCL_REG_SCALE             (4)
467 #define I2C_PRESCL_REG_SCALE             (5)
468 #define I3C_WAIT_FOR_IDLE_STATE_US       100
469 #define I3C_IDLE_TIMEOUT_CYC                                                                       \
470 	(I3C_WAIT_FOR_IDLE_STATE_US * (sys_clock_hw_cycles_per_sec() / USEC_PER_SEC))
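/*
 * Worked example (the clock rate is illustrative): with a 100 MHz hardware
 * cycle counter this timeout evaluates to roughly 100 us * 100 cycles/us =
 * 10000 cycles.
 */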
471 
472 /* Target T_LOW period in open-drain mode. */
473 #define I3C_BUS_TLOW_OD_MIN_NS 200
474 
475 /*
476  * The MIPI I3C v1.1.1 specification defines the maximum SDA signal data hold
477  * time in push-pull mode as the minimum of the clock rise and fall times plus 3 ns.
478  */
479 #define I3C_HD_PP_DEFAULT_NS 10
480 
481 /* Interrupt thresholds. */
482 /* command response fifo threshold */
483 #define I3C_CMDR_THR 1
484 /* command tx fifo threshold - unused */
485 #define I3C_CMDD_THR 1
486 /* in-band-interrupt data fifo threshold - unused */
487 #define I3C_IBID_THR 1
488 /* in-band-interrupt response queue threshold */
489 #define I3C_IBIR_THR 1
490 /* tx data threshold - unused */
491 #define I3C_TX_THR   1
492 
493 #define LOG_MODULE_NAME I3C_CADENCE
494 LOG_MODULE_REGISTER(I3C_CADENCE, CONFIG_I3C_CADENCE_LOG_LEVEL);
495 
496 /*******************************************************************************
497  * Local Types Definition
498  ******************************************************************************/
499 
500 /** Describes peripheral HW configuration determined from CONFx registers. */
501 struct cdns_i3c_hw_config {
502 	/* Revision ID */
503 	uint32_t rev_id;
504 	/* The maximum command queue depth. */
505 	uint32_t cmd_mem_depth;
506 	/* The maximum command response queue depth. */
507 	uint32_t cmdr_mem_depth;
508 	/* The maximum RX FIFO depth. */
509 	uint32_t rx_mem_depth;
510 	/* The maximum TX FIFO depth. */
511 	uint32_t tx_mem_depth;
512 	/* The maximum DDR RX FIFO depth. */
513 	uint32_t ddr_rx_mem_depth;
514 	/* The maximum DDR TX FIFO depth. */
515 	uint32_t ddr_tx_mem_depth;
516 	/* The maximum IBIR FIFO depth. */
517 	uint32_t ibir_mem_depth;
518 	/* The maximum IBI FIFO depth. */
519 	uint32_t ibi_mem_depth;
520 };
521 
522 /* Cadence I3C/I2C Device Private Data */
523 struct cdns_i3c_i2c_dev_data {
524 	/* Device id within the retaining registers. This is set after bus initialization by the
525 	 * controller.
526 	 */
527 	uint8_t id;
528 };
529 
530 /* Single command/transfer */
531 struct cdns_i3c_cmd {
532 	uint32_t cmd0;
533 	uint32_t cmd1;
534 	uint32_t ddr_header;
535 	uint32_t ddr_crc;
536 	uint32_t len;
537 	uint32_t *num_xfer;
538 	void *buf;
539 	uint32_t error;
540 	enum i3c_data_rate hdr;
541 };
542 
543 /* Transfer data */
544 struct cdns_i3c_xfer {
545 	struct k_sem complete;
546 	int ret;
547 	int num_cmds;
548 	struct cdns_i3c_cmd cmds[I3C_MAX_MSGS];
549 };
550 
551 #ifdef CONFIG_I3C_USE_IBI
552 /* IBI transferred data */
553 struct cdns_i3c_ibi_buf {
554 	uint8_t ibi_data[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];
555 	uint8_t ibi_data_cnt;
556 };
557 #endif
558 
559 /* Driver config */
560 struct cdns_i3c_config {
561 	struct i3c_driver_config common;
562 	/** base address of the controller */
563 	uintptr_t base;
564 	/** input frequency to the I3C Cadence */
565 	uint32_t input_frequency;
566 	/** Interrupt configuration function. */
567 	void (*irq_config_func)(const struct device *dev);
568 	/** IBID Threshold value */
569 	uint8_t ibid_thr;
570 };
571 
572 /* Driver instance data */
573 struct cdns_i3c_data {
574 	struct i3c_driver_data common;
575 	struct cdns_i3c_hw_config hw_cfg;
576 #ifdef CONFIG_I3C_USE_IBI
577 	struct cdns_i3c_ibi_buf ibi_buf;
578 #endif
579 	struct k_mutex bus_lock;
580 	struct cdns_i3c_i2c_dev_data cdns_i3c_i2c_priv_data[I3C_MAX_DEVS];
581 	struct cdns_i3c_xfer xfer;
582 	struct i3c_target_config *target_config;
583 	struct k_sem ibi_hj_complete;
584 	uint32_t free_rr_slots;
585 	uint16_t fifo_bytes_read;
586 	uint8_t max_devs;
587 };
588 
589 /*******************************************************************************
590  * Global Variables Declaration
591  ******************************************************************************/
592 
593 /*******************************************************************************
594  * Local Functions Declaration
595  ******************************************************************************/
596 
597 /*******************************************************************************
598  * Private Functions Code
599  ******************************************************************************/
600 
601 static uint8_t i3c_cdns_crc5(uint8_t crc5, uint16_t word)
602 {
603 	uint8_t crc0;
604 	int i;
605 
606 	/*
607 	 * crc0 = next_data_bit ^ crc[4]
608 	 *                1         2            3       4
609 	 * crc[4:0] = { crc[3:2], crc[1]^crc0, crc[0], crc0 }
610 	 */
611 	for (i = 15; i >= 0; --i) {
612 		crc0 = ((word >> i) ^ (crc5 >> 4)) & 0x1;
613 		crc5 = ((crc5 << 1) & 0x1a) | (((crc5 >> 1) ^ crc0) << 2) | crc0;
614 	}
615 
616 	return crc5 & 0x1f;
617 }
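/*
 * The routine above implements the CRC-5 (polynomial x^5 + x^2 + 1) that
 * protects HDR-DDR transfers, processing one 16-bit word per call. A minimal
 * usage sketch, mirroring the DDR paths below (cmd_word, data_words and
 * nwords are illustrative names):
 *
 *   uint8_t crc = 0x1F;
 *
 *   crc = i3c_cdns_crc5(crc, cmd_word);
 *   for (size_t i = 0; i < nwords; i++) {
 *       crc = i3c_cdns_crc5(crc, data_words[i]);
 *   }
 *
 * crc then holds the 5-bit value carried in the closing CRC word.
 */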
618 
619 static uint8_t cdns_i3c_ddr_parity(uint16_t payload)
620 {
621 	uint16_t pb;
622 	uint8_t parity;
623 
624 	/* Calculate odd parity. */
625 	pb = (payload >> 15) ^ (payload >> 13) ^ (payload >> 11) ^ (payload >> 9) ^ (payload >> 7) ^
626 	     (payload >> 5) ^ (payload >> 3) ^ (payload >> 1);
627 	parity = (pb & 1) << 1;
628 	/* Calculate even and 1 parity */
629 	pb = (payload >> 14) ^ (payload >> 12) ^ (payload >> 10) ^ (payload >> 8) ^ (payload >> 6) ^
630 	     (payload >> 4) ^ (payload >> 2) ^ payload ^ 1;
631 	parity |= (pb & 1);
632 
633 	return parity;
634 }
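/*
 * Worked example (payload value is illustrative): for payload 0x8000 only
 * bit 15, an odd-numbered bit, is set, so the odd-bit parity (bit 1) is 1 and
 * the even-bit parity XORed with 1 (bit 0) is also 1; the function returns
 * 0x3, and prepare_ddr_word(0x8000) below yields (0x8000 << 2) | 0x3 = 0x20003.
 */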
635 
636 /* This prepares the DDR word from the payload by adding the parity bits. It
637  * does not write the preamble.
638  */
639 static uint32_t prepare_ddr_word(uint16_t payload)
640 {
641 	return (uint32_t)payload << 2 | cdns_i3c_ddr_parity(payload);
642 }
643 
644 /* This ensures that PA0 contains 1'b1 which allows for easier Bus Turnaround */
645 static uint16_t prepare_ddr_cmd_parity_adjustment_bit(uint16_t word)
646 {
647 	uint16_t pb;
648 
649 	pb = (word >> 14) ^ (word >> 12) ^ (word >> 10) ^ (word >> 8) ^ (word >> 6) ^ (word >> 4) ^
650 	     (word >> 2);
651 
652 	if (pb & 1) {
653 		word |= BIT(0);
654 	}
655 
656 	return word;
657 }
658 
659 /* Computes and sets parity */
660 /* Returns [7:1] 7-bit addr, [0] even/xor parity */
661 static uint8_t cdns_i3c_even_parity_byte(uint8_t byte)
662 {
663 	uint8_t parity = 0;
664 	uint8_t b = byte;
665 
666 	while (b) {
667 		parity = !parity;
668 		b = b & (b - 1);
669 	}
670 	b = (byte << 1) | !parity;
671 
672 	return b;
673 }
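/*
 * Worked example (addresses are illustrative): for the 7-bit address 0x08 the
 * set-bit count is odd, so the appended bit is 0 and the function returns
 * (0x08 << 1) | 0 = 0x10; for 0x50 the count is even and it returns
 * (0x50 << 1) | 1 = 0xA1.
 */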
674 
675 /* Check if command response fifo is empty */
676 static inline bool cdns_i3c_cmd_rsp_fifo_empty(const struct cdns_i3c_config *config)
677 {
678 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
679 
680 	return ((mst_st & MST_STATUS0_CMDR_EMP) ? true : false);
681 }
682 
683 /* Check if command fifo is empty */
684 static inline bool cdns_i3c_cmd_fifo_empty(const struct cdns_i3c_config *config)
685 {
686 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
687 
688 	return ((mst_st & MST_STATUS0_CMDD_EMP) ? true : false);
689 }
690 
691 /* Check if command fifo is full */
692 static inline bool cdns_i3c_cmd_fifo_full(const struct cdns_i3c_config *config)
693 {
694 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
695 
696 	return ((mst_st & MST_STATUS0_CMDD_FULL) ? true : false);
697 }
698 
699 /* Check if ibi response fifo is empty */
700 static inline bool cdns_i3c_ibi_rsp_fifo_empty(const struct cdns_i3c_config *config)
701 {
702 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
703 
704 	return ((mst_st & MST_STATUS0_IBIR_EMP) ? true : false);
705 }
706 
707 /* Check if tx fifo is full */
708 static inline bool cdns_i3c_tx_fifo_full(const struct cdns_i3c_config *config)
709 {
710 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
711 
712 	return ((mst_st & MST_STATUS0_TX_FULL) ? true : false);
713 }
714 
715 /* Check if rx fifo is full */
716 static inline bool cdns_i3c_rx_fifo_full(const struct cdns_i3c_config *config)
717 {
718 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
719 
720 	return ((mst_st & MST_STATUS0_RX_FULL) ? true : false);
721 }
722 
723 /* Check if rx fifo is empty */
724 static inline bool cdns_i3c_rx_fifo_empty(const struct cdns_i3c_config *config)
725 {
726 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
727 
728 	return ((mst_st & MST_STATUS0_RX_EMP) ? true : false);
729 }
730 
731 /* Check if ibi fifo is empty */
732 static inline bool cdns_i3c_ibi_fifo_empty(const struct cdns_i3c_config *config)
733 {
734 	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
735 
736 	return ((mst_st & MST_STATUS0_IBID_EMP) ? true : false);
737 }
738 
739 /* Interrupt handling */
740 static inline void cdns_i3c_interrupts_disable(const struct cdns_i3c_config *config)
741 {
742 	sys_write32(MST_INT_MASK, config->base + MST_IDR);
743 }
744 
745 static inline void cdns_i3c_interrupts_clear(const struct cdns_i3c_config *config)
746 {
747 	sys_write32(MST_INT_MASK, config->base + MST_ICR);
748 }
749 
750 /* FIFO mgmt */
751 static void cdns_i3c_write_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
752 				   uint32_t len)
753 {
754 	const uint32_t *ptr = buf;
755 	uint32_t remain, val;
756 
757 	for (remain = len; remain >= 4; remain -= 4) {
758 		val = *ptr++;
759 		sys_write32(val, config->base + TX_FIFO);
760 	}
761 
762 	if (remain > 0) {
763 		val = 0;
764 		memcpy(&val, ptr, remain);
765 		sys_write32(val, config->base + TX_FIFO);
766 	}
767 }
768 
769 static void cdns_i3c_write_ddr_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
770 				       uint32_t len)
771 {
772 	const uint32_t *ptr = buf;
773 	uint32_t remain, val;
774 
775 	for (remain = len; remain >= 4; remain -= 4) {
776 		val = *ptr++;
777 		sys_write32(val, config->base + SLV_DDR_TX_FIFO);
778 	}
779 
780 	if (remain > 0) {
781 		val = 0;
782 		memcpy(&val, ptr, remain);
783 		sys_write32(val, config->base + SLV_DDR_TX_FIFO);
784 	}
785 }
786 
787 #ifdef CONFIG_I3C_USE_IBI
788 static void cdns_i3c_write_ibi_fifo(const struct cdns_i3c_config *config, const void *buf,
789 				    uint32_t len)
790 {
791 	const uint32_t *ptr = buf;
792 	uint32_t remain, val;
793 
794 	for (remain = len; remain >= 4; remain -= 4) {
795 		val = *ptr++;
796 		sys_write32(val, config->base + IBI_DATA_FIFO);
797 	}
798 
799 	if (remain > 0) {
800 		val = 0;
801 		memcpy(&val, ptr, remain);
802 		sys_write32(val, config->base + IBI_DATA_FIFO);
803 	}
804 }
805 #endif /* CONFIG_I3C_USE_IBI */
806 
807 static void cdns_i3c_target_read_rx_fifo(const struct device *dev)
808 {
809 	const struct cdns_i3c_config *config = dev->config;
810 	struct cdns_i3c_data *data = dev->data;
811 	const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
812 
813 	/* Version 1p7 uses the full 32b FIFO width */
814 	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
815 		uint16_t xferred_bytes =
816 			SLV_STATUS0_XFRD_BYTES(sys_read32(config->base + SLV_STATUS0));
817 
818 		for (int i = data->fifo_bytes_read; i < xferred_bytes; i += 4) {
819 			uint32_t rx_data = sys_read32(config->base + RX_FIFO);
820 			/* Call write received cb for each remaining byte  */
821 			for (int j = 0; j < MIN(4, xferred_bytes - i); j++) {
822 				target_cb->write_received_cb(data->target_config,
823 							     (rx_data >> (8 * j)));
824 			}
825 		}
826 		/*
827 		 * Store the transferred byte count, as the threshold interrupt may trigger
828 		 * again; xferred_bytes counts up to the "total" bytes received.
829 		 */
830 		data->fifo_bytes_read = xferred_bytes;
831 	} else {
832 		/*
833 		 * On older versions, target writes only use the first byte of the
834 		 * 32-bit-wide FIFO.
835 		 */
836 		uint8_t rx_data = (uint8_t)sys_read32(config->base + RX_FIFO);
837 
838 		target_cb->write_received_cb(data->target_config, rx_data);
839 	}
840 }
841 
842 static int cdns_i3c_read_rx_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
843 {
844 	uint32_t *ptr = buf;
845 	uint32_t remain, val;
846 
847 	for (remain = len; remain >= 4; remain -= 4) {
848 		if (cdns_i3c_rx_fifo_empty(config)) {
849 			return -EIO;
850 		}
851 		val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
852 		*ptr++ = val;
853 	}
854 
855 	if (remain > 0) {
856 		if (cdns_i3c_rx_fifo_empty(config)) {
857 			return -EIO;
858 		}
859 		val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
860 		memcpy(ptr, &val, remain);
861 	}
862 
863 	return 0;
864 }
865 
866 static int cdns_i3c_read_rx_fifo_ddr_xfer(const struct cdns_i3c_config *config, void *buf,
867 					  uint32_t len, uint32_t ddr_header)
868 {
869 	uint16_t *ptr = buf;
870 	uint32_t val;
871 	uint32_t preamble;
872 	uint8_t crc5 = 0x1F;
873 
874 	/*
875 	 * TODO: This function does not support threshold interrupts; it expects the
876 	 * whole packet to be within the FIFO and not split across multiple calls to this function.
877 	 */
878 	crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(ddr_header));
879 
880 	for (int i = 0; i < len; i++) {
881 		if (cdns_i3c_rx_fifo_empty(config)) {
882 			return -EIO;
883 		}
884 		val = sys_read32(config->base + RX_FIFO);
885 		preamble = (val & DDR_PREAMBLE_MASK);
886 
887 		if (preamble == DDR_PREAMBLE_DATA_ABORT ||
888 		    preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
889 			*ptr++ = sys_cpu_to_be16((uint16_t)DDR_DATA(val));
890 			crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(val));
891 		} else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
892 			   ((val & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
893 			uint8_t crc = (uint8_t)DDR_CRC(val);
894 
895 			if (crc5 != crc) {
896 				LOG_ERR("DDR RX crc error");
897 				return -EIO;
898 			}
899 		}
900 	}
901 
902 	return 0;
903 }
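/*
 * Note: the DDR read path above assumes the RX FIFO holds the transfer as a
 * sequence of 20-bit words: data words tagged with a DATA_ABORT preamble,
 * terminated by a CMD_CRC word whose [13:9] field is checked against the
 * CRC-5 accumulated over the header and data words.
 */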
904 
905 static inline int cdns_i3c_wait_for_idle(const struct device *dev)
906 {
907 	const struct cdns_i3c_config *config = dev->config;
908 	uint32_t start_time = k_cycle_get_32();
909 
910 	/**
911 	 * Spin waiting for device to go idle. It is unlikely that this will
912 	 * actually take any time unless the last transaction came immediately
913 	 * after an error condition.
914 	 */
915 	while (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_IDLE)) {
916 		if (k_cycle_get_32() - start_time > I3C_IDLE_TIMEOUT_CYC) {
917 			return -EAGAIN;
918 		}
919 	}
920 
921 	return 0;
922 }
923 
924 static void cdns_i3c_set_prescalers(const struct device *dev)
925 {
926 	struct cdns_i3c_data *data = dev->data;
927 	const struct cdns_i3c_config *config = dev->config;
928 	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
929 
930 	/* These formulas are from section 6.2.1 of the Cadence I3C Master User Guide. */
931 	uint32_t prescl_i3c = DIV_ROUND_UP(config->input_frequency,
932 					   (ctrl_config->scl.i3c * I3C_PRESCL_REG_SCALE)) -
933 			      1;
934 	uint32_t prescl_i2c = DIV_ROUND_UP(config->input_frequency,
935 					   (ctrl_config->scl.i2c * I2C_PRESCL_REG_SCALE)) -
936 			      1;
937 
938 	/* update with actual value */
939 	ctrl_config->scl.i3c = config->input_frequency / ((prescl_i3c + 1) * I3C_PRESCL_REG_SCALE);
940 	ctrl_config->scl.i2c = config->input_frequency / ((prescl_i2c + 1) * I2C_PRESCL_REG_SCALE);
941 
942 	LOG_DBG("%s: I3C speed = %u, PRESCL_CTRL0.i3c = 0x%x", dev->name, ctrl_config->scl.i3c,
943 		prescl_i3c);
944 	LOG_DBG("%s: I2C speed = %u, PRESCL_CTRL0.i2c = 0x%x", dev->name, ctrl_config->scl.i2c,
945 		prescl_i2c);
946 
947 	/* Calculate the OD_LOW value for an open-drain T_low period of at least I3C_BUS_TLOW_OD_MIN_NS. */
948 	uint32_t pres_step = 1000000000 / (ctrl_config->scl.i3c * 4);
949 	int32_t od_low = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
950 
951 	if (od_low < 0) {
952 		od_low = 0;
953 	}
954 	LOG_DBG("%s: PRESCL_CTRL1.od_low = 0x%x", dev->name, od_low);
955 
956 	/* disable in order to update timing */
957 	uint32_t ctrl = sys_read32(config->base + CTRL);
958 
959 	if (ctrl & CTRL_DEV_EN) {
960 		sys_write32(~CTRL_DEV_EN & ctrl, config->base + CTRL);
961 	}
962 
963 	sys_write32(PRESCL_CTRL0_I3C(prescl_i3c) | PRESCL_CTRL0_I2C(prescl_i2c),
964 		    config->base + PRESCL_CTRL0);
965 
966 	/* Sets the open drain low time relative to the push-pull. */
967 	sys_write32(PRESCL_CTRL1_OD_LOW(od_low & PRESCL_CTRL1_OD_LOW_MASK),
968 		    config->base + PRESCL_CTRL1);
969 
970 	/* reenable */
971 	if (ctrl & CTRL_DEV_EN) {
972 		sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
973 	}
974 }
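/*
 * Worked example of the prescaler math above (input clock and target rates
 * are illustrative): with a 100 MHz input clock and a requested I3C SCL of
 * 12.5 MHz, prescl_i3c = DIV_ROUND_UP(100 MHz, 12.5 MHz * 4) - 1 = 1 and the
 * achieved rate is 100 MHz / ((1 + 1) * 4) = 12.5 MHz; pres_step is then
 * 20 ns, so od_low = DIV_ROUND_UP(200, 20) - 2 = 8.
 */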
975 
976 /**
977  * @brief Compute RR0 Value from addr
978  *
979  * @param addr Address of the target
980  *
981  * @return RR0 value
982  */
983 static uint32_t prepare_rr0_dev_address(uint16_t addr)
984 {
985 	/* RR0[7:1] = addr[6:0], RR0[0] = parity bit */
986 	uint32_t ret = cdns_i3c_even_parity_byte(addr);
987 
988 	if (addr & GENMASK(9, 7)) {
989 		/* RR0[15:13] = addr[9:7] */
990 		ret |= (addr & GENMASK(9, 7)) << 6;
991 		/* RR0[11] = 10b lvr addr */
992 		ret |= DEV_ID_RR0_LVR_EXT_ADDR;
993 	}
994 
995 	return ret;
996 }
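/*
 * For a plain 7-bit address only RR0[7:1] and the parity bit in RR0[0] are
 * populated (see cdns_i3c_even_parity_byte() above); the RR0[15:13] bits and
 * the DEV_ID_RR0_LVR_EXT_ADDR flag are added only for extended (10-bit) LVR
 * addresses.
 */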
997 
998 /**
999  * @brief Program Retaining Registers with device lists
1000  *
1001  * This programs the retaining register for the controller itself.
1002  *
1003  * @param dev Pointer to controller device driver instance.
1004  */
1005 static void cdns_i3c_program_controller_retaining_reg(const struct device *dev)
1006 {
1007 	const struct cdns_i3c_config *config = dev->config;
1008 	struct cdns_i3c_data *data = dev->data;
1009 	/* Set controller retaining register */
1010 	uint8_t controller_da = I3C_CONTROLLER_ADDR;
1011 
1012 	if (!i3c_addr_slots_is_free(&data->common.attached_dev.addr_slots, controller_da)) {
1013 		controller_da =
1014 			i3c_addr_slots_next_free_find(&data->common.attached_dev.addr_slots, 0);
1015 		LOG_DBG("%s: 0x%02x DA selected for controller", dev->name, controller_da);
1016 	}
1017 	sys_write32(prepare_rr0_dev_address(controller_da), config->base + DEV_ID_RR0(0));
1018 	/* Mark the address as I3C device */
1019 	i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, controller_da);
1020 }
1021 
1022 #ifdef CONFIG_I3C_USE_IBI
1023 static int cdns_i3c_controller_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
1024 {
1025 	uint32_t sir_map;
1026 	uint32_t sir_cfg;
1027 	const struct cdns_i3c_config *config = dev->config;
1028 	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1029 	struct i3c_ccc_events i3c_events;
1030 	int ret = 0;
1031 
1032 	if (!i3c_device_is_ibi_capable(target)) {
1033 		ret = -EINVAL;
1034 		return ret;
1035 	}
1036 
1037 	/* TODO: check for duplicate in SIR */
1038 
1039 	sir_cfg = SIR_MAP_DEV_ROLE(I3C_BCR_DEVICE_ROLE(target->bcr)) |
1040 		  SIR_MAP_DEV_DA(target->dynamic_addr) |
1041 		  SIR_MAP_DEV_PL(target->data_length.max_ibi);
1042 	if (target->ibi_cb != NULL) {
1043 		sir_cfg |= SIR_MAP_DEV_ACK;
1044 	}
1045 	if (target->bcr & I3C_BCR_MAX_DATA_SPEED_LIMIT) {
1046 		sir_cfg |= SIR_MAP_DEV_SLOW;
1047 	}
1048 
1049 	LOG_DBG("%s: IBI enabling for 0x%02x (BCR 0x%02x)", dev->name, target->dynamic_addr,
1050 		target->bcr);
1051 
1052 	/* Tell target to enable IBI */
1053 	i3c_events.events = I3C_CCC_EVT_INTR;
1054 	ret = i3c_ccc_do_events_set(target, true, &i3c_events);
1055 	if (ret != 0) {
1056 		LOG_ERR("%s: Error sending IBI ENEC for 0x%02x (%d)", dev->name,
1057 			target->dynamic_addr, ret);
1058 		return ret;
1059 	}
1060 
1061 	sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1062 	sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1063 	sir_map |= SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, sir_cfg);
1064 
1065 	sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1066 
1067 	return ret;
1068 }
1069 
1070 static int cdns_i3c_controller_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
1071 {
1072 	uint32_t sir_map;
1073 	const struct cdns_i3c_config *config = dev->config;
1074 	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1075 	struct i3c_ccc_events i3c_events;
1076 	int ret = 0;
1077 
1078 	if (!i3c_device_is_ibi_capable(target)) {
1079 		ret = -EINVAL;
1080 		return ret;
1081 	}
1082 
1083 	/* Tell target to disable IBI */
1084 	i3c_events.events = I3C_CCC_EVT_INTR;
1085 	ret = i3c_ccc_do_events_set(target, false, &i3c_events);
1086 	if (ret != 0) {
1087 		LOG_ERR("%s: Error sending IBI DISEC for 0x%02x (%d)", dev->name,
1088 			target->dynamic_addr, ret);
1089 		return ret;
1090 	}
1091 
1092 	sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1093 	sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1094 	sir_map |=
1095 		SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1096 	sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1097 
1098 	return ret;
1099 }
1100 
1101 static int cdns_i3c_target_ibi_raise_hj(const struct device *dev)
1102 {
1103 	const struct cdns_i3c_config *config = dev->config;
1104 	struct cdns_i3c_data *data = dev->data;
1105 	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1106 
1107 	/* HJ requests should not be done by primary controllers */
1108 	if (!ctrl_config->is_secondary) {
1109 		LOG_ERR("%s: controller is primary, HJ not available", dev->name);
1110 		return -ENOTSUP;
1111 	}
1112 	/* Check if target already has a DA assigned to it */
1113 	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA) {
1114 		LOG_ERR("%s: HJ not available, DA already assigned", dev->name);
1115 		return -EACCES;
1116 	}
1117 	/* Check if a DISEC CCC with the DISHJ field set has been received (HJ requests disabled) */
1118 	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HJ_DIS) {
1119 		LOG_ERR("%s: HJ requests are currently disabled by DISEC", dev->name);
1120 		return -EAGAIN;
1121 	}
1122 
1123 	sys_write32(CTRL_HJ_INIT | sys_read32(config->base + CTRL), config->base + CTRL);
1124 	k_sem_reset(&data->ibi_hj_complete);
1125 	if (k_sem_take(&data->ibi_hj_complete, K_MSEC(500)) != 0) {
1126 		LOG_ERR("%s: timeout waiting for DAA after HJ", dev->name);
1127 		return -ETIMEDOUT;
1128 	}
1129 	return 0;
1130 }
1131 
1132 static int cdns_i3c_target_ibi_raise_intr(const struct device *dev, struct i3c_ibi *request)
1133 {
1134 	const struct cdns_i3c_config *config = dev->config;
1135 	const struct cdns_i3c_data *data = dev->data;
1136 	uint32_t ibi_ctrl_val;
1137 
1138 	LOG_DBG("%s: issuing IBI TIR", dev->name);
1139 
1140 	/*
1141 	 * Ensure data will fit within FIFO
1142 	 *
1143 	 * TODO: This limitation prevents burst transfers greater than the
1144 	 *       FIFO sizes and should be replaced with an implementation that
1145 	 *       utilizes the IBI data threshold interrupts.
1146 	 */
1147 	if (request->payload_len > data->hw_cfg.ibi_mem_depth) {
1148 		LOG_ERR("%s: payload too large for IBI TIR", dev->name);
1149 		return -ENOMEM;
1150 	}
1151 
1152 	cdns_i3c_write_ibi_fifo(config, request->payload, request->payload_len);
1153 
1154 	/* Write Payload Length and Start Condition */
1155 	ibi_ctrl_val = sys_read32(config->base + SLV_IBI_CTRL);
1156 	ibi_ctrl_val |= SLV_IBI_PL(request->payload_len);
1157 	ibi_ctrl_val |= SLV_IBI_REQ;
1158 	sys_write32(ibi_ctrl_val, config->base + SLV_IBI_CTRL);
1159 	return 0;
1160 }
1161 
1162 static int cdns_i3c_target_ibi_raise(const struct device *dev, struct i3c_ibi *request)
1163 {
1164 	struct cdns_i3c_data *data = dev->data;
1165 
1166 	if (request == NULL) {
1167 		return -EINVAL;
1168 	}
1169 
1170 	switch (request->ibi_type) {
1171 	case I3C_IBI_TARGET_INTR:
1172 		/* Check IP Revision since older versions of CDNS IP do not support IBI interrupt*/
1173 		if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1174 			return cdns_i3c_target_ibi_raise_intr(dev, request);
1175 		} else {
1176 			return -ENOTSUP;
1177 		}
1178 	case I3C_IBI_CONTROLLER_ROLE_REQUEST:
1179 		/* TODO: Cadence I3C can support CR, but not implemented yet */
1180 		return -ENOTSUP;
1181 	case I3C_IBI_HOTJOIN:
1182 		return cdns_i3c_target_ibi_raise_hj(dev);
1183 	default:
1184 		return -EINVAL;
1185 	}
1186 }
1187 #endif
1188 
1189 static void cdns_i3c_cancel_transfer(const struct device *dev)
1190 {
1191 	struct cdns_i3c_data *data = dev->data;
1192 	const struct cdns_i3c_config *config = dev->config;
1193 	uint32_t val;
1194 	uint32_t retry_count;
1195 
1196 	/* Disable further interrupts */
1197 	sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1198 
1199 	/* Ignore if no pending transfer */
1200 	if (data->xfer.num_cmds == 0) {
1201 		return;
1202 	}
1203 
1204 	data->xfer.num_cmds = 0;
1205 
1206 	/* Clear main enable bit to disable further transactions */
1207 	sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);
1208 
1209 	/**
1210 	 * Spin waiting for device to go idle. It is unlikely that this will
1211 	 * actually take any time since we only get here if a transaction didn't
1212 	 * complete in a long time.
1213 	 */
1214 	retry_count = I3C_MAX_IDLE_CANCEL_WAIT_RETRIES;
1215 	while (retry_count--) {
1216 		val = sys_read32(config->base + MST_STATUS0);
1217 		if (val & MST_STATUS0_IDLE) {
1218 			break;
1219 		}
1220 		k_msleep(10);
1221 	}
1222 	if (retry_count == 0) {
1223 		data->xfer.ret = -ETIMEDOUT;
1224 	}
1225 
1226 	/**
1227 	 * Flush all queues.
1228 	 */
1229 	sys_write32(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | FLUSH_CMD_RESP,
1230 		    config->base + FLUSH_CTRL);
1231 
1232 	/* Re-enable device */
1233 	sys_write32(CTRL_DEV_EN | sys_read32(config->base + CTRL), config->base + CTRL);
1234 }
1235 
1236 /**
1237  * @brief Start a I3C/I2C Transfer
1238  *
1239  * This is to be called from a I3C/I2C transfer function. This will write
1240  * all data to tx and cmd fifos
1241  *
1242  * @param dev Pointer to controller device driver instance.
1243  */
1244 static void cdns_i3c_start_transfer(const struct device *dev)
1245 {
1246 	struct cdns_i3c_data *data = dev->data;
1247 	const struct cdns_i3c_config *config = dev->config;
1248 	struct cdns_i3c_xfer *xfer = &data->xfer;
1249 
1250 	/* Ensure no pending command descriptor FIFO empty interrupt */
1251 	sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR);
1252 
1253 	/* Make sure RX FIFO is empty. */
1254 	while (!cdns_i3c_rx_fifo_empty(config)) {
1255 		(void)sys_read32(config->base + RX_FIFO);
1256 	}
1257 	/* Make sure CMDR FIFO is empty too */
1258 	while (!cdns_i3c_cmd_rsp_fifo_empty(config)) {
1259 		(void)sys_read32(config->base + CMDR);
1260 	}
1261 
1262 	/* Write all tx data to fifo */
1263 	for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1264 		if (xfer->cmds[i].hdr == I3C_DATA_RATE_SDR) {
1265 			if (!(xfer->cmds[i].cmd0 & CMD0_FIFO_RNW)) {
1266 				cdns_i3c_write_tx_fifo(config, xfer->cmds[i].buf,
1267 						       xfer->cmds[i].len);
1268 			}
1269 		} else if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1270 			/* A DDR transfer requires sending the header block */
1271 			cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_header,
1272 					       DDR_CRC_AND_HEADER_SIZE);
1273 			/* If not a read operation, send the data words followed by the data CRC */
1274 			if (!(DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1275 				uint8_t *buf = (uint8_t *)xfer->cmds[i].buf;
1276 				uint32_t ddr_message = 0;
1277 				uint16_t ddr_data_payload = sys_get_be16(&buf[0]);
1278 				/* HDR-DDR Data Words */
1279 				ddr_message = (DDR_PREAMBLE_DATA_ABORT |
1280 					       prepare_ddr_word(ddr_data_payload));
1281 				cdns_i3c_write_tx_fifo(config, &ddr_message,
1282 						       DDR_CRC_AND_HEADER_SIZE);
1283 				for (int j = 2; j < ((xfer->cmds[i].len - 2) * 2); j += 2) {
1284 					ddr_data_payload = sys_get_be16(&buf[j]);
1285 					ddr_message = (DDR_PREAMBLE_DATA_ABORT_ALT |
1286 						       prepare_ddr_word(ddr_data_payload));
1287 					cdns_i3c_write_tx_fifo(config, &ddr_message,
1288 							       DDR_CRC_AND_HEADER_SIZE);
1289 				}
1290 				/* HDR-DDR CRC Word */
1291 				cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_crc,
1292 						       DDR_CRC_AND_HEADER_SIZE);
1293 			}
1294 		} else {
1295 			xfer->ret = -ENOTSUP;
1296 			return;
1297 		}
1298 	}
1299 
1300 	/* Write all data to cmd fifos */
1301 	for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1302 		/* The command ID is just the msg index. */
1303 		xfer->cmds[i].cmd1 |= CMD1_FIFO_CMDID(i);
1304 		sys_write32(xfer->cmds[i].cmd1, config->base + CMD1_FIFO);
1305 		sys_write32(xfer->cmds[i].cmd0, config->base + CMD0_FIFO);
1306 
1307 		if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1308 			sys_write32(0x00, config->base + CMD1_FIFO);
1309 			if ((DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1310 				sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(1),
1311 					    config->base + CMD0_FIFO);
1312 			} else {
1313 				sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(xfer->cmds[i].len),
1314 					    config->base + CMD0_FIFO);
1315 			}
1316 		}
1317 	}
1318 
1319 	/* kickoff transfer */
1320 	sys_write32(CTRL_MCS | sys_read32(config->base + CTRL), config->base + CTRL);
1321 	sys_write32(MST_INT_CMDD_EMP, config->base + MST_IER);
1322 }
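/*
 * Note on the sequence above: the TX FIFO is primed with all payload (and, for
 * HDR-DDR, header/CRC) words before any command descriptor is pushed, the
 * descriptors are then queued through CMD1_FIFO/CMD0_FIFO, and finally
 * CTRL_MCS is set with the MST_INT_CMDD_EMP interrupt unmasked so the ISR can
 * detect when the command FIFO has drained.
 */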
1323 
1324 /**
1325  * @brief Send Common Command Code (CCC).
1326  *
1327  * @see i3c_do_ccc
1328  *
1329  * @param dev Pointer to controller device driver instance.
1330  * @param payload Pointer to CCC payload.
1331  *
1332  * @return @see i3c_do_ccc
1333  */
1334 static int cdns_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload)
1335 {
1336 	const struct cdns_i3c_config *config = dev->config;
1337 	struct cdns_i3c_data *data = dev->data;
1338 	struct cdns_i3c_cmd *cmd;
1339 	int ret = 0;
1340 	uint8_t num_cmds = 0;
1341 
1342 	/* make sure we are currently the active controller */
1343 	if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1344 		return -EACCES;
1345 	}
1346 
1347 	if (payload == NULL) {
1348 		return -EINVAL;
1349 	}
1350 
1351 	/*
1352 	 * Ensure data will fit within FIFOs.
1353 	 *
1354 	 * TODO: This limitation prevents burst transfers greater than the
1355 	 *       FIFO sizes and should be replaced with an implementation that
1356 	 *       utilizes the RX/TX data threshold interrupts.
1357 	 */
1358 	uint32_t num_msgs =
1359 		1 + ((payload->ccc.data_len > 0) ? payload->targets.num_targets
1360 						 : MAX(payload->targets.num_targets - 1, 0));
1361 	if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1362 		LOG_ERR("%s: Too many messages", dev->name);
1363 		return -ENOMEM;
1364 	}
1365 
1366 	uint32_t rxsize = 0;
1367 	/* defining byte is stored in a separate register for direct CCCs */
1368 	uint32_t txsize =
1369 		i3c_ccc_is_payload_broadcast(payload) ? ROUND_UP(payload->ccc.data_len, 4) : 0;
1370 
1371 	for (int i = 0; i < payload->targets.num_targets; i++) {
1372 		if (payload->targets.payloads[i].rnw) {
1373 			rxsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1374 		} else {
1375 			txsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1376 		}
1377 	}
1378 	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
1379 		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
1380 		return -ENOMEM;
1381 	}
1382 
1383 	LOG_DBG("%s: CCC[0x%02x]", dev->name, payload->ccc.id);
1384 
1385 	k_mutex_lock(&data->bus_lock, K_FOREVER);
1386 
1387 	/* wait for idle */
1388 	ret = cdns_i3c_wait_for_idle(dev);
1389 	if (ret != 0) {
1390 		goto error;
1391 	}
1392 
1393 	/* if this is a direct CCC */
1394 	if (!i3c_ccc_is_payload_broadcast(payload)) {
1395 		/* if the CCC has no data bytes, then the target payload must be in
1396 		 * the same command buffer
1397 		 */
1398 		for (int i = 0; i < payload->targets.num_targets; i++) {
1399 			cmd = &data->xfer.cmds[i];
1400 			num_cmds++;
1401 			cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1402 			cmd->cmd0 = CMD0_FIFO_IS_CCC;
1403 			/* if there is a defining byte */
1404 			if (payload->ccc.data_len == 1) {
1405 				/* Only revision 1p7 supports defining byte for direct CCCs */
1406 				if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1407 					cmd->cmd0 |= CMD0_FIFO_IS_DB;
1408 					cmd->cmd1 |= CMD1_FIFO_DB(payload->ccc.data[0]);
1409 				} else {
1410 					LOG_ERR("%s: Defining Byte with Direct CCC not supported "
1411 						"with rev %lup%lu",
1412 						dev->name, REV_ID_REV_MAJOR(data->hw_cfg.rev_id),
1413 						REV_ID_REV_MINOR(data->hw_cfg.rev_id));
1414 					ret = -ENOTSUP;
1415 					goto error;
1416 				}
1417 			} else if (payload->ccc.data_len > 1) {
1418 				LOG_ERR("%s: Defining Byte length greater than 1", dev->name);
1419 				ret = -EINVAL;
1420 				goto error;
1421 			}
1422 			/* for a short CCC, i.e. where a direct ccc has multiple targets,
1423 			 * BCH must be 0 for subsequent targets and RSBC must be 1, otherwise
1424 			 * if there is just one target, RSBC must be 0 on the first target
1425 			 */
1426 			if (i == 0) {
1427 				cmd->cmd0 |= CMD0_FIFO_BCH;
1428 			}
1429 			if (i < (payload->targets.num_targets - 1)) {
1430 				cmd->cmd0 |= CMD0_FIFO_RSBC;
1431 			}
1432 			cmd->buf = payload->targets.payloads[i].data;
1433 			cmd->len = payload->targets.payloads[i].data_len;
1434 			cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(payload->targets.payloads[i].addr) |
1435 				     CMD0_FIFO_PL_LEN(payload->targets.payloads[i].data_len);
1436 			if (payload->targets.payloads[i].rnw) {
1437 				cmd->cmd0 |= CMD0_FIFO_RNW;
1438 			}
1439 			cmd->hdr = I3C_DATA_RATE_SDR;
1440 			/*
1441 			 * write the address of num_xfer which is to be updated upon message
1442 			 * completion
1443 			 */
1444 			cmd->num_xfer = &(payload->targets.payloads[i].num_xfer);
1445 		}
1446 	} else {
1447 		cmd = &data->xfer.cmds[0];
1448 		num_cmds++;
1449 		cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1450 		cmd->cmd0 = CMD0_FIFO_IS_CCC | CMD0_FIFO_BCH;
1451 		cmd->hdr = I3C_DATA_RATE_SDR;
1452 
1453 		if (payload->ccc.data_len > 0) {
1454 			/* Write additional data for CCC if needed */
1455 			cmd->buf = payload->ccc.data;
1456 			cmd->len = payload->ccc.data_len;
1457 			cmd->cmd0 |= CMD0_FIFO_PL_LEN(payload->ccc.data_len);
1458 			/* write the address of num_xfer which is to be updated upon message
1459 			 * completion
1460 			 */
1461 			cmd->num_xfer = &(payload->ccc.num_xfer);
1462 		} else {
1463 			/* no data to transfer */
1464 			cmd->len = 0;
1465 			cmd->num_xfer = NULL;
1466 		}
1467 	}
1468 
1469 	data->xfer.ret = -ETIMEDOUT;
1470 	data->xfer.num_cmds = num_cmds;
1471 
1472 	cdns_i3c_start_transfer(dev);
1473 	if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
1474 		cdns_i3c_cancel_transfer(dev);
1475 	}
1476 
1477 	if (data->xfer.ret < 0) {
1478 		LOG_ERR("%s: CCC[0x%02x] error (%d)", dev->name, payload->ccc.id, data->xfer.ret);
1479 	}
1480 
1481 	ret = data->xfer.ret;
1482 error:
1483 	k_mutex_unlock(&data->bus_lock);
1484 
1485 	return ret;
1486 }
1487 
1488 /**
1489  * @brief Perform Dynamic Address Assignment.
1490  *
1491  * @see i3c_do_daa
1492  *
1493  * @param dev Pointer to controller device driver instance.
1494  *
1495  * @return @see i3c_do_daa
1496  */
1497 static int cdns_i3c_do_daa(const struct device *dev)
1498 {
1499 	struct cdns_i3c_data *data = dev->data;
1500 	const struct cdns_i3c_config *config = dev->config;
1501 	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1502 	uint8_t last_addr = 0;
1503 
1504 	/* DAA should not be done by secondary controllers */
1505 	if (ctrl_config->is_secondary) {
1506 		return -EACCES;
1507 	}
1508 
1509 	/* read dev active reg */
1510 	uint32_t olddevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1511 	/* ignore the controller register */
1512 	olddevs |= BIT(0);
1513 
1514 	/* Assign dynamic addresses to available RRs */
1515 	/* Loop through each clear bit */
1516 	for (uint8_t i = find_lsb_set(~olddevs); i <= data->max_devs; i++) {
1517 		uint8_t rr_idx = i - 1;
1518 
1519 		if (~olddevs & BIT(rr_idx)) {
1520 			/* Read RRx registers */
1521 			last_addr = i3c_addr_slots_next_free_find(
1522 				&data->common.attached_dev.addr_slots, last_addr + 1);
1523 			/* Write RRx registers */
1524 			sys_write32(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
1525 				    config->base + DEV_ID_RR0(rr_idx));
1526 			sys_write32(0, config->base + DEV_ID_RR1(rr_idx));
1527 			sys_write32(0, config->base + DEV_ID_RR2(rr_idx));
1528 		}
1529 	}
1530 
1531 	/* during ENTDAA, the Cadence I3C IP will assign the addresses pre-programmed in the RRs */
1532 	struct i3c_ccc_payload entdaa_ccc;
1533 
1534 	memset(&entdaa_ccc, 0, sizeof(entdaa_ccc));
1535 	entdaa_ccc.ccc.id = I3C_CCC_ENTDAA;
1536 
1537 	int status = cdns_i3c_do_ccc(dev, &entdaa_ccc);
1538 
1539 	if (status != 0) {
1540 		return status;
1541 	}
1542 
1543 	/* read again dev active reg  */
1544 	uint32_t newdevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1545 	/* look for new bits that were set */
1546 	newdevs &= ~olddevs;
1547 
1548 	if (newdevs) {
1549 		/* loop through each set bit for new devices */
1550 		for (uint8_t i = find_lsb_set(newdevs); i <= find_msb_set(newdevs); i++) {
1551 			uint8_t rr_idx = i - 1;
1552 
1553 			if (newdevs & BIT(rr_idx)) {
1554 				/* Read RRx registers */
1555 				uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(rr_idx));
1556 				uint32_t dev_id_rr1 = sys_read32(config->base + DEV_ID_RR1(rr_idx));
1557 				uint32_t dev_id_rr2 = sys_read32(config->base + DEV_ID_RR2(rr_idx));
1558 
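				/* Decode the retaining registers: RR1 holds PID[47:16] and
				 * RR2[31:16] holds PID[15:0]; RR0[7:1] is the assigned dynamic
				 * address, RR2[15:8] the BCR and RR2[7:0] the DCR.
				 */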
1559 				uint64_t pid = ((uint64_t)dev_id_rr1 << 16) + (dev_id_rr2 >> 16);
1560 				uint8_t dyn_addr = (dev_id_rr0 & 0xFE) >> 1;
1561 				uint8_t bcr = dev_id_rr2 >> 8;
1562 				uint8_t dcr = dev_id_rr2 & 0xFF;
1563 
1564 				const struct i3c_device_id i3c_id = I3C_DEVICE_ID(pid);
1565 				struct i3c_device_desc *target = i3c_device_find(dev, &i3c_id);
1566 
1567 				if (target == NULL) {
1568 					LOG_INF("%s: PID 0x%012llx is not in registered device "
1569 						"list, given DA 0x%02x",
1570 						dev->name, pid, dyn_addr);
1571 					i3c_addr_slots_mark_i3c(
1572 						&data->common.attached_dev.addr_slots, dyn_addr);
1573 				} else {
1574 					target->dynamic_addr = dyn_addr;
1575 					target->bcr = bcr;
1576 					target->dcr = dcr;
1577 
1578 					LOG_DBG("%s: PID 0x%012llx assigned dynamic address 0x%02x",
1579 						dev->name, pid, dyn_addr);
1580 				}
1581 			}
1582 		}
1583 	} else {
1584 		LOG_DBG("%s: ENTDAA: No devices found", dev->name);
1585 	}
1586 
1587 	/* mark slot as not free, may already be set if already attached */
1588 	data->free_rr_slots &= ~newdevs;
1589 
1590 	/* Re-enable Hot-Join requests. The HJ response (DISEC HJ) follows the CTRL register value */
1591 	struct i3c_ccc_events i3c_events;
1592 
1593 	i3c_events.events = I3C_CCC_EVT_HJ;
1594 	status = i3c_ccc_do_events_all_set(dev, true, &i3c_events);
1595 	if (status != 0) {
1596 		LOG_DBG("%s: Broadcast ENEC was NACK", dev->name);
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 /**
1603  * @brief Configure I2C hardware.
1604  *
1605  * @param dev Pointer to controller device driver instance.
1606  * @param config Value of the configuration parameters.
1607  *
1608  * @retval 0 If successful.
1609  * @retval -EINVAL If invalid configure parameters.
1610  * @retval -EIO General Input/Output errors.
1611  * @retval -ENOSYS If not implemented.
1612  */
1613 static int cdns_i3c_i2c_api_configure(const struct device *dev, uint32_t config)
1614 {
1615 	struct cdns_i3c_data *data = dev->data;
1616 	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1617 
1618 	switch (I2C_SPEED_GET(config)) {
1619 	case I2C_SPEED_STANDARD:
1620 		ctrl_config->scl.i2c = 100000;
1621 		break;
1622 	case I2C_SPEED_FAST:
1623 		ctrl_config->scl.i2c = 400000;
1624 		break;
1625 	case I2C_SPEED_FAST_PLUS:
1626 		ctrl_config->scl.i2c = 1000000;
1627 		break;
1628 	case I2C_SPEED_HIGH:
1629 		ctrl_config->scl.i2c = 3400000;
1630 		break;
1631 	case I2C_SPEED_ULTRA:
1632 		ctrl_config->scl.i2c = 5000000;
1633 		break;
1634 	default:
1635 		break;
1636 	}
1637 
1638 	k_mutex_lock(&data->bus_lock, K_FOREVER);
1639 	cdns_i3c_set_prescalers(dev);
1640 	k_mutex_unlock(&data->bus_lock);
1641 
1642 	return 0;
1643 }
1644 
1645 /**
1646  * @brief Configure I3C hardware.
1647  *
1648  * @param dev Pointer to controller device driver instance.
1649  * @param type Type of configuration parameters being passed
1650  *             in @p config.
1651  * @param config Pointer to the configuration parameters.
1652  *
1653  * @retval 0 If successful.
1654  * @retval -EINVAL If invalid configure parameters.
1655  * @retval -EIO General Input/Output errors.
1656  * @retval -ENOSYS If not implemented.
1657  */
1658 static int cdns_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config)
1659 {
1660 	struct cdns_i3c_data *data = dev->data;
1661 	struct i3c_config_controller *ctrl_cfg = config;
1662 
1663 	if ((ctrl_cfg->scl.i2c == 0U) || (ctrl_cfg->scl.i3c == 0U)) {
1664 		return -EINVAL;
1665 	}
1666 
1667 	data->common.ctrl_config.scl.i3c = ctrl_cfg->scl.i3c;
1668 	data->common.ctrl_config.scl.i2c = ctrl_cfg->scl.i2c;
1669 
1670 	k_mutex_lock(&data->bus_lock, K_FOREVER);
1671 	cdns_i3c_set_prescalers(dev);
1672 	k_mutex_unlock(&data->bus_lock);
1673 
1674 	return 0;
1675 }
1676 
1677 /**
1678  * @brief Complete a I3C/I2C Transfer
1679  *
1680  * This is to be called from an ISR when the Command Response FIFO
1681  * is Empty. This will check each Command Response reading the RX
1682  * FIFO if message was a RnW and if any message had an error.
1683  *
1684  * @param dev Pointer to controller device driver instance.
1685  */
1686 static void cdns_i3c_complete_transfer(const struct device *dev)
1687 {
1688 	struct cdns_i3c_data *data = dev->data;
1689 	const struct cdns_i3c_config *config = dev->config;
1690 	uint32_t cmdr;
1691 	uint32_t id = 0;
1692 	uint32_t xfer = 0;
1693 	int ret = 0;
1694 	struct cdns_i3c_cmd *cmd;
1695 	bool was_full;
1696 
1697 	/* Used only to determine the cause of a controller abort */
1698 	was_full = cdns_i3c_rx_fifo_full(config);
1699 
1700 	/* Disable further interrupts */
1701 	sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1702 
1703 	/* Ignore if no pending transfer */
1704 	if (data->xfer.num_cmds == 0) {
1705 		return;
1706 	}
1707 
1708 	/* Process all results in fifo */
1709 	for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
1710 	     !(status0 & MST_STATUS0_CMDR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
1711 		cmdr = sys_read32(config->base + CMDR);
1712 		id = CMDR_CMDID(cmdr);
1713 
1714 		if (id == CMDR_CMDID_HJACK_DISEC || id == CMDR_CMDID_HJACK_ENTDAA ||
1715 		    id >= data->xfer.num_cmds) {
1716 			continue;
1717 		}
1718 
1719 		cmd = &data->xfer.cmds[id];
1720 
1721 		xfer = MIN(CMDR_XFER_BYTES(cmdr), cmd->len);
1722 		if (cmd->num_xfer != NULL) {
1723 			*cmd->num_xfer = xfer;
1724 		}
1725 		/* Read any rx data into buffer */
1726 		if (cmd->cmd0 & CMD0_FIFO_RNW) {
1727 			ret = cdns_i3c_read_rx_fifo(config, cmd->buf, xfer);
1728 		}
1729 
1730 		if ((cmd->hdr == I3C_DATA_RATE_HDR_DDR) &&
1731 		    (DDR_DATA(cmd->ddr_header) & HDR_CMD_RD)) {
1732 			ret = cdns_i3c_read_rx_fifo_ddr_xfer(config, cmd->buf, xfer,
1733 							     cmd->ddr_header);
1734 		}
1735 
1736 		/* Record error */
1737 		cmd->error = CMDR_ERROR(cmdr);
1738 	}
1739 
1740 	for (int i = 0; i < data->xfer.num_cmds; i++) {
1741 		switch (data->xfer.cmds[i].error) {
1742 		case CMDR_NO_ERROR:
1743 			break;
1744 
1745 		case CMDR_MST_ABORT:
1746 			/*
1747 			 * A controller abort is forced if the RX FIFO fills up.
1748 			 * The FIFO can also be full simply because the packet length
1749 			 * equals the FIFO depth. Check that the requested length is
1750 			 * greater than the total transferred to confirm that is not
1751 			 * the case. Otherwise the abort was caused by the buffer
1752 			 * length being met while the target did not signal an End of
1753 			 * Data (EoD) in the T bit. Do not treat that condition as an
1754 			 * error because some targets will simply auto-increment the
1755 			 * read address well beyond the buffer without ever giving
1756 			 * an EoD.
1757 			 */
1758 			if ((was_full) && (data->xfer.cmds[i].len > *data->xfer.cmds[i].num_xfer)) {
1759 				ret = -ENOSPC;
1760 			} else {
1761 				LOG_DBG("%s: Controller Abort due to buffer length exceeded with "
1762 					"no EoD from target",
1763 					dev->name);
1764 			}
1765 			break;
1766 
1767 		case CMDR_M0_ERROR: {
1768 			uint8_t ccc = data->xfer.cmds[i].cmd1 & 0xFF;
1769 			/*
1770 			 * An M0 error indicates an illegally formatted CCC, e.g. the
1771 			 * Controller receives 1 byte instead of 2 with the GETMWL CCC. This
1772 			 * can be problematic for CCCs that have a variable length, such as
1773 			 * GETMXDS and GETCAPS. Verify that the number of bytes received
1774 			 * matches what the specification allows and ignore the error. The IP
1775 			 * will still retransmit the same CCC and there is nothing that can be
1776 			 * done to prevent this. It is still up to the application to read
1777 			 * `num_xfer` to determine the number of bytes returned.
1778 			 */
1779 			if (ccc == I3C_CCC_GETMXDS) {
1780 				/*
1781 				 * It cannot be known ahead of time whether GETMXDS
1782 				 * format 1 or format 2 will be returned.
1783 				 */
1784 				if ((*data->xfer.cmds[i].num_xfer !=
1785 				     sizeof(((union i3c_ccc_getmxds *)0)->fmt1)) &&
1786 				    (*data->xfer.cmds[i].num_xfer !=
1787 				     sizeof(((union i3c_ccc_getmxds *)0)->fmt2))) {
1788 					ret = -EIO;
1789 				}
1790 			} else if (ccc == I3C_CCC_GETCAPS) {
1791 				/* GETCAPS can only return 1-4 bytes */
1792 				if (*data->xfer.cmds[i].num_xfer > sizeof(union i3c_ccc_getcaps)) {
1793 					ret = -EIO;
1794 				}
1795 			} else {
1796 				ret = -EIO;
1797 			}
1798 			break;
1799 		}
1800 
1801 		case CMDR_DDR_PREAMBLE_ERROR:
1802 		case CMDR_DDR_PARITY_ERROR:
1803 		case CMDR_M1_ERROR:
1804 		case CMDR_M2_ERROR:
1805 		case CMDR_NACK_RESP:
1806 		case CMDR_DDR_DROPPED:
1807 			ret = -EIO;
1808 			break;
1809 
1810 		case CMDR_DDR_RX_FIFO_OVF:
1811 		case CMDR_DDR_TX_FIFO_UNF:
1812 			ret = -ENOSPC;
1813 			break;
1814 
1815 		case CMDR_INVALID_DA:
1816 		default:
1817 			ret = -EINVAL;
1818 			break;
1819 		}
1820 	}
1821 
1822 	data->xfer.ret = ret;
1823 
1824 	/* Indicate no transfer is pending */
1825 	data->xfer.num_cmds = 0;
1826 
1827 	k_sem_give(&data->xfer.complete);
1828 }
1829 
1830 /**
1831  * @brief Transfer messages in I2C mode.
1832  *
1833  * @param dev Pointer to device driver instance.
1834  * @param i2c_dev Pointer to the I2C device descriptor.
1835  * @param msgs Pointer to I2C messages.
1836  * @param num_msgs Number of messages to transfer.
1837  *
1838  * @retval 0 If successful.
1839  * @retval -EIO General input / output error.
1840  * @retval -EINVAL Address not registered
1841  */
1842 static int cdns_i3c_i2c_transfer(const struct device *dev, struct i3c_i2c_device_desc *i2c_dev,
1843 				 struct i2c_msg *msgs, uint8_t num_msgs)
1844 {
1845 	const struct cdns_i3c_config *config = dev->config;
1846 	struct cdns_i3c_data *data = dev->data;
1847 	uint32_t txsize = 0;
1848 	uint32_t rxsize = 0;
1849 	int ret;
1850 
1851 	/* make sure we are currently the active controller */
1852 	if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1853 		return -EACCES;
1854 	}
1855 
1856 	if (num_msgs == 0) {
1857 		return 0;
1858 	}
1859 
1860 	if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1861 		LOG_ERR("%s: Too many messages", dev->name);
1862 		return -ENOMEM;
1863 	}
1864 
1865 	/*
1866 	 * Ensure data will fit within FIFOs
1867 	 */
1868 	for (unsigned int i = 0; i < num_msgs; i++) {
1869 		if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
1870 			rxsize += ROUND_UP(msgs[i].len, 4);
1871 		} else {
1872 			txsize += ROUND_UP(msgs[i].len, 4);
1873 		}
1874 	}
1875 	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
1876 		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
1877 		return -ENOMEM;
1878 	}
1879 
1880 	k_mutex_lock(&data->bus_lock, K_FOREVER);
1881 
1882 	/* wait for idle */
1883 	ret = cdns_i3c_wait_for_idle(dev);
1884 	if (ret != 0) {
1885 		goto error;
1886 	}
1887 
1888 	for (unsigned int i = 0; i < num_msgs; i++) {
1889 		struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
1890 
1891 		cmd->len = msgs[i].len;
1892 		cmd->buf = msgs[i].buf;
1893 		/* not an i3c transfer, but must be set to sdr */
1894 		cmd->hdr = I3C_DATA_RATE_SDR;
1895 
1896 		cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
1897 		cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(i2c_dev->addr);
1898 		cmd->cmd0 |= CMD0_FIFO_PL_LEN(msgs[i].len);
1899 
1900 		/* Send repeated start on all transfers except the last or those marked STOP. */
1901 		if ((i < (num_msgs - 1)) && ((msgs[i].flags & I2C_MSG_STOP) == 0)) {
1902 			cmd->cmd0 |= CMD0_FIFO_RSBC;
1903 		}
1904 
1905 		if (msgs[i].flags & I2C_MSG_ADDR_10_BITS) {
1906 			cmd->cmd0 |= CMD0_FIFO_IS_10B;
1907 		}
1908 
1909 		if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
1910 			cmd->cmd0 |= CMD0_FIFO_RNW;
1911 		}
1912 
1913 		/* i2c transfers are a don't care for num_xfer */
1914 		cmd->num_xfer = NULL;
1915 	}
1916 
1917 	data->xfer.ret = -ETIMEDOUT;
1918 	data->xfer.num_cmds = num_msgs;
1919 
1920 	cdns_i3c_start_transfer(dev);
1921 	if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
1922 		cdns_i3c_cancel_transfer(dev);
1923 	}
1924 
1925 	ret = data->xfer.ret;
1926 error:
1927 	k_mutex_unlock(&data->bus_lock);
1928 
1929 	return ret;
1930 }
1931 
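/**
 * @brief Find the retaining register (RR) slot for a device.
 *
 * If @p dyn_addr is 0, the index of the first free RR slot is returned.
 * Otherwise the active RRs are searched for an I3C entry whose dynamic
 * address matches @p dyn_addr.
 *
 * @retval RR index if successful.
 * @retval -ENOSPC If no free RR slot is available.
 * @retval -EINVAL If no matching RR entry is found.
 */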
1932 static int cdns_i3c_master_get_rr_slot(const struct device *dev, uint8_t dyn_addr)
1933 {
1934 	struct cdns_i3c_data *data = dev->data;
1935 	const struct cdns_i3c_config *config = dev->config;
1936 	uint8_t rr_idx, i;
1937 	uint32_t rr, activedevs;
1938 
1939 	/* If it does not have a dynamic address, then assign it a free one */
1940 	if (dyn_addr == 0) {
1941 		if (!data->free_rr_slots) {
1942 			return -ENOSPC;
1943 		}
1944 
1945 		return find_lsb_set(data->free_rr_slots) - 1;
1946 	}
1947 
1948 	/* Device already has a Dynamic Address, so assume it is already in the RRs */
1949 	activedevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1950 	/* skip itself */
1951 	activedevs &= ~BIT(0);
1952 
1953 	/* loop through each set bit for new devices */
1954 	for (i = find_lsb_set(activedevs); i <= find_msb_set(activedevs); i++) {
1955 		rr_idx = i - 1;
1956 		if (activedevs & BIT(rr_idx)) {
1957 			rr = sys_read32(config->base + DEV_ID_RR0(rr_idx));
1958 			if ((rr & DEV_ID_RR0_IS_I3C) && DEV_ID_RR0_GET_DEV_ADDR(rr) == dyn_addr) {
1959 				return rr_idx;
1960 			}
1961 		}
1962 	}
1963 
1964 	return -EINVAL;
1965 }
1966 
1967 static int cdns_i3c_attach_device(const struct device *dev, struct i3c_device_desc *desc)
1968 {
1969 	/*
1970 	 * Mark devices as active. Devices that are found during DAA are marked active there and
1971 	 * are given the exact DA programmed in their RR; otherwise they are set as active here.
1972 	 * If the dynamic address is set, it is assumed that it was already initialized by the
1973 	 * primary controller. When assigned through ENTDAA, the dynamic address, bcr, dcr, and pid
1974 	 * are all set in the RR along with setting the device as active. If it has a static addr,
1975 	 * then it is assumed that it will be programmed with SETDASA and needs to be marked
1976 	 * as active before sending out SETDASA.
1977 	 */
1978 	if ((desc->static_addr != 0) || (desc->dynamic_addr != 0)) {
1979 		const struct cdns_i3c_config *config = dev->config;
1980 		struct cdns_i3c_data *data = dev->data;
1981 
1982 		int slot = cdns_i3c_master_get_rr_slot(dev, desc->dynamic_addr ? desc->dynamic_addr
1983 									       : desc->static_addr);
1984 
1985 		if (slot < 0) {
1986 			LOG_ERR("%s: no space for i3c device: %s", dev->name, desc->dev->name);
1987 			return slot;
1988 		}
1989 
1990 		k_mutex_lock(&data->bus_lock, K_FOREVER);
1991 
1992 		sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
1993 			    config->base + DEVS_CTRL);
1994 
1995 		data->cdns_i3c_i2c_priv_data[slot].id = slot;
1996 		desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
1997 		data->free_rr_slots &= ~BIT(slot);
1998 
1999 		uint32_t dev_id_rr0 =
2000 			DEV_ID_RR0_IS_I3C |
2001 			prepare_rr0_dev_address(desc->dynamic_addr ? desc->dynamic_addr
2002 								   : desc->static_addr);
2003 		uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
2004 		uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF);
2005 
2006 		sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
2007 		sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(slot));
2008 		sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
2009 
2010 		k_mutex_unlock(&data->bus_lock);
2011 	}
2012 
2013 	return 0;
2014 }
2015 
2016 static int cdns_i3c_reattach_device(const struct device *dev, struct i3c_device_desc *desc,
2017 				    uint8_t old_dyn_addr)
2018 {
2019 	const struct cdns_i3c_config *config = dev->config;
2020 	struct cdns_i3c_data *data = dev->data;
2021 	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
2022 
2023 	if (cdns_i3c_device_data == NULL) {
2024 		LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
2025 		return -EINVAL;
2026 	}
2027 
2028 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2029 
2030 	uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(desc->dynamic_addr);
2031 	uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
2032 	uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF) | DEV_ID_RR2_BCR(desc->bcr) |
2033 			      DEV_ID_RR2_DCR(desc->dcr);
2034 
2035 	sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(cdns_i3c_device_data->id));
2036 	sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(cdns_i3c_device_data->id));
2037 	sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(cdns_i3c_device_data->id));
2038 
2039 	k_mutex_unlock(&data->bus_lock);
2040 
2041 	return 0;
2042 }
2043 
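/**
 * @brief Detach an I3C device from the controller.
 *
 * Clears the device's slot in DEVS_CTRL and returns its retaining register
 * to the free pool.
 */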
2044 static int cdns_i3c_detach_device(const struct device *dev, struct i3c_device_desc *desc)
2045 {
2046 	const struct cdns_i3c_config *config = dev->config;
2047 	struct cdns_i3c_data *data = dev->data;
2048 	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
2049 
2050 	if (cdns_i3c_device_data == NULL) {
2051 		LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
2052 		return -EINVAL;
2053 	}
2054 
2055 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2056 
2057 	sys_write32(sys_read32(config->base + DEVS_CTRL) |
2058 			    DEVS_CTRL_DEV_CLR(cdns_i3c_device_data->id),
2059 		    config->base + DEVS_CTRL);
2060 	data->free_rr_slots |= BIT(cdns_i3c_device_data->id);
2061 	desc->controller_priv = NULL;
2062 
2063 	k_mutex_unlock(&data->bus_lock);
2064 
2065 	return 0;
2066 }
2067 
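/**
 * @brief Attach an I2C device to the controller.
 *
 * Claims a free retaining register slot, programs the static address and
 * LVR into it, and marks the slot active in DEVS_CTRL.
 */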
2068 static int cdns_i3c_i2c_attach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2069 {
2070 	const struct cdns_i3c_config *config = dev->config;
2071 	struct cdns_i3c_data *data = dev->data;
2072 
2073 	int slot = cdns_i3c_master_get_rr_slot(dev, 0);
2074 
2075 	if (slot < 0) {
2076 		LOG_ERR("%s: no space for i2c device: addr 0x%02x", dev->name, desc->addr);
2077 		return slot;
2078 	}
2079 
2080 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2081 
2082 	uint32_t dev_id_rr0 = prepare_rr0_dev_address(desc->addr);
2083 	uint32_t dev_id_rr2 = DEV_ID_RR2_LVR(desc->lvr);
2084 
2085 	sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
2086 	sys_write32(0, config->base + DEV_ID_RR1(slot));
2087 	sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
2088 
2089 	data->cdns_i3c_i2c_priv_data[slot].id = slot;
2090 	desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
2091 	data->free_rr_slots &= ~BIT(slot);
2092 
2093 	sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
2094 		    config->base + DEVS_CTRL);
2095 
2096 	k_mutex_unlock(&data->bus_lock);
2097 
2098 	return 0;
2099 }
2100 
2101 static int cdns_i3c_i2c_detach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2102 {
2103 	const struct cdns_i3c_config *config = dev->config;
2104 	struct cdns_i3c_data *data = dev->data;
2105 	struct cdns_i3c_i2c_dev_data *cdns_i2c_device_data = desc->controller_priv;
2106 
2107 	if (cdns_i2c_device_data == NULL) {
2108 		LOG_ERR("%s: device not attached", dev->name);
2109 		return -EINVAL;
2110 	}
2111 
2112 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2113 
2114 	sys_write32(sys_read32(config->base + DEVS_CTRL) |
2115 			    DEVS_CTRL_DEV_CLR(cdns_i2c_device_data->id),
2116 		    config->base + DEVS_CTRL);
2117 	data->free_rr_slots |= BIT(cdns_i2c_device_data->id);
2118 	desc->controller_priv = NULL;
2119 
2120 	k_mutex_unlock(&data->bus_lock);
2121 
2122 	return 0;
2123 }
2124 
2125 /**
2126  * @brief Transfer messages in I3C mode.
2127  *
2128  * @see i3c_transfer
2129  *
2130  * @param dev Pointer to device driver instance.
2131  * @param target Pointer to target device descriptor.
2132  * @param msgs Pointer to I3C messages.
2133  * @param num_msgs Number of messages to transfer.
2134  *
2135  * @return @see i3c_transfer
2136  */
2137 static int cdns_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
2138 			     struct i3c_msg *msgs, uint8_t num_msgs)
2139 {
2140 	const struct cdns_i3c_config *config = dev->config;
2141 	struct cdns_i3c_data *data = dev->data;
2142 	int txsize = 0;
2143 	int rxsize = 0;
2144 	int ret;
2145 
2146 	/* make sure we are currently the active controller */
2147 	if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
2148 		return -EACCES;
2149 	}
2150 
2151 	if (num_msgs == 0) {
2152 		return 0;
2153 	}
2154 
2155 	if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
2156 		LOG_ERR("%s: Too many messages", dev->name);
2157 		return -ENOMEM;
2158 	}
2159 
2160 	/*
2161 	 * Ensure data will fit within FIFOs.
2162 	 *
2163 	 * TODO: This limitation prevents burst transfers greater than the
2164 	 *       FIFO sizes and should be replaced with an implementation that
2165 	 *       utilizes the RX/TX data interrupts.
2166 	 */
2167 	for (int i = 0; i < num_msgs; i++) {
2168 		if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2169 			rxsize += ROUND_UP(msgs[i].len, 4);
2170 		} else {
2171 			txsize += ROUND_UP(msgs[i].len, 4);
2172 		}
2173 	}
2174 	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
2175 		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
2176 		return -ENOMEM;
2177 	}
2178 
2179 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2180 
2181 	/* wait for idle */
2182 	ret = cdns_i3c_wait_for_idle(dev);
2183 	if (ret != 0) {
2184 		goto error;
2185 	}
2186 
2187 	/*
2188 	 * Prepare transfer commands. Currently there is only a single transfer
2189 	 * in-flight but it would be possible to keep a queue of transfers. If so,
2190 	 * this preparation could be completed outside of the bus lock allowing
2191 	 * greater parallelism.
2192 	 */
2193 	bool send_broadcast = true;
2194 
2195 	for (int i = 0; i < num_msgs; i++) {
2196 		struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
2197 		uint32_t pl = msgs[i].len;
2198 		/* check hdr mode */
2199 		if ((!(msgs[i].flags & I3C_MSG_HDR)) ||
2200 		    ((msgs[i].flags & I3C_MSG_HDR) && (msgs[i].hdr_mode == 0))) {
2201 			/* HDR message flag is not set, or it is set but no HDR mode is given
2202 			 */
2203 			cmd->len = pl;
2204 			cmd->buf = msgs[i].buf;
2205 
2206 			cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
2207 			cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(target->dynamic_addr);
2208 			if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2209 				cmd->cmd0 |= CMD0_FIFO_RNW;
2210 				/*
2211 				 * For I3C_XMIT_MODE_NO_ADDR reads in SDR mode,
2212 				 * CMD0_FIFO_PL_LEN specifies the abort limit, not the bytes to read
2213 				 */
2214 				cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl + 1);
2215 			} else {
2216 				cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl);
2217 			}
2218 
2219 			/* Send broadcast header on first transfer or after a STOP. */
2220 			if (!(msgs[i].flags & I3C_MSG_NBCH) && (send_broadcast)) {
2221 				cmd->cmd0 |= CMD0_FIFO_BCH;
2222 				send_broadcast = false;
2223 			}
2224 
2225 			/*
2226 			 * Send repeated start on all transfers except the last or those marked
2227 			 * STOP.
2228 			 */
2229 			if ((i < (num_msgs - 1)) && ((msgs[i].flags & I3C_MSG_STOP) == 0)) {
2230 				cmd->cmd0 |= CMD0_FIFO_RSBC;
2231 			} else {
2232 				send_broadcast = true;
2233 			}
2234 
2235 			/*
2236 			 * write the address of num_xfer which is to be updated upon message
2237 			 * completion
2238 			 */
2239 			cmd->num_xfer = &(msgs[i].num_xfer);
2240 			cmd->hdr = I3C_DATA_RATE_SDR;
2241 		} else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) &&
2242 			   (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && (msgs[i].flags & I3C_MSG_HDR)) {
2243 			uint16_t ddr_header_payload;
2244 
2245 			/* DDR sends data out in 16b, so len must be a multiple of 2 */
2246 			if (!((pl % 2) == 0)) {
2247 				ret = -EINVAL;
2248 				goto error;
2249 			}
2250 			/* HDR message flag is set and hdr mode is DDR */
2251 			cmd->buf = msgs[i].buf;
2252 			if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2253 				/* HDR-DDR Read */
2254 				ddr_header_payload = HDR_CMD_RD |
2255 						     HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2256 						     (target->dynamic_addr << 1);
2257 				/* Parity Adjustment Bit for Reads */
2258 				ddr_header_payload =
2259 					prepare_ddr_cmd_parity_adjustment_bit(ddr_header_payload);
2260 				/* HDR-DDR Command Word */
2261 				cmd->ddr_header =
2262 					DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2263 			} else {
2264 				uint8_t crc5 = 0x1F;
2265 				/* HDR-DDR Write */
2266 				ddr_header_payload = HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2267 						     (target->dynamic_addr << 1);
2268 				/* HDR-DDR Command Word */
2269 				cmd->ddr_header =
2270 					DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2271 				/* calculate crc5 */
2272 				crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2273 				for (int j = 0; j < pl; j += 2) {
2274 					crc5 = i3c_cdns_crc5(
2275 						crc5,
2276 						sys_get_be16((void *)((uintptr_t)cmd->buf + j)));
2277 				}
2278 				cmd->ddr_crc = DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | (crc5 << 9) |
2279 					       DDR_CRC_WR_SETUP;
2280 			}
2281 			/* Length of DDR Transfer is length of payload (in 16b) + header and CRC
2282 			 * blocks
2283 			 */
2284 			cmd->len = ((pl / 2) + 2);
2285 
2286 			/* prep command FIFO for ENTHDR0 */
2287 			cmd->cmd0 = CMD0_FIFO_IS_CCC;
2288 			cmd->cmd1 = I3C_CCC_ENTHDR0;
2289 			/* write the address of num_xfer which is to be updated upon message
2290 			 * completion
2291 			 */
2292 			cmd->num_xfer = &(msgs[i].num_xfer);
2293 			cmd->hdr = I3C_DATA_RATE_HDR_DDR;
2294 		} else {
2295 			LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, msgs[i].hdr_mode);
2296 			ret = -ENOTSUP;
2297 			goto error;
2298 		}
2299 	}
2300 
2301 	data->xfer.ret = -ETIMEDOUT;
2302 	data->xfer.num_cmds = num_msgs;
2303 
2304 	cdns_i3c_start_transfer(dev);
2305 	if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
2306 		LOG_ERR("%s: transfer timed out", dev->name);
2307 		cdns_i3c_cancel_transfer(dev);
2308 	}
2309 
2310 	ret = data->xfer.ret;
2311 error:
2312 	k_mutex_unlock(&data->bus_lock);
2313 
2314 	return ret;
2315 }
2316 
2317 #ifdef CONFIG_I3C_USE_IBI
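/* Copy @p len bytes of IBI payload out of the IBI data FIFO into @p buf,
 * one 32-bit word at a time; returns -EIO if the FIFO runs empty before
 * the requested length has been read.
 */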
2318 static int cdns_i3c_read_ibi_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
2319 {
2320 	uint32_t *ptr = buf;
2321 	uint32_t remain, val;
2322 
2323 	for (remain = len; remain >= 4; remain -= 4) {
2324 		if (cdns_i3c_ibi_fifo_empty(config)) {
2325 			return -EIO;
2326 		}
2327 		val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2328 		*ptr++ = val;
2329 	}
2330 
2331 	if (remain > 0) {
2332 		if (cdns_i3c_ibi_fifo_empty(config)) {
2333 			return -EIO;
2334 		}
2335 		val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2336 		memcpy(ptr, &val, remain);
2337 	}
2338 
2339 	return 0;
2340 }
2341 
2342 static void cdns_i3c_handle_ibi(const struct device *dev, uint32_t ibir)
2343 {
2344 	const struct cdns_i3c_config *config = dev->config;
2345 	struct cdns_i3c_data *data = dev->data;
2346 
2347 	/* The slave ID returned here is the device ID in the SIR map NOT the device ID
2348 	 * in the RR map.
2349 	 */
2350 	uint8_t slave_id = IBIR_SLVID(ibir);
2351 
2352 	if (slave_id == IBIR_SLVID_INV) {
2353 		/* DA does not match any value among SIR map */
2354 		return;
2355 	}
2356 
2357 	uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(slave_id + 1));
2358 	uint8_t dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(dev_id_rr0);
2359 	struct i3c_device_desc *desc = i3c_dev_list_i3c_addr_find(dev, dyn_addr);
2360 
2361 	/*
2362 	 * Check for NAK or error conditions.
2363 	 *
2364 	 * Note: The logging is for debugging only, so it will be compiled out in most cases.
2365 	 * However, if the log level for this module is DEBUG and the log mode is IMMEDIATE or
2366 	 * MINIMAL, this may cause problems because this code runs inside an ISR.
2367 	 */
2368 	if (!(IBIR_ACKED & ibir)) {
2369 		LOG_DBG("%s: NAK for slave ID %u", dev->name, (unsigned int)slave_id);
2370 		return;
2371 	}
2372 	if (ibir & IBIR_ERROR) {
2373 		/* Controller issued an Abort */
2374 		LOG_ERR("%s: IBI Data overflow", dev->name);
2375 	}
2376 
2377 	/* Read out any payload bytes */
2378 	uint8_t ibi_len = IBIR_XFER_BYTES(ibir);
2379 
2380 	if (ibi_len > 0) {
2381 		if (ibi_len - data->ibi_buf.ibi_data_cnt > 0) {
2382 			if (cdns_i3c_read_ibi_fifo(
2383 				    config, &data->ibi_buf.ibi_data[data->ibi_buf.ibi_data_cnt],
2384 				    ibi_len - data->ibi_buf.ibi_data_cnt) < 0) {
2385 				LOG_ERR("%s: Failed to get payload", dev->name);
2386 			}
2387 		}
2388 		data->ibi_buf.ibi_data_cnt = 0;
2389 	}
2390 
2391 	if (i3c_ibi_work_enqueue_target_irq(desc, data->ibi_buf.ibi_data, ibi_len) != 0) {
2392 		LOG_ERR("%s: Error enqueue IBI IRQ work", dev->name);
2393 	}
2394 }
2395 
2396 static void cdns_i3c_handle_hj(const struct device *dev, uint32_t ibir)
2397 {
2398 	if (!(IBIR_ACKED & ibir)) {
2399 		LOG_DBG("%s: NAK for HJ", dev->name);
2400 		return;
2401 	}
2402 
2403 	if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
2404 		LOG_ERR("%s: Error enqueue IBI HJ work", dev->name);
2405 	}
2406 }
2407 
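/* Drain the IBI response FIFO and dispatch each entry to the IBI or
 * Hot-Join handler according to its IBIR type.
 */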
2408 static void cnds_i3c_master_demux_ibis(const struct device *dev)
2409 {
2410 	const struct cdns_i3c_config *config = dev->config;
2411 
2412 	for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
2413 	     !(status0 & MST_STATUS0_IBIR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
2414 		uint32_t ibir = sys_read32(config->base + IBIR);
2415 
2416 		switch (IBIR_TYPE(ibir)) {
2417 		case IBIR_TYPE_IBI:
2418 			cdns_i3c_handle_ibi(dev, ibir);
2419 			break;
2420 		case IBIR_TYPE_HJ:
2421 			cdns_i3c_handle_hj(dev, ibir);
2422 			break;
2423 		case IBIR_TYPE_MR:
2424 			/* not implemented */
2425 			break;
2426 		default:
2427 			break;
2428 		}
2429 	}
2430 }
2431 
2432 static void cdns_i3c_target_ibi_hj_complete(const struct device *dev)
2433 {
2434 	struct cdns_i3c_data *data = dev->data;
2435 
2436 	k_sem_give(&data->ibi_hj_complete);
2437 }
2438 #endif
2439 
2440 static void cdns_i3c_target_sdr_tx_thr_int_handler(const struct device *dev,
2441 						   const struct i3c_target_callbacks *target_cb)
2442 {
2443 	int status = 0;
2444 	struct cdns_i3c_data *data = dev->data;
2445 	const struct cdns_i3c_config *config = dev->config;
2446 
2447 	if (target_cb != NULL && target_cb->read_processed_cb) {
2448 		/* With REV_ID 1p7 and later, the target FIFOs use the full 32-bit word;
2449 		 * otherwise only the first byte of each word is used.
2450 		 */
2451 		if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2452 			/* while tx fifo is not full and there is still data available */
2453 			while ((!(sys_read32(config->base + SLV_STATUS1) &
2454 				  SLV_STATUS1_SDR_TX_FULL)) &&
2455 			       (status == 0)) {
2456 				/* call function pointer for read */
2457 				uint32_t tx_data = 0;
2458 				bool data_valid = false;
2459 
2460 				for (int j = 0; j < 4; j++) {
2461 					uint8_t byte;
2462 					/* will return negative if no data left to transmit and 0
2463 					 * if data available
2464 					 */
2465 					status = target_cb->read_processed_cb(data->target_config,
2466 									      &byte);
2467 					if (status == 0) {
2468 						data_valid = true;
2469 						tx_data |= (byte << (j * 8));
2470 					}
2471 				}
2472 				if (data_valid) {
2473 					cdns_i3c_write_tx_fifo(config, &tx_data, sizeof(uint32_t));
2474 				}
2475 			}
2476 		} else {
2477 			/* while tx fifo is not full and there is still data available */
2478 			while ((!(sys_read32(config->base + SLV_STATUS1) &
2479 				  SLV_STATUS1_SDR_TX_FULL)) &&
2480 			       (status == 0)) {
2481 				uint8_t byte;
2482 				/* will return negative if no data left to transmit and 0 if
2483 				 * data available
2484 				 */
2485 				status = target_cb->read_processed_cb(data->target_config, &byte);
2486 				if (status == 0) {
2487 					cdns_i3c_write_tx_fifo(config, &byte, sizeof(uint8_t));
2488 				}
2489 			}
2490 		}
2491 	}
2492 }
2493 
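/**
 * @brief Controller/target interrupt service routine.
 *
 * Services controller-mode interrupts (MST_ISR) when the IP is the active
 * controller, otherwise services target-mode interrupts (SLV_ISR).
 */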
2494 static void cdns_i3c_irq_handler(const struct device *dev)
2495 {
2496 	const struct cdns_i3c_config *config = dev->config;
2497 	struct cdns_i3c_data *data = dev->data;
2498 
2499 	if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
2500 		uint32_t int_st = sys_read32(config->base + MST_ISR);
2501 		sys_write32(int_st, config->base + MST_ICR);
2502 
2503 		/* Core halted */
2504 		if (int_st & MST_INT_HALTED) {
2505 			LOG_WRN("Core Halted, 2 read aborts");
2506 		}
2507 
2508 		/* Command queue empty */
2509 		if (int_st & MST_INT_CMDD_EMP) {
2510 			cdns_i3c_complete_transfer(dev);
2511 		}
2512 
2513 		/* In-band interrupt */
2514 		if (int_st & MST_INT_IBIR_THR) {
2515 #ifdef CONFIG_I3C_USE_IBI
2516 			cnds_i3c_master_demux_ibis(dev);
2517 #else
2518 			LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled",
2519 				dev->name);
2520 #endif
2521 		}
2522 
2523 		/* In-band interrupt data threshold */
2524 		if (int_st & MST_INT_IBID_THR) {
2525 #ifdef CONFIG_I3C_USE_IBI
2526 			/* pop data out of the IBI FIFO */
2527 			while (!cdns_i3c_ibi_fifo_empty(config)) {
2528 				uint32_t *ptr = (uint32_t *)&data->ibi_buf
2529 							.ibi_data[data->ibi_buf.ibi_data_cnt];
2530 				*ptr = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2531 				data->ibi_buf.ibi_data_cnt += 4;
2532 			}
2533 #else
2534 			LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled",
2535 				dev->name);
2536 #endif
2537 		}
2538 
2539 		/* In-band interrupt response overflow */
2540 		if (int_st & MST_INT_IBIR_OVF) {
2541 			LOG_ERR("%s: controller ibir overflow", dev->name);
2542 		}
2543 
2544 		/* TX buffer overflow */
2545 		if (int_st & MST_INT_TX_OVF) {
2546 			LOG_ERR("%s: controller tx buffer overflow", dev->name);
2547 		}
2548 
2549 		/* RX buffer underflow */
2550 		if (int_st & MST_INT_RX_UNF) {
2551 			LOG_ERR("%s: controller rx buffer underflow", dev->name);
2552 		}
2553 	} else {
2554 		uint32_t int_sl = sys_read32(config->base + SLV_ISR);
2555 		const struct i3c_target_callbacks *target_cb =
2556 			data->target_config ? data->target_config->callbacks : NULL;
2557 		/* Clear interrupts */
2558 		sys_write32(int_sl, config->base + SLV_ICR);
2559 
2560 		/* SLV SDR rx fifo threshold */
2561 		if (int_sl & SLV_INT_SDR_RX_THR) {
2562 			/* while rx fifo is not empty */
2563 			while (!(sys_read32(config->base + SLV_STATUS1) &
2564 				 SLV_STATUS1_SDR_RX_EMPTY)) {
2565 				if (target_cb != NULL && target_cb->write_received_cb != NULL) {
2566 					cdns_i3c_target_read_rx_fifo(dev);
2567 				}
2568 			}
2569 		}
2570 
2571 		/* SLV SDR tx fifo threshold */
2572 		if (int_sl & SLV_INT_SDR_TX_THR) {
2573 			cdns_i3c_target_sdr_tx_thr_int_handler(dev, target_cb);
2574 		}
2575 
2576 		/* SLV SDR rx complete */
2577 		if (int_sl & SLV_INT_SDR_RD_COMP) {
2578 			/* SLV_STATUS0 must be read, otherwise a NACK will happen */
2579 			(void)sys_read32(config->base + SLV_STATUS0);
2580 			/* call stop function pointer */
2581 			if (target_cb != NULL && target_cb->stop_cb) {
2582 				target_cb->stop_cb(data->target_config);
2583 			}
2584 		}
2585 
2586 		/* SLV SDR tx complete */
2587 		if (int_sl & SLV_INT_SDR_WR_COMP) {
2588 			/* SLV_STATUS0 must be read, otherwise a NACK will happen */
2589 			(void)sys_read32(config->base + SLV_STATUS0);
2590 			/* clear bytes read parameter */
2591 			data->fifo_bytes_read = 0;
2592 			/* call stop function pointer */
2593 			if (target_cb != NULL && target_cb->stop_cb) {
2594 				target_cb->stop_cb(data->target_config);
2595 			}
2596 		}
2597 
2598 		/* DA has been updated */
2599 		if (int_sl & SLV_INT_DA_UPD) {
2600 			LOG_INF("%s: DA updated to 0x%02lx", dev->name,
2601 				SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1)));
2602 			/* HJ could send a DISEC which would trigger the SLV_INT_EVENT_UP bit,
2603 			 * but it's still expected to eventually send a DAA
2604 			 */
2605 #ifdef CONFIG_I3C_USE_IBI
2606 			cdns_i3c_target_ibi_hj_complete(dev);
2607 #endif
2608 		}
2609 
2610 		/* HJ complete and DA has been assigned */
2611 		if (int_sl & SLV_INT_HJ_DONE) {
2612 		}
2613 
2614 		/* Controllership has been given */
2615 		if (int_sl & SLV_INT_MR_DONE) {
2616 			/* TODO: implement support for controllership handoff */
2617 		}
2618 
2619 		/* EISC or DISEC has been received */
2620 		if (int_sl & SLV_INT_EVENT_UP) {
2621 		}
2622 
2623 		/* sdr transfer aborted by controller */
2624 		if (int_sl & SLV_INT_M_RD_ABORT) {
2625 			/* TODO: consider flushing tx buffer? */
2626 		}
2627 
2628 		/* SLV SDR rx fifo underflow */
2629 		if (int_sl & SLV_INT_SDR_RX_UNF) {
2630 			LOG_ERR("%s: slave sdr rx buffer underflow", dev->name);
2631 		}
2632 
2633 		/* SLV SDR tx fifo overflow */
2634 		if (int_sl & SLV_INT_SDR_TX_OVF) {
2635 			LOG_ERR("%s: slave sdr tx buffer overflow", dev->name);
2636 		}
2637 
2638 		if (int_sl & SLV_INT_DDR_RX_THR) {
2639 		}
2640 
2641 		/* SLV DDR WR COMPLETE */
2642 		if (int_sl & SLV_INT_DDR_WR_COMP) {
2643 			/* initial value of CRC5 for HDR-DDR is 0x1F */
2644 			uint8_t crc5 = 0x1F;
2645 
2646 			while (!(sys_read32(config->base + SLV_STATUS1) &
2647 				 SLV_STATUS1_DDR_RX_EMPTY)) {
2648 				uint32_t ddr_rx_data = sys_read32(config->base + SLV_DDR_RX_FIFO);
2649 				uint32_t preamble = (ddr_rx_data & DDR_PREAMBLE_MASK);
2650 
2651 				if (preamble == DDR_PREAMBLE_DATA_ABORT ||
2652 				    preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
2653 					uint16_t ddr_payload = DDR_DATA(ddr_rx_data);
2654 
2655 					if (cdns_i3c_ddr_parity(ddr_payload) !=
2656 					    (ddr_rx_data & (DDR_ODD_PARITY | DDR_EVEN_PARITY))) {
2657 						LOG_ERR("%s: Received incorrect DDR Parity",
2658 							dev->name);
2659 					}
2660 					/* calculate a running crc */
2661 					crc5 = i3c_cdns_crc5(crc5, ddr_payload);
2662 
2663 					if (target_cb != NULL &&
2664 					    target_cb->write_received_cb != NULL) {
2665 						/* DDR receives 2B for each payload */
2666 						target_cb->write_received_cb(
2667 							data->target_config,
2668 							(uint8_t)((ddr_payload >> 8) & 0xFF));
2669 						target_cb->write_received_cb(
2670 							data->target_config,
2671 							(uint8_t)(ddr_payload));
2672 					}
2673 
2674 				} else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
2675 					   ((ddr_rx_data & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
2676 					/* should come through here last */
2677 					if (crc5 != DDR_CRC(ddr_rx_data)) {
2678 						LOG_ERR("%s: Received incorrect DDR CRC5",
2679 							dev->name);
2680 					}
2681 				} else if (preamble == DDR_PREAMBLE_CMD_CRC) {
2682 					/* should come through here first */
2683 					uint16_t ddr_header_payload = DDR_DATA(ddr_rx_data);
2684 
2685 					crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2686 				}
2687 			}
2688 
2689 			if (target_cb != NULL && target_cb->stop_cb != NULL) {
2690 				target_cb->stop_cb(data->target_config);
2691 			}
2692 		}
2693 
2694 		/* SLV SDR rx complete */
2695 		if (int_sl & SLV_INT_DDR_RD_COMP) {
2696 			/* SLV_STATUS0 must be read, otherwise a NACK will happen */
2697 			(void)sys_read32(config->base + SLV_STATUS0);
2698 			/* call stop function pointer */
2699 			if (target_cb != NULL && target_cb->stop_cb) {
2700 				target_cb->stop_cb(data->target_config);
2701 			}
2702 		}
2703 
2704 		/*SLV DDR TX THR*/
2705 		if (int_sl & SLV_INT_DDR_TX_THR) {
2706 			int status = 0;
2707 
2708 			if (target_cb != NULL && target_cb->read_processed_cb) {
2709 
2710 				while ((!(sys_read32(config->base + SLV_STATUS1) &
2711 					  SLV_STATUS1_DDR_TX_FULL)) &&
2712 				       (status == 0)) {
2713 					/* call function pointer for read */
2714 					uint8_t byte;
2715 					/* will return negative if no data left to transmit
2716 					 * and 0 if data available
2717 					 */
2718 					status = target_cb->read_processed_cb(data->target_config,
2719 									      &byte);
2720 					if (status == 0) {
2721 						cdns_i3c_write_ddr_tx_fifo(config, &byte,
2722 									   sizeof(byte));
2723 					}
2724 				}
2725 			}
2726 		}
2727 	}
2728 }
2729 
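/**
 * @brief Read and cache the hardware configuration.
 *
 * Caches the revision ID and the FIFO depths reported by CONF_STATUS0 and
 * CONF_STATUS1, converting the depths from 32-bit words to bytes.
 */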
2730 static void cdns_i3c_read_hw_cfg(const struct device *dev)
2731 {
2732 	const struct cdns_i3c_config *config = dev->config;
2733 	struct cdns_i3c_data *data = dev->data;
2734 
2735 	uint32_t devid = sys_read32(config->base + DEV_ID);
2736 	uint32_t revid = sys_read32(config->base + REV_ID);
2737 
2738 	LOG_DBG("%s: Device info:\r\n"
2739 		"  vid: 0x%03lX, pid: 0x%03lX\r\n"
2740 		"  revision: major = %lu, minor = %lu\r\n"
2741 		"  device ID: 0x%04X",
2742 		dev->name, REV_ID_VID(revid), REV_ID_PID(revid), REV_ID_REV_MAJOR(revid),
2743 		REV_ID_REV_MINOR(revid), devid);
2744 
2745 	/*
2746 	 * Depths are specified as number of words (32bit), convert to bytes
2747 	 */
2748 	uint32_t cfg0 = sys_read32(config->base + CONF_STATUS0);
2749 	uint32_t cfg1 = sys_read32(config->base + CONF_STATUS1);
2750 
2751 	data->hw_cfg.rev_id = revid;
2752 	data->hw_cfg.cmdr_mem_depth = CONF_STATUS0_CMDR_DEPTH(cfg0) * 4;
2753 	data->hw_cfg.cmd_mem_depth = CONF_STATUS1_CMD_DEPTH(cfg1) * 4;
2754 	data->hw_cfg.rx_mem_depth = CONF_STATUS1_RX_DEPTH(cfg1) * 4;
2755 	data->hw_cfg.tx_mem_depth = CONF_STATUS1_TX_DEPTH(cfg1) * 4;
2756 	data->hw_cfg.ddr_rx_mem_depth = CONF_STATUS1_SLV_DDR_RX_DEPTH(cfg1) * 4;
2757 	data->hw_cfg.ddr_tx_mem_depth = CONF_STATUS1_SLV_DDR_TX_DEPTH(cfg1) * 4;
2758 	data->hw_cfg.ibir_mem_depth = CONF_STATUS0_IBIR_DEPTH(cfg0) * 4;
2759 	data->hw_cfg.ibi_mem_depth = CONF_STATUS1_IBI_DEPTH(cfg1) * 4;
2760 
2761 	LOG_DBG("%s: FIFO info:\r\n"
2762 		"  cmd_mem_depth = %u\r\n"
2763 		"  cmdr_mem_depth = %u\r\n"
2764 		"  rx_mem_depth = %u\r\n"
2765 		"  tx_mem_depth = %u\r\n"
2766 		"  ddr_rx_mem_depth = %u\r\n"
2767 		"  ddr_tx_mem_depth = %u\r\n"
2768 		"  ibi_mem_depth = %u\r\n"
2769 		"  ibir_mem_depth = %u",
2770 		dev->name, data->hw_cfg.cmd_mem_depth, data->hw_cfg.cmdr_mem_depth,
2771 		data->hw_cfg.rx_mem_depth, data->hw_cfg.tx_mem_depth, data->hw_cfg.ddr_rx_mem_depth,
2772 		data->hw_cfg.ddr_tx_mem_depth, data->hw_cfg.ibi_mem_depth,
2773 		data->hw_cfg.ibir_mem_depth);
2774 
2775 	/* Regardless of the cmd depth size we are limited by our cmd array length. */
2776 	data->hw_cfg.cmd_mem_depth = MIN(data->hw_cfg.cmd_mem_depth, ARRAY_SIZE(data->xfer.cmds));
2777 }
2778 
2779 /**
2780  * @brief Get configuration of the I3C hardware.
2781  *
2782  * This provides a way to get the current configuration of the I3C hardware.
2783  *
2784  * This can return cached config or probed hardware parameters, but it has to
2785  * be up to date with current configuration.
2786  *
2787  * @param[in] dev Pointer to controller device driver instance.
2788  * @param[in] type Type of configuration parameters being passed
2789  *                 in @p config.
2790  * @param[in,out] config Pointer to the configuration parameters.
2791  *
2792  * Note that if @p type is @c I3C_CONFIG_CUSTOM, @p config must contain
2793  * the ID of the parameter to be retrieved.
2794  *
2795  * @retval 0 If successful.
2796  * @retval -EIO General Input/Output errors.
2797  * @retval -ENOSYS If not implemented.
2798  */
2799 static int cdns_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config)
2800 {
2801 	struct cdns_i3c_data *data = dev->data;
2802 	int ret = 0;
2803 
2804 	if (config == NULL) {
2805 		ret = -EINVAL;
2806 		goto out_configure;
2807 	}
2808 
2809 	(void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config));
2810 
2811 out_configure:
2812 	return ret;
2813 }
2814 
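/**
 * @brief Write a buffer to the target DDR TX FIFO as an HDR-DDR read response.
 *
 * Each 16-bit data word is framed with a DATA preamble via prepare_ddr_word(),
 * and a closing CRC5 word is appended once the whole buffer fits in the FIFO.
 *
 * @retval Number of bytes written to the FIFO.
 * @retval -ENOSPC If the DDR TX FIFO is already full.
 * @retval -EINVAL If @p len is not a multiple of 2.
 */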
2815 static int cdns_i3c_target_tx_ddr_write(const struct device *dev, uint8_t *buf, uint16_t len)
2816 {
2817 	const struct cdns_i3c_config *config = dev->config;
2818 	struct cdns_i3c_data *data = dev->data;
2819 	uint32_t i, preamble;
2820 	uint32_t data_word;
2821 	uint8_t crc5 = 0x1F;
2822 
2823 	/* check if there is space available in the tx fifo */
2824 	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL) {
2825 		return -ENOSPC;
2826 	}
2827 
2828 	/* DDR sends data out in 16b, so len must be a multiple of 2 */
2829 	if (!((len % 2) == 0)) {
2830 		return -EINVAL;
2831 	}
2832 
2833 	/* The header must be known in advance to calculate the crc5 */
2834 	uint8_t slave_da = SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1));
2835 	uint16_t ddr_payload_header = HDR_CMD_RD | (slave_da << 1);
2836 
2837 	ddr_payload_header = prepare_ddr_cmd_parity_adjustment_bit(ddr_payload_header);
2838 	crc5 = i3c_cdns_crc5(crc5, ddr_payload_header);
2839 
2840 	/* write as much as you can to the fifo */
2841 	for (i = 0;
2842 	     i < len && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL));
2843 	     i += 2) {
2844 		/* Use ALT with other than first packets */
2845 		preamble = (i > 0) ? DDR_PREAMBLE_DATA_ABORT_ALT : DDR_PREAMBLE_DATA_ABORT;
2846 		data_word = (preamble | prepare_ddr_word(sys_get_be16(&buf[i])));
2847 		crc5 = i3c_cdns_crc5(crc5, sys_get_be16(&buf[i]));
2848 		sys_write32(data_word, config->base + SLV_DDR_TX_FIFO);
2849 	}
2850 	/* end of data buffer, write crc packet (if we are still not full) */
2851 	if ((i == len) && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL))) {
2852 		sys_write32(DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | crc5 << 9,
2853 			    config->base + SLV_DDR_TX_FIFO);
2854 	}
2855 
2856 	/* setup THR interrupt */
2857 	uint32_t thr_ctrl = sys_read32(config->base + SLV_DDR_TX_RX_THR_CTRL);
2858 
2859 	/*
2860 	 * Interrupt at half of the data or FIFO depth to give it enough time to be
2861 	 * processed. The ISR will then callback to the function pointer
2862 	 * `read_processed_cb` to collect more data to transmit
2863 	 */
2864 	thr_ctrl &= ~TX_THR_MASK;
2865 	thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
2866 
2867 	sys_write32(thr_ctrl, config->base + SLV_DDR_TX_RX_THR_CTRL);
2868 	/* return total bytes written */
2869 	return i;
2870 }
2871 
2872 /**
2873  * @brief Writes to the Target's TX FIFO
2874  *
2875  * The Cadence I3C IP will then ACK read requests from a Controller and
2876  * serve them from its TX FIFO.
2877  *
2878  * @param dev Pointer to the device structure for an I3C controller
2879  *            driver configured in target mode.
2880  * @param buf Pointer to the buffer
2881  * @param len Length of the buffer
2882  *
2883  * @retval Total number of bytes written
2884  * @retval -EACCES Not in Target Mode
2885  * @retval -ENOSPC No space in Tx FIFO
2886  */
2887 static int cdns_i3c_target_tx_write(const struct device *dev, uint8_t *buf, uint16_t len,
2888 				    uint8_t hdr_mode)
2889 {
2890 	const struct cdns_i3c_config *config = dev->config;
2891 	struct cdns_i3c_data *data = dev->data;
2892 	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
2893 	const uint32_t *buf_32 = (uint32_t *)buf;
2894 	uint32_t i = 0;
2895 	uint32_t val = 0;
2896 	uint16_t remain = len;
2897 
2898 	/* check if we are currently a target */
2899 	if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
2900 		return -EACCES;
2901 	}
2902 
2903 	/* check if there is space available in the tx fifo */
2904 	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL) {
2905 		return -ENOSPC;
2906 	}
2907 
2908 	k_mutex_lock(&data->bus_lock, K_FOREVER);
2909 
2910 	/* rev 1p7 and later requires the length to be written to the SLV_CTRL reg */
2911 	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2912 		sys_write32(len, config->base + SLV_CTRL);
2913 	}
2914 	if (hdr_mode == I3C_MSG_HDR_DDR) {
2915 		if (ctrl_config->supported_hdr & I3C_MSG_HDR_DDR) {
2916 			i = cdns_i3c_target_tx_ddr_write(dev, buf, len);
2917 			/* TODO: DDR THR interrupt support not implemented yet */
2918 		} else {
2919 			LOG_ERR("%s: HDR-DDR not supported", dev->name);
2920 			i = -ENOTSUP;
2921 		}
2922 	} else if (hdr_mode == 0) {
2923 		/* write as much as you can to the fifo */
2924 		while (i < len &&
2925 		       (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL))) {
2926 			/* with rev 1p7 and later, the target FIFOs use the full 32-bit word,
2927 			 * otherwise only the first byte of each word is used
2928 			 */
2929 			if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2930 				remain = len - i;
2931 				if (remain >= 4) {
2932 					val = *buf_32++;
2933 				} else if (remain > 0) {
2934 					val = 0;
2935 					memcpy(&val, buf_32, remain);
2936 				}
2937 				sys_write32(val, config->base + TX_FIFO);
2938 				i += 4;
2939 			} else {
2940 				sys_write32((uint32_t)buf[i], config->base + TX_FIFO);
2941 				i++;
2942 			}
2943 		}
2944 
2945 		/* setup THR interrupt */
2946 		uint32_t thr_ctrl = sys_read32(config->base + TX_RX_THR_CTRL);
2947 
2948 		/*
2949 		 * Interrupt at half of the data or FIFO depth to give it enough time to be
2950 		 * processed. The ISR will then callback to the function pointer
2951 		 * `read_processed_cb` to collect more data to transmit
2952 		 */
2953 		thr_ctrl &= ~TX_THR_MASK;
2954 		thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
2955 		sys_write32(thr_ctrl, config->base + TX_RX_THR_CTRL);
2956 	} else {
2957 		LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, hdr_mode);
2958 		i = -ENOTSUP;
2959 	}
2960 
2961 	k_mutex_unlock(&data->bus_lock);
2962 
2963 	/* return total bytes written */
2964 	return i;
2965 }
2966 
2967 /**
2968  * @brief Instructs the I3C Target device to register itself to the I3C Controller
2969  *
2970  * This routine instructs the I3C Target device to register itself to the I3C
2971  * Controller via its parent controller's i3c_target_register() API.
2972  *
2973  * @param dev Pointer to target device driver instance.
2974  * @param cfg Config struct with functions and parameters used by the I3C driver
2975  * to send bus events
2976  *
2977  * @return 0 on success.
2978  */
2979 static int cdns_i3c_target_register(const struct device *dev, struct i3c_target_config *cfg)
2980 {
2981 	struct cdns_i3c_data *data = dev->data;
2982 
2983 	data->target_config = cfg;
2984 	return 0;
2985 }
2986 
2987 /**
2988  * @brief Unregisters the provided config as Target device
2989  *
2990  * This routine disables I3C target mode for the 'dev' I3C bus driver using
2991  * the provided 'config' struct containing the functions and parameters
2992  * to send bus events.
2993  *
2994  * @param dev Pointer to target device driver instance.
2995  * @param cfg Config struct with functions and parameters used by the I3C driver
2996  * to send bus events
2997  *
2998  * @return 0 on success.
2999  */
3000 static int cdns_i3c_target_unregister(const struct device *dev, struct i3c_target_config *cfg)
3001 {
3002 	/* no way to disable? maybe write DA to 0? */
3003 	return 0;
3004 }
3005 
3006 /**
3007  * @brief Find a registered I3C target device.
3008  *
3009  * This returns the I3C device descriptor of the I3C device
3010  * matching the incoming @p id.
3011  *
3012  * @param dev Pointer to controller device driver instance.
3013  * @param id Pointer to I3C device ID.
3014  *
3015  * @return @see i3c_device_find.
3016  */
3017 static struct i3c_device_desc *cdns_i3c_device_find(const struct device *dev,
3018 						    const struct i3c_device_id *id)
3019 {
3020 	const struct cdns_i3c_config *config = dev->config;
3021 
3022 	return i3c_dev_list_find(&config->common.dev_list, id);
3023 }
3024 
3025 /**
3026  * Find a registered I2C target device.
3027  *
3028  * Controller only API.
3029  *
3030  * This returns the I2C device descriptor of the I2C device
3031  * matching the device address @p addr.
3032  *
3033  * @param dev Pointer to controller device driver instance.
3034  * @param id I2C target device address.
3035  *
3036  * @return @see i3c_i2c_device_find.
3037  */
3038 static struct i3c_i2c_device_desc *cdns_i3c_i2c_device_find(const struct device *dev, uint16_t addr)
3039 {
3040 	return i3c_dev_list_i2c_addr_find(dev, addr);
3041 }
3042 
3043 /**
3044  * @brief Transfer messages in I2C mode.
3045  *
3046  * @see i2c_transfer
3047  *
3048  * @param dev Pointer to device driver instance.
3049  * @param msgs Pointer to I2C messages.
3050  * @param num_msgs Number of messages to transfer.
3051  * @param addr Address of the I2C target device.
3052  *
3053  * @return @see i2c_transfer
3054  */
3055 static int cdns_i3c_i2c_api_transfer(const struct device *dev, struct i2c_msg *msgs,
3056 				     uint8_t num_msgs, uint16_t addr)
3057 {
3058 	struct i3c_i2c_device_desc *i2c_dev = cdns_i3c_i2c_device_find(dev, addr);
3059 	int ret;
3060 
3061 	if (i2c_dev == NULL) {
3062 		ret = -ENODEV;
3063 	} else {
3064 		ret = cdns_i3c_i2c_transfer(dev, i2c_dev, msgs, num_msgs);
3065 	}
3066 
3067 	return ret;
3068 }

/**
 * Determine the I3C bus mode from the I2C devices on the bus.
 *
 * Reads the LVR of all I2C devices and returns the resulting
 * I3C bus mode.
 *
 * @param dev_list Pointer to device list
 *
 * @return @see enum i3c_bus_mode.
 */
static enum i3c_bus_mode i3c_bus_mode(const struct i3c_dev_list *dev_list)
{
	enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;

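	/*
	 * Note: the comparisons below rely on enum i3c_bus_mode being ordered
	 * from least to most restrictive (PURE < MIXED_FAST < MIXED_LIMITED <
	 * MIXED_SLOW), so the most restrictive LVR index seen wins.
	 */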
	for (int i = 0; i < dev_list->num_i2c; i++) {
		switch (I3C_LVR_I2C_DEV_IDX(dev_list->i2c[i].lvr)) {
		case I3C_LVR_I2C_DEV_IDX_0:
			if (mode < I3C_BUS_MODE_MIXED_FAST) {
				mode = I3C_BUS_MODE_MIXED_FAST;
			}
			break;
		case I3C_LVR_I2C_DEV_IDX_1:
			if (mode < I3C_BUS_MODE_MIXED_LIMITED) {
				mode = I3C_BUS_MODE_MIXED_LIMITED;
			}
			break;
		case I3C_LVR_I2C_DEV_IDX_2:
			if (mode < I3C_BUS_MODE_MIXED_SLOW) {
				mode = I3C_BUS_MODE_MIXED_SLOW;
			}
			break;
		default:
			mode = I3C_BUS_MODE_INVALID;
			break;
		}
	}
	return mode;
}

/**
 * Determine the THD_DELAY value for the CTRL register
 *
 * Should be MIN(t_cf, t_cr) + 3ns
 *
 * @param dev Pointer to device driver instance.
 *
 * @return Value to be written to the CTRL register THD_DELAY field
 */
static uint8_t cdns_i3c_sda_data_hold(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;
	uint32_t input_clock_frequency = config->input_frequency;
	uint8_t thd_delay =
		DIV_ROUND_UP(I3C_HD_PP_DEFAULT_NS, (NSEC_PER_SEC / input_clock_frequency));

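	/*
	 * Worked example (illustrative numbers only): with a 100 MHz input
	 * clock one cycle is 10 ns, so a 10 ns hold time would round up to
	 * thd_delay = 1 and the value returned below would be
	 * THD_DELAY_MAX - 1. See the I3C_HD_PP_DEFAULT_NS definition for the
	 * actual default hold time used.
	 */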
	if (thd_delay > THD_DELAY_MAX) {
		thd_delay = THD_DELAY_MAX;
	}

	return (THD_DELAY_MAX - thd_delay);
}

/**
 * @brief Initialize the hardware.
 *
 * @param dev Pointer to controller device driver instance.
 *
 * @return 0 if successful, or negative value in case of error.
 */
static int cdns_i3c_bus_init(const struct device *dev)
{
	struct cdns_i3c_data *data = dev->data;
	const struct cdns_i3c_config *config = dev->config;
	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;

	cdns_i3c_read_hw_cfg(dev);

	/* Clear all retaining regs */
	sys_write32(DEVS_CTRL_DEV_CLR_ALL, config->base + DEVS_CTRL);

	uint32_t conf0 = sys_read32(config->base + CONF_STATUS0);
	uint32_t conf1 = sys_read32(config->base + CONF_STATUS1);
	data->max_devs = CONF_STATUS0_DEVS_NUM(conf0);
	data->free_rr_slots = GENMASK(data->max_devs, 1);
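	/*
	 * Note: bit 0 is deliberately excluded from the free-slot mask above;
	 * retaining-register slot 0 holds the controller's own address
	 * (assumption based on how the retaining registers are programmed
	 * elsewhere in this driver).
	 */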

	/* DDR supported bit moved in 1p7 revision along with dev role added */
	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
		ctrl_config->supported_hdr =
			(conf1 & CONF_STATUS1_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
		ctrl_config->is_secondary =
			(CONF_STATUS0_DEV_ROLE(conf0) == CONF_STATUS0_DEV_ROLE_SEC_MASTER) ? true
											   : false;
	} else {
		ctrl_config->supported_hdr =
			(conf0 & CONF_STATUS0_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
		ctrl_config->is_secondary = (conf0 & CONF_STATUS0_SEC_MASTER) ? true : false;
	}
	k_mutex_init(&data->bus_lock);
	k_sem_init(&data->xfer.complete, 0, 1);
	k_sem_init(&data->ibi_hj_complete, 0, 1);

	cdns_i3c_interrupts_disable(config);
	cdns_i3c_interrupts_clear(config);

	config->irq_config_func(dev);

	/* Ensure the bus is disabled. */
	sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);

	/* determine prescaler timings for i3c and i2c scl */
	cdns_i3c_set_prescalers(dev);

	enum i3c_bus_mode mode = i3c_bus_mode(&config->common.dev_list);

	LOG_DBG("%s: i3c bus mode %d", dev->name, mode);
	int cdns_mode;

	switch (mode) {
	case I3C_BUS_MODE_PURE:
		cdns_mode = CTRL_PURE_BUS_MODE;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
		cdns_mode = CTRL_MIXED_FAST_BUS_MODE;
		break;
	case I3C_BUS_MODE_MIXED_LIMITED:
	case I3C_BUS_MODE_MIXED_SLOW:
		cdns_mode = CTRL_MIXED_SLOW_BUS_MODE;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When a Hot-Join request happens, disable all events coming from this device.
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 * Set HJ ACK only after bus init completes, so targets are not forced into
	 * DAA prematurely.
	 *
	 * Set the I3C bus mode based on the LVR of the I2C devices.
	 */
	uint32_t ctrl = CTRL_HJ_DISEC | CTRL_MCS_EN | (CTRL_BUS_MODE_MASK & cdns_mode);
	/* Disable controllership requests as they are not yet supported by the driver */
	ctrl &= ~CTRL_MST_ACK;

	/*
	 * Cadence I3C release r104v1p0 and above support configuration of the SDA data hold time.
	 */
	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 4)) {
		ctrl |= CTRL_THD_DELAY(cdns_i3c_sda_data_hold(dev));
	}

	/*
	 * Cadence I3C release r105v1p0 and above support the I3C v1.1 timing change
	 * of tCASr_min = tCAS_min / 2; otherwise tCASr_min = tCAS_min (as per
	 * MIPI spec v1.0).
	 */
	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 5)) {
		ctrl |= CTRL_I3C_11_SUPP;
	}

	/* write ctrl register value */
	sys_write32(ctrl, config->base + CTRL);

	/* enable Core */
	sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);

	/* Set fifo thresholds. */
	sys_write32(CMD_THR(I3C_CMDD_THR) | IBI_THR(I3C_IBID_THR) | CMDR_THR(I3C_CMDR_THR) |
			    IBIR_THR(config->ibid_thr),
		    config->base + CMD_IBI_THR_CTRL);

	/* Set TX/RX interrupt thresholds. */
	if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
		sys_write32(TX_THR(I3C_TX_THR) | RX_THR(data->hw_cfg.rx_mem_depth),
			    config->base + TX_RX_THR_CTRL);
	} else {
		sys_write32(TX_THR(1) | RX_THR(1), config->base + TX_RX_THR_CTRL);
		sys_write32(SLV_DDR_TX_THR(0) | SLV_DDR_RX_THR(1),
			    config->base + SLV_DDR_TX_RX_THR_CTRL);
	}
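	/*
	 * In target mode the thresholds above are lowered to a single word so
	 * the FIFO interrupts fire as soon as any data is available (assumed
	 * rationale, not documented by the IP).
	 */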

	/* enable target interrupts */
	sys_write32(SLV_INT_DA_UPD | SLV_INT_SDR_RD_COMP | SLV_INT_SDR_WR_COMP |
			    SLV_INT_SDR_RX_THR | SLV_INT_SDR_TX_THR | SLV_INT_SDR_RX_UNF |
			    SLV_INT_SDR_TX_OVF | SLV_INT_HJ_DONE | SLV_INT_DDR_WR_COMP |
			    SLV_INT_DDR_RD_COMP | SLV_INT_DDR_RX_THR | SLV_INT_DDR_TX_THR,
		    config->base + SLV_IER);

	/* Enable IBI interrupts. */
	sys_write32(MST_INT_IBIR_THR | MST_INT_RX_UNF | MST_INT_HALTED | MST_INT_TX_OVF |
			    MST_INT_IBIR_OVF | MST_INT_IBID_THR,
		    config->base + MST_IER);

	int ret = i3c_addr_slots_init(dev);

	if (ret != 0) {
		return ret;
	}

	/* Program retaining regs. */
	cdns_i3c_program_controller_retaining_reg(dev);

	/* only primary controllers are responsible for initializing the bus */
	if (!ctrl_config->is_secondary) {
		/* Busy wait ~200 us for the bus idle condition. */
		k_busy_wait(201);
		/* Perform bus initialization */
		ret = i3c_bus_init(dev, &config->common.dev_list);
#ifdef CONFIG_I3C_USE_IBI
		/* Bus initialization complete, allow HJ ACKs */
		sys_write32(CTRL_HJ_ACK | sys_read32(config->base + CTRL), config->base + CTRL);
#endif
	}

	return 0;
}

static DEVICE_API(i3c, api) = {
	.i2c_api.configure = cdns_i3c_i2c_api_configure,
	.i2c_api.transfer = cdns_i3c_i2c_api_transfer,
#ifdef CONFIG_I2C_RTIO
	.i2c_api.iodev_submit = i2c_iodev_submit_fallback,
#endif

	.configure = cdns_i3c_configure,
	.config_get = cdns_i3c_config_get,

	.attach_i3c_device = cdns_i3c_attach_device,
	.reattach_i3c_device = cdns_i3c_reattach_device,
	.detach_i3c_device = cdns_i3c_detach_device,
	.attach_i2c_device = cdns_i3c_i2c_attach_device,
	.detach_i2c_device = cdns_i3c_i2c_detach_device,

	.do_daa = cdns_i3c_do_daa,
	.do_ccc = cdns_i3c_do_ccc,

	.i3c_device_find = cdns_i3c_device_find,

	.i3c_xfers = cdns_i3c_transfer,

	.target_tx_write = cdns_i3c_target_tx_write,
	.target_register = cdns_i3c_target_register,
	.target_unregister = cdns_i3c_target_unregister,

#ifdef CONFIG_I3C_USE_IBI
	.ibi_enable = cdns_i3c_controller_ibi_enable,
	.ibi_disable = cdns_i3c_controller_ibi_disable,
	.ibi_raise = cdns_i3c_target_ibi_raise,
#endif

#ifdef CONFIG_I3C_RTIO
	.iodev_submit = i3c_iodev_submit_fallback,
#endif
};

#define CADENCE_I3C_INSTANTIATE(n)                                                                 \
	static void cdns_i3c_config_func_##n(const struct device *dev);                            \
	static struct i3c_device_desc cdns_i3c_device_array_##n[] = I3C_DEVICE_ARRAY_DT_INST(n);   \
	static struct i3c_i2c_device_desc cdns_i3c_i2c_device_array_##n[] =                        \
		I3C_I2C_DEVICE_ARRAY_DT_INST(n);                                                   \
	static const struct cdns_i3c_config i3c_config_##n = {                                     \
		.base = DT_INST_REG_ADDR(n),                                                       \
		.input_frequency = DT_INST_PROP(n, input_clock_frequency),                         \
		.irq_config_func = cdns_i3c_config_func_##n,                                       \
		.ibid_thr = DT_INST_PROP(n, ibid_thr),                                             \
		.common.dev_list.i3c = cdns_i3c_device_array_##n,                                  \
		.common.dev_list.num_i3c = ARRAY_SIZE(cdns_i3c_device_array_##n),                  \
		.common.dev_list.i2c = cdns_i3c_i2c_device_array_##n,                              \
		.common.dev_list.num_i2c = ARRAY_SIZE(cdns_i3c_i2c_device_array_##n),              \
	};                                                                                         \
	static struct cdns_i3c_data i3c_data_##n = {                                               \
		.common.ctrl_config.scl.i3c = DT_INST_PROP_OR(n, i3c_scl_hz, 0),                   \
		.common.ctrl_config.scl.i2c = DT_INST_PROP_OR(n, i2c_scl_hz, 0),                   \
	};                                                                                         \
	DEVICE_DT_INST_DEFINE(n, cdns_i3c_bus_init, NULL, &i3c_data_##n, &i3c_config_##n,          \
			      POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, &api);             \
	static void cdns_i3c_config_func_##n(const struct device *dev)                             \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), cdns_i3c_irq_handler,       \
			    DEVICE_DT_INST_GET(n), 0);                                             \
		irq_enable(DT_INST_IRQN(n));                                                       \
	};

#define DT_DRV_COMPAT cdns_i3c
DT_INST_FOREACH_STATUS_OKAY(CADENCE_I3C_INSTANTIATE)
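
/*
 * Illustrative devicetree node for this driver (all values are placeholders;
 * consult the cdns,i3c binding for the authoritative property list):
 *
 *	i3c0: i3c@80000000 {
 *		compatible = "cdns,i3c";
 *		reg = <0x80000000 0x1000>;
 *		interrupts = <10 0>;
 *		input-clock-frequency = <100000000>;
 *		ibid-thr = <1>;
 *		i3c-scl-hz = <12500000>;
 *		i2c-scl-hz = <400000>;
 *		#address-cells = <3>;
 *		#size-cells = <0>;
 *	};
 */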