1 /*
2 * Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <errno.h>
8
9 #include <zephyr/drivers/i3c.h>
10 #include <zephyr/init.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/byteorder.h>
14 #include <zephyr/sys/sys_io.h>
15 #include <zephyr/sys/util.h>
16
17 #include <stdlib.h>
18
19 #define DEV_ID 0x0
20 #define DEV_ID_I3C_MASTER 0x5034
21
22 #define CONF_STATUS0 0x4
23 #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
24 #define CONF_STATUS0_ECC_CHK BIT(28)
25 #define CONF_STATUS0_INTEG_CHK BIT(27)
26 #define CONF_STATUS0_CSR_DAP_CHK BIT(26)
27 #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
28 #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
29 #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
30 #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
31 #define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
32 /* CONF_STATUS0_SUPPORTS_DDR moved to CONF_STATUS1 in rev >= 1p7 */
33 #define CONF_STATUS0_SUPPORTS_DDR BIT(5)
34 #define CONF_STATUS0_SEC_MASTER BIT(4)
35 /* And it was replaced with a Dev Role mask */
36 #define CONF_STATUS0_DEV_ROLE(x) (((x) & GENMASK(5, 4)) >> 4)
37 #define CONF_STATUS0_DEV_ROLE_MAIN_MASTER 0
38 #define CONF_STATUS0_DEV_ROLE_SEC_MASTER 1
39 #define CONF_STATUS0_DEV_ROLE_SLAVE 2
40 #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
41
42 #define CONF_STATUS1 0x8
43 #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
44 #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
45 #define CONF_STATUS1_SLV_DDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
46 #define CONF_STATUS1_SLV_DDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
47 #define CONF_STATUS1_SUPPORTS_DDR BIT(14)
48 #define CONF_STATUS1_ALT_MODE BIT(13)
49 #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
50 #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
51 #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
52
53 #define REV_ID 0xc
54 #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
55 #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
56 #define REV_ID_REV(id) ((id) & GENMASK(7, 0))
57 #define REV_ID_VERSION(m, n) (((m) << 5) | (n))
58 #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 5)) >> 5)
59 #define REV_ID_REV_MINOR(id) ((id) & GENMASK(4, 0))
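/*
 * For reference (illustrative): REV_ID_VERSION(1, 7) evaluates to 0x27, the
 * value REV_ID_REV() returns for a "1p7" core, so revision checks below read
 * as REV_ID_REV(rev_id) >= REV_ID_VERSION(1, 7).
 */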
60
61 #define CTRL 0x10
62 #define CTRL_DEV_EN BIT(31)
63 #define CTRL_HALT_EN BIT(30)
64 #define CTRL_MCS BIT(29)
65 #define CTRL_MCS_EN BIT(28)
66 #define CTRL_I3C_11_SUPP BIT(26)
67 #define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
68 #define CTRL_TC_EN BIT(9)
69 #define CTRL_HJ_DISEC BIT(8)
70 #define CTRL_MST_ACK BIT(7)
71 #define CTRL_HJ_ACK BIT(6)
72 #define CTRL_HJ_INIT BIT(5)
73 #define CTRL_MST_INIT BIT(4)
74 #define CTRL_AHDR_OPT BIT(3)
75 #define CTRL_PURE_BUS_MODE 0
76 #define CTRL_MIXED_FAST_BUS_MODE 2
77 #define CTRL_MIXED_SLOW_BUS_MODE 3
78 #define CTRL_BUS_MODE_MASK GENMASK(1, 0)
79 #define THD_DELAY_MAX 3
80
81 #define PRESCL_CTRL0 0x14
82 #define PRESCL_CTRL0_I2C(x) ((x) << 16)
83 #define PRESCL_CTRL0_I3C(x) (x)
84 #define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
85 #define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
86
87 #define PRESCL_CTRL1 0x18
88 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
89 #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
90 #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
91 #define PRESCL_CTRL1_OD_LOW(x) (x)
92
93 #define SLV_STATUS4 0x1C
94 #define SLV_STATUS4_BUSCON_FILL_LVL GENMASK(16, 8)
95 #define SLV_STATUS5_BUSCON_DATA GENMASK(7, 0)
96
97 #define MST_IER 0x20
98 #define MST_IDR 0x24
99 #define MST_IMR 0x28
100 #define MST_ICR 0x2c
101 #define MST_ISR 0x30
102 #define MST_INT_HALTED BIT(18)
103 #define MST_INT_MR_DONE BIT(17)
104 #define MST_INT_IMM_COMP BIT(16)
105 #define MST_INT_TX_THR BIT(15)
106 #define MST_INT_TX_OVF BIT(14)
107 #define MST_INT_C_REF_ROV BIT(13)
108 #define MST_INT_IBID_THR BIT(12)
109 #define MST_INT_IBID_UNF BIT(11)
110 #define MST_INT_IBIR_THR BIT(10)
111 #define MST_INT_IBIR_UNF BIT(9)
112 #define MST_INT_IBIR_OVF BIT(8)
113 #define MST_INT_RX_THR BIT(7)
114 #define MST_INT_RX_UNF BIT(6)
115 #define MST_INT_CMDD_EMP BIT(5)
116 #define MST_INT_CMDD_THR BIT(4)
117 #define MST_INT_CMDD_OVF BIT(3)
118 #define MST_INT_CMDR_THR BIT(2)
119 #define MST_INT_CMDR_UNF BIT(1)
120 #define MST_INT_CMDR_OVF BIT(0)
121 #define MST_INT_MASK GENMASK(18, 0)
122
123 #define MST_STATUS0 0x34
124 #define MST_STATUS0_IDLE BIT(18)
125 #define MST_STATUS0_HALTED BIT(17)
126 #define MST_STATUS0_MASTER_MODE BIT(16)
127 #define MST_STATUS0_TX_FULL BIT(13)
128 #define MST_STATUS0_IBID_FULL BIT(12)
129 #define MST_STATUS0_IBIR_FULL BIT(11)
130 #define MST_STATUS0_RX_FULL BIT(10)
131 #define MST_STATUS0_CMDD_FULL BIT(9)
132 #define MST_STATUS0_CMDR_FULL BIT(8)
133 #define MST_STATUS0_TX_EMP BIT(5)
134 #define MST_STATUS0_IBID_EMP BIT(4)
135 #define MST_STATUS0_IBIR_EMP BIT(3)
136 #define MST_STATUS0_RX_EMP BIT(2)
137 #define MST_STATUS0_CMDD_EMP BIT(1)
138 #define MST_STATUS0_CMDR_EMP BIT(0)
139
140 #define CMDR 0x38
141 #define CMDR_NO_ERROR 0
142 #define CMDR_DDR_PREAMBLE_ERROR 1
143 #define CMDR_DDR_PARITY_ERROR 2
144 #define CMDR_DDR_RX_FIFO_OVF 3
145 #define CMDR_DDR_TX_FIFO_UNF 4
146 #define CMDR_M0_ERROR 5
147 #define CMDR_M1_ERROR 6
148 #define CMDR_M2_ERROR 7
149 #define CMDR_MST_ABORT 8
150 #define CMDR_NACK_RESP 9
151 #define CMDR_INVALID_DA 10
152 #define CMDR_DDR_DROPPED 11
153 #define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
154 #define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
155 #define CMDR_CMDID_HJACK_DISEC 0xfe
156 #define CMDR_CMDID_HJACK_ENTDAA 0xff
157 #define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
158
159 #define IBIR 0x3c
160 #define IBIR_ACKED BIT(12)
161 #define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
162 #define IBIR_SLVID_INV 0xF
163 #define IBIR_ERROR BIT(7)
164 #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
165 #define IBIR_TYPE_IBI 0
166 #define IBIR_TYPE_HJ 1
167 #define IBIR_TYPE_MR 2
168 #define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
169
170 #define SLV_IER 0x40
171 #define SLV_IDR 0x44
172 #define SLV_IMR 0x48
173 #define SLV_ICR 0x4c
174 #define SLV_ISR 0x50
175 #define SLV_INT_CHIP_RST BIT(31)
176 #define SLV_INT_PERIPH_RST BIT(30)
177 #define SLV_INT_FLUSH_DONE BIT(29)
178 #define SLV_INT_RST_DAA BIT(28)
179 #define SLV_INT_BUSCON_UP BIT(26)
180 #define SLV_INT_MRL_UP BIT(25)
181 #define SLV_INT_MWL_UP BIT(24)
182 #define SLV_INT_IBI_THR BIT(23)
183 #define SLV_INT_IBI_DONE BIT(22)
184 #define SLV_INT_DEFSLVS BIT(21)
185 #define SLV_INT_TM BIT(20)
186 #define SLV_INT_ERROR BIT(19)
187 #define SLV_INT_EVENT_UP BIT(18)
188 #define SLV_INT_HJ_DONE BIT(17)
189 #define SLV_INT_MR_DONE BIT(16)
190 #define SLV_INT_DA_UPD BIT(15)
191 #define SLV_INT_SDR_FAIL BIT(14)
192 #define SLV_INT_DDR_FAIL BIT(13)
193 #define SLV_INT_M_RD_ABORT BIT(12)
194 #define SLV_INT_DDR_RX_THR BIT(11)
195 #define SLV_INT_DDR_TX_THR BIT(10)
196 #define SLV_INT_SDR_RX_THR BIT(9)
197 #define SLV_INT_SDR_TX_THR BIT(8)
198 #define SLV_INT_DDR_RX_UNF BIT(7)
199 #define SLV_INT_DDR_TX_OVF BIT(6)
200 #define SLV_INT_SDR_RX_UNF BIT(5)
201 #define SLV_INT_SDR_TX_OVF BIT(4)
202 #define SLV_INT_DDR_RD_COMP BIT(3)
203 #define SLV_INT_DDR_WR_COMP BIT(2)
204 #define SLV_INT_SDR_RD_COMP BIT(1)
205 #define SLV_INT_SDR_WR_COMP BIT(0)
206
207 #define SLV_STATUS0 0x54
208 #define SLV_STATUS0_IBI_XFRD_BYTEs(s) (((s) & GENMASK(31, 24)) >> 24)
209 #define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
210 #define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
211
212 #define SLV_STATUS1 0x58
213 #define SLV_STATUS1_SCL_IN_RST BIT(31)
214 #define SLV_STATUS1_HJ_IN_USE BIT(30)
215 #define SLV_STATUS1_NACK_NXT_PW BIT(29)
216 #define SLV_STATUS1_NACK_NXT_PR BIT(28)
217 #define SLV_STATUS1_MR_PEND BIT(27)
218 #define SLV_STATUS1_HJ_PEND BIT(26)
219 #define SLV_STATUS1_IBI_PEND BIT(25)
220 #define SLV_STATUS1_IBI_DIS BIT(24)
221 #define SLV_STATUS1_BUS_VAR BIT(23)
222 #define SLV_STATUS1_TCAM0_DIS BIT(22)
223 #define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
224 #define SLV_STATUS1_VEN_TM BIT(19)
225 #define SLV_STATUS1_HJ_DIS BIT(18)
226 #define SLV_STATUS1_MR_DIS BIT(17)
227 #define SLV_STATUS1_PROT_ERR BIT(16)
228 #define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
229 #define SLV_STATUS1_HAS_DA BIT(8)
230 #define SLV_STATUS1_DDR_RX_FULL BIT(7)
231 #define SLV_STATUS1_DDR_TX_FULL BIT(6)
232 #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
233 #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
234 #define SLV_STATUS1_SDR_RX_FULL BIT(3)
235 #define SLV_STATUS1_SDR_TX_FULL BIT(2)
236 #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
237 #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
238
239 #define SLV_IBI_CTRL 0x5c
240 #define SLV_IBI_TCAM_EVNT(x) ((x) << 27)
241 #define SLV_IBI_PL(x) ((x) << 16)
242 #define SLV_IBI_TCAM0 BIT(9)
243 #define SLV_IBI_REQ BIT(8)
244 #define SLV_IBI_AUTO_CLR_IBI 1
245 #define SLV_IBI_AUTO_CLR_PR 2
246 #define SLV_IBI_AUTO_CLR_IBI_OR_PR 3
247 #define SLV_IBI_CLEAR_TRIGGER(x) ((x) << 4)
248
249 #define CMD0_FIFO 0x60
250 #define CMD0_FIFO_IS_DDR BIT(31)
251 #define CMD0_FIFO_IS_CCC BIT(30)
252 #define CMD0_FIFO_BCH BIT(29)
253 #define XMIT_BURST_STATIC_SUBADDR 0
254 #define XMIT_SINGLE_INC_SUBADDR 1
255 #define XMIT_SINGLE_STATIC_SUBADDR 2
256 #define XMIT_BURST_WITHOUT_SUBADDR 3
257 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
258 #define CMD0_FIFO_SBCA BIT(26)
259 #define CMD0_FIFO_RSBC BIT(25)
260 #define CMD0_FIFO_IS_10B BIT(24)
261 #define CMD0_FIFO_PL_LEN(l) ((l) << 12)
262 #define CMD0_FIFO_IS_DB BIT(11)
263 #define CMD0_FIFO_PL_LEN_MAX 4095
264 #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
265 #define CMD0_FIFO_RNW BIT(0)
266
267 #define CMD1_FIFO 0x64
268 #define CMD1_FIFO_CMDID(id) ((id) << 24)
269 #define CMD1_FIFO_DB(db) (((db) & GENMASK(7, 0)) << 8)
270 #define CMD1_FIFO_CSRADDR(a) (a)
271 #define CMD1_FIFO_CCC(id) (id)
272
273 #define TX_FIFO 0x68
274
275 #define TX_FIFO_STATUS 0x6C
276
277 #define IMD_CMD0 0x70
278 #define IMD_CMD0_PL_LEN(l) ((l) << 12)
279 #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
280 #define IMD_CMD0_RNW BIT(0)
281
282 #define IMD_CMD1 0x74
283 #define IMD_CMD1_CCC(id) (id)
284
285 #define IMD_DATA 0x78
286 #define RX_FIFO 0x80
287 #define IBI_DATA_FIFO 0x84
288 #define SLV_DDR_TX_FIFO 0x88
289 #define SLV_DDR_RX_FIFO 0x8c
290 #define DDR_PREAMBLE_MASK GENMASK(19, 18)
291 #define DDR_PREAMBLE_CMD_CRC (0x1 << 18)
292 #define DDR_PREAMBLE_DATA_ABORT (0x2 << 18)
293 #define DDR_PREAMBLE_DATA_ABORT_ALT (0x3 << 18)
294 #define DDR_DATA(x) (((x) & GENMASK(17, 2)) >> 2)
295 #define DDR_EVEN_PARITY BIT(0)
296 #define DDR_ODD_PARITY BIT(1)
297 #define DDR_CRC_AND_HEADER_SIZE 0x4
298 #define DDR_CONVERT_BUF_LEN(x) (4 * (x))
299
300 #define HDR_CMD_RD BIT(15)
301 #define HDR_CMD_CODE(c) (((c) & GENMASK(6, 0)) << 8)
302 #define DDR_CRC_TOKEN (0xC << 14)
303 #define DDR_CRC_TOKEN_MASK GENMASK(17, 14)
304 #define DDR_CRC(t) (((t) & (GENMASK(13, 9))) >> 9)
305 #define DDR_CRC_WR_SETUP BIT(8)
306
307 #define CMD_IBI_THR_CTRL 0x90
308 #define IBIR_THR(t) ((t) << 24)
309 #define CMDR_THR(t) ((t) << 16)
310 #define CMDR_THR_MASK (GENMASK(20, 16))
311 #define IBI_THR(t) ((t) << 8)
312 #define CMD_THR(t) (t)
313
314 #define TX_RX_THR_CTRL 0x94
315 #define RX_THR(t) ((t) << 16)
316 #define RX_THR_MASK (GENMASK(31, 16))
317 #define TX_THR(t) (t)
318 #define TX_THR_MASK (GENMASK(15, 0))
319
320 #define SLV_DDR_TX_RX_THR_CTRL 0x98
321 #define SLV_DDR_RX_THR(t) ((t) << 16)
322 #define SLV_DDR_TX_THR(t) (t)
323
324 #define FLUSH_CTRL 0x9c
325 #define FLUSH_IBI_RESP BIT(24)
326 #define FLUSH_CMD_RESP BIT(23)
327 #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
328 #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
329 #define FLUSH_IMM_FIFO BIT(20)
330 #define FLUSH_IBI_FIFO BIT(19)
331 #define FLUSH_RX_FIFO BIT(18)
332 #define FLUSH_TX_FIFO BIT(17)
333 #define FLUSH_CMD_FIFO BIT(16)
334
335 #define SLV_CTRL 0xA0
336
337 #define SLV_PROT_ERR_TYPE 0xA4
338 #define SLV_ERR6_IBI BIT(9)
339 #define SLV_ERR6_PR BIT(8)
340 #define SLV_ERR_GETCCC BIT(7)
341 #define SLV_ERR5 BIT(6)
342 #define SLV_ERR4 BIT(5)
343 #define SLV_ERR3 BIT(4)
344 #define SLV_ERR2_PW BIT(3)
345 #define SLV_ERR2_SETCCC BIT(2)
346 #define SLV_ERR1 BIT(1)
347 #define SLV_ERR0 BIT(0)
348
349 #define SLV_STATUS2 0xA8
350 #define SLV_STATUS2_MRL(s) (((s) & GENMASK(23, 8)) >> 8)
351
352 #define SLV_STATUS3 0xAC
353 #define SLV_STATUS3_BC_FSM(s) (((s) & GENMASK(26, 16)) >> 16)
354 #define SLV_STATUS3_MWL(s) ((s) & GENMASK(15, 0))
355
356 #define TTO_PRESCL_CTRL0 0xb0
357 #define TTO_PRESCL_CTRL0_PRESCL_I2C(x) ((x) << 16)
358 #define TTO_PRESCL_CTRL0_PRESCL_I3C(x) (x)
359
360 #define TTO_PRESCL_CTRL1 0xb4
361 #define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
362 #define TTO_PRESCL_CTRL1_DIVA(x) (x)
363 #define TTO_PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
364 #define TTO_PRESCL_CTRL1_OD_LOW(x) (x)
365
366 #define DEVS_CTRL 0xb8
367 #define DEVS_CTRL_DEV_CLR_SHIFT 16
368 #define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
369 #define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
370 #define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
371 #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
372 #define MAX_DEVS 16
373
374 #define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
375 #define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
376 #define DEV_ID_RR0_HDR_CAP BIT(10)
377 #define DEV_ID_RR0_IS_I3C BIT(9)
378 #define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(7, 1) | GENMASK(15, 13))
379 #define DEV_ID_RR0_SET_DEV_ADDR(a) ((((a) << 1) & GENMASK(7, 1)) | (((a) & GENMASK(9, 7)) << 6))
380 #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | (((x) >> 6) & GENMASK(9, 7)))
381
382 #define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
383 #define DEV_ID_RR1_PID_MSB(pid) (pid)
384
385 #define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
386 #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
387 #define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
388 #define DEV_ID_RR2_DCR(dcr) (dcr)
389 #define DEV_ID_RR2_LVR(lvr) (lvr)
390
391 #define SIR_MAP(x) (0x180 + ((x) * 4))
392 #define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
393 #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
394 #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
395 #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
396 #define DEV_ROLE_SLAVE 0
397 #define DEV_ROLE_MASTER 1
398 #define SIR_MAP_DEV_ROLE(role) ((role) << 14)
399 #define SIR_MAP_DEV_SLOW BIT(13)
400 #define SIR_MAP_DEV_PL(l) ((l) << 8)
401 #define SIR_MAP_PL_MAX GENMASK(4, 0)
402 #define SIR_MAP_DEV_DA(a) ((a) << 1)
403 #define SIR_MAP_DEV_ACK BIT(0)
404
405 #define GRPADDR_LIST 0x198
406
407 #define GRPADDR_CS 0x19C
408
409 #define GPIR_WORD(x) (0x200 + ((x) * 4))
410 #define GPI_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
411
412 #define GPOR_WORD(x) (0x220 + ((x) * 4))
413 #define GPO_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
414
415 #define ASF_INT_STATUS 0x300
416 #define ASF_INT_RAW_STATUS 0x304
417 #define ASF_INT_MASK 0x308
418 #define ASF_INT_TEST 0x30c
419 #define ASF_INT_FATAL_SELECT 0x310
420 #define ASF_INTEGRITY_ERR BIT(6)
421 #define ASF_PROTOCOL_ERR BIT(5)
422 #define ASF_TRANS_TIMEOUT_ERR BIT(4)
423 #define ASF_CSR_ERR BIT(3)
424 #define ASF_DAP_ERR BIT(2)
425 #define ASF_SRAM_UNCORR_ERR BIT(1)
426 #define ASF_SRAM_CORR_ERR BIT(0)
427
428 #define ASF_SRAM_CORR_FAULT_STATUS 0x320
429 #define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
430 #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
431 #define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
432
433 #define ASF_SRAM_FAULT_STATS 0x328
434 #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
435 #define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
436
437 #define ASF_TRANS_TOUT_CTRL 0x330
438 #define ASF_TRANS_TOUT_EN BIT(31)
439 #define ASF_TRANS_TOUT_VAL(x) (x)
440
441 #define ASF_TRANS_TOUT_FAULT_MASK 0x334
442 #define ASF_TRANS_TOUT_FAULT_STATUS 0x338
443 #define ASF_TRANS_TOUT_FAULT_APB BIT(3)
444 #define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
445 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
446 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
447
448 #define ASF_PROTO_FAULT_MASK 0x340
449 #define ASF_PROTO_FAULT_STATUS 0x344
450 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
451 #define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
452 #define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
453 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
454 #define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
455 #define ASF_PROTO_FAULT_M(x) BIT(x)
456
457 /*******************************************************************************
458 * Local Constants Definition
459 ******************************************************************************/
460
461 /* TODO: this needs to be configurable in the dts...somehow */
462 #define I3C_CONTROLLER_ADDR 0x08
463
464 /* Maximum i3c devices that the IP can be built with */
465 #define I3C_MAX_DEVS 11
466 #define I3C_MAX_MSGS 10
467 #define I3C_SIR_DEFAULT_DA 0x7F
468 #define I3C_MAX_IDLE_CANCEL_WAIT_RETRIES 50
469 #define I3C_PRESCL_REG_SCALE (4)
470 #define I2C_PRESCL_REG_SCALE (5)
471 #define I3C_WAIT_FOR_IDLE_STATE_US 100
472 #define I3C_IDLE_TIMEOUT_CYC \
473 (I3C_WAIT_FOR_IDLE_STATE_US * (sys_clock_hw_cycles_per_sec() / USEC_PER_SEC))
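/*
 * Worked example (hypothetical clock): with a 100 MHz cycle counter this
 * evaluates to 100 * (100000000 / 1000000) = 10000 cycles.
 */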
474
475 /* Target T_LOW period in open-drain mode. */
476 #define I3C_BUS_TLOW_OD_MIN_NS 200
477
478 /*
479 * MIPI I3C v1.1.1 Spec defines SDA Signal Data Hold in Push Pull max as the
480 * minimum of the clock rise and fall time plus 3ns
481 */
482 #define I3C_HD_PP_DEFAULT_NS 10
483
484 /* Interrupt thresholds. */
485 /* command response fifo threshold */
486 #define I3C_CMDR_THR 1
487 /* command tx fifo threshold - unused */
488 #define I3C_CMDD_THR 1
489 /* in-band-interrupt response queue threshold */
490 #define I3C_IBIR_THR 1
491 /* tx data threshold - unused */
492 #define I3C_TX_THR 1
493
494 #define LOG_MODULE_NAME I3C_CADENCE
495 LOG_MODULE_REGISTER(I3C_CADENCE, CONFIG_I3C_CADENCE_LOG_LEVEL);
496
497 /*******************************************************************************
498 * Local Types Definition
499 ******************************************************************************/
500
501 /** Describes peripheral HW configuration determined from CONFx registers. */
502 struct cdns_i3c_hw_config {
503 /* Revision ID */
504 uint32_t rev_id;
505 /* The maximum command queue depth. */
506 uint32_t cmd_mem_depth;
507 /* The maximum command response queue depth. */
508 uint32_t cmdr_mem_depth;
509 /* The maximum RX FIFO depth. */
510 uint32_t rx_mem_depth;
511 /* The maximum TX FIFO depth. */
512 uint32_t tx_mem_depth;
513 /* The maximum DDR RX FIFO depth. */
514 uint32_t ddr_rx_mem_depth;
515 /* The maximum DDR TX FIFO depth. */
516 uint32_t ddr_tx_mem_depth;
517 /* The maximum IBIR FIFO depth. */
518 uint32_t ibir_mem_depth;
519 /* The maximum IBI FIFO depth. */
520 uint32_t ibi_mem_depth;
521 };
522
523 /* Cadence I3C/I2C Device Private Data */
524 struct cdns_i3c_i2c_dev_data {
525 /* Device id within the retaining registers. This is set after bus initialization by the
526 * controller.
527 */
528 uint8_t id;
529 };
530
531 /* Single command/transfer */
532 struct cdns_i3c_cmd {
533 uint32_t cmd0;
534 uint32_t cmd1;
535 uint32_t ddr_header;
536 uint32_t ddr_crc;
537 uint32_t len;
538 uint32_t *num_xfer;
539 void *buf;
540 uint32_t error;
541 enum i3c_sdr_controller_error_types *sdr_err;
542 enum i3c_data_rate hdr;
543 };
544
545 /* Transfer data */
546 struct cdns_i3c_xfer {
547 struct k_sem complete;
548 int ret;
549 int num_cmds;
550 struct cdns_i3c_cmd cmds[I3C_MAX_MSGS];
551 };
552
553 #ifdef CONFIG_I3C_USE_IBI
554 /* IBI transferred data */
555 struct cdns_i3c_ibi_buf {
556 uint8_t ibi_data[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];
557 uint8_t ibi_data_cnt;
558 };
559 #endif
560
561 /* Driver config */
562 struct cdns_i3c_config {
563 struct i3c_driver_config common;
564 /** base address of the controller */
565 uintptr_t base;
566 /** input frequency to the I3C Cadence */
567 uint32_t input_frequency;
568 /** Interrupt configuration function. */
569 void (*irq_config_func)(const struct device *dev);
570 /** IBID Threshold value */
571 uint8_t ibid_thr;
572 };
573
574 /* Driver instance data */
575 struct cdns_i3c_data {
576 /* common must be first! */
577 struct i3c_driver_data common;
578 const struct device *dev;
579 struct cdns_i3c_hw_config hw_cfg;
580 #ifdef CONFIG_I3C_USE_IBI
581 struct cdns_i3c_ibi_buf ibi_buf;
582 #endif
583 struct k_mutex bus_lock;
584 struct cdns_i3c_i2c_dev_data cdns_i3c_i2c_priv_data[I3C_MAX_DEVS];
585 struct cdns_i3c_xfer xfer;
586 struct i3c_target_config *target_config;
587 struct k_work deftgts_work;
588 #ifdef CONFIG_I3C_USE_IBI
589 struct k_sem ibi_hj_complete;
590 struct k_sem ibi_cr_complete;
591 #endif
592 struct k_sem ch_complete;
593 uint32_t free_rr_slots;
594 uint16_t fifo_bytes_read;
595 uint8_t max_devs;
596 };
597
598 /*******************************************************************************
599 * Global Variables Declaration
600 ******************************************************************************/
601
602 /*******************************************************************************
603 * Local Functions Declaration
604 ******************************************************************************/
605
606 /*******************************************************************************
607 * Private Functions Code
608 ******************************************************************************/
609
610 static uint8_t i3c_cdns_crc5(uint8_t crc5, uint16_t word)
611 {
612 uint8_t crc0;
613 int i;
614
615 /*
616 * crc0 = next_data_bit ^ crc[4]
618 * crc[4:0] = { crc[3:2], crc[1]^crc0, crc[0], crc0 }
619 */
620 for (i = 15; i >= 0; --i) {
621 crc0 = ((word >> i) ^ (crc5 >> 4)) & 0x1;
622 crc5 = ((crc5 << 1) & 0x1a) | (((crc5 >> 1) ^ crc0) << 2) | crc0;
623 }
624
625 return crc5 & 0x1f;
626 }
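/*
 * Illustrative sketch (not used by the driver): the HDR-DDR CRC5 is seeded
 * with 0x1F and chained over every 16-bit word of a transfer, command word
 * first. The helper below only demonstrates how i3c_cdns_crc5() is meant to
 * be chained; the word buffer and count are hypothetical.
 */
static inline uint8_t i3c_cdns_crc5_over_words(const uint16_t *words, size_t num_words)
{
	uint8_t crc = 0x1F;

	for (size_t i = 0; i < num_words; i++) {
		crc = i3c_cdns_crc5(crc, words[i]);
	}

	return crc;
}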
627
628 static uint8_t cdns_i3c_ddr_parity(uint16_t payload)
629 {
630 uint16_t pb;
631 uint8_t parity;
632
633 /* Calculate odd parity. */
634 pb = (payload >> 15) ^ (payload >> 13) ^ (payload >> 11) ^ (payload >> 9) ^ (payload >> 7) ^
635 (payload >> 5) ^ (payload >> 3) ^ (payload >> 1);
636 parity = (pb & 1) << 1;
637 /* Calculate even parity, xor'd with 1 (PA0) */
638 pb = (payload >> 14) ^ (payload >> 12) ^ (payload >> 10) ^ (payload >> 8) ^ (payload >> 6) ^
639 (payload >> 4) ^ (payload >> 2) ^ payload ^ 1;
640 parity |= (pb & 1);
641
642 return parity;
643 }
644
645 /* This prepares the DDR word from the payload by adding on the parity bits. This
646 * does not write the preamble.
647 */
648 static uint32_t prepare_ddr_word(uint16_t payload)
649 {
650 return (uint32_t)payload << 2 | cdns_i3c_ddr_parity(payload);
651 }
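/*
 * Worked example (illustrative): prepare_ddr_word(0xFFFF) yields 0x3FFFD,
 * i.e. the payload in bits [17:2], PA1 (parity of the odd-numbered payload
 * bits) clear and PA0 (parity of the even-numbered bits, xor'd with 1) set.
 * The caller still ORs in one of the DDR_PREAMBLE_* values.
 */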
652
653 /* This ensures that PA0 contains 1'b1 which allows for easier Bus Turnaround */
654 static uint16_t prepare_ddr_cmd_parity_adjustment_bit(uint16_t word)
655 {
656 uint16_t pb;
657
658 pb = (word >> 14) ^ (word >> 12) ^ (word >> 10) ^ (word >> 8) ^ (word >> 6) ^ (word >> 4) ^
659 (word >> 2);
660
661 if (pb & 1) {
662 word |= BIT(0);
663 }
664
665 return word;
666 }
667
668 /* Computes the parity bit for a 7-bit address */
669 /* Returns [7:1] 7-bit addr, [0] parity bit (set so the byte has odd parity) */
670 static uint8_t cdns_i3c_even_parity_byte(uint8_t byte)
671 {
672 uint8_t parity = 0;
673 uint8_t b = byte;
674
675 while (b) {
676 parity = !parity;
677 b = b & (b - 1);
678 }
679 b = (byte << 1) | !parity;
680
681 return b;
682 }
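/*
 * Worked example (illustrative): cdns_i3c_even_parity_byte(0x50) returns
 * 0xA1, i.e. the 7-bit address in bits [7:1] with bit [0] set so that the
 * resulting byte contains an odd number of ones.
 */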
683
684 /* Check if command response fifo is empty */
685 static inline bool cdns_i3c_cmd_rsp_fifo_empty(const struct cdns_i3c_config *config)
686 {
687 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
688
689 return ((mst_st & MST_STATUS0_CMDR_EMP) ? true : false);
690 }
691
692 /* Check if command fifo is empty */
693 static inline bool cdns_i3c_cmd_fifo_empty(const struct cdns_i3c_config *config)
694 {
695 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
696
697 return ((mst_st & MST_STATUS0_CMDD_EMP) ? true : false);
698 }
699
700 /* Check if command fifo is full */
701 static inline bool cdns_i3c_cmd_fifo_full(const struct cdns_i3c_config *config)
702 {
703 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
704
705 return ((mst_st & MST_STATUS0_CMDD_FULL) ? true : false);
706 }
707
708 /* Check if ibi response fifo is empty */
709 static inline bool cdns_i3c_ibi_rsp_fifo_empty(const struct cdns_i3c_config *config)
710 {
711 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
712
713 return ((mst_st & MST_STATUS0_IBIR_EMP) ? true : false);
714 }
715
716 /* Check if tx fifo is full */
717 static inline bool cdns_i3c_tx_fifo_full(const struct cdns_i3c_config *config)
718 {
719 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
720
721 return ((mst_st & MST_STATUS0_TX_FULL) ? true : false);
722 }
723
724 /* Check if rx fifo is full */
725 static inline bool cdns_i3c_rx_fifo_full(const struct cdns_i3c_config *config)
726 {
727 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
728
729 return ((mst_st & MST_STATUS0_RX_FULL) ? true : false);
730 }
731
732 /* Check if rx fifo is empty */
733 static inline bool cdns_i3c_rx_fifo_empty(const struct cdns_i3c_config *config)
734 {
735 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
736
737 return ((mst_st & MST_STATUS0_RX_EMP) ? true : false);
738 }
739
740 /* Check if ibi fifo is empty */
741 static inline bool cdns_i3c_ibi_fifo_empty(const struct cdns_i3c_config *config)
742 {
743 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
744
745 return ((mst_st & MST_STATUS0_IBID_EMP) ? true : false);
746 }
747
748 /* Interrupt handling */
749 static inline void cdns_i3c_interrupts_disable(const struct cdns_i3c_config *config)
750 {
751 sys_write32(MST_INT_MASK, config->base + MST_IDR);
752 }
753
754 static inline void cdns_i3c_interrupts_clear(const struct cdns_i3c_config *config)
755 {
756 sys_write32(MST_INT_MASK, config->base + MST_ICR);
757 }
758
759 /* FIFO mgmt */
760 static void cdns_i3c_write_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
761 uint32_t len)
762 {
763 const uint32_t *ptr = buf;
764 uint32_t remain, val;
765
766 for (remain = len; remain >= 4; remain -= 4) {
767 val = *ptr++;
768 sys_write32(val, config->base + TX_FIFO);
769 }
770
771 if (remain > 0) {
772 val = 0;
773 memcpy(&val, ptr, remain);
774 sys_write32(val, config->base + TX_FIFO);
775 }
776 }
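/*
 * Note (illustrative): a 6-byte buffer results in two 32-bit FIFO writes, one
 * full word followed by a zero-padded word carrying the remaining 2 bytes.
 * The DDR TX and IBI FIFO writers below use the same tail handling.
 */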
777
778 static void cdns_i3c_write_ddr_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
779 uint32_t len)
780 {
781 const uint32_t *ptr = buf;
782 uint32_t remain, val;
783
784 for (remain = len; remain >= 4; remain -= 4) {
785 val = *ptr++;
786 sys_write32(val, config->base + SLV_DDR_TX_FIFO);
787 }
788
789 if (remain > 0) {
790 val = 0;
791 memcpy(&val, ptr, remain);
792 sys_write32(val, config->base + SLV_DDR_TX_FIFO);
793 }
794 }
795
796 #ifdef CONFIG_I3C_USE_IBI
797 static void cdns_i3c_write_ibi_fifo(const struct cdns_i3c_config *config, const void *buf,
798 uint32_t len)
799 {
800 const uint32_t *ptr = buf;
801 uint32_t remain, val;
802
803 for (remain = len; remain >= 4; remain -= 4) {
804 val = *ptr++;
805 sys_write32(val, config->base + IBI_DATA_FIFO);
806 }
807
808 if (remain > 0) {
809 val = 0;
810 memcpy(&val, ptr, remain);
811 sys_write32(val, config->base + IBI_DATA_FIFO);
812 }
813 }
814 #endif /* CONFIG_I3C_USE_IBI */
815
816 static void cdns_i3c_target_read_rx_fifo(const struct device *dev)
817 {
818 const struct cdns_i3c_config *config = dev->config;
819 struct cdns_i3c_data *data = dev->data;
820 const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
821
822 /* Version 1p7 uses the full 32b FIFO width */
823 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
824 uint16_t xferred_bytes =
825 SLV_STATUS0_XFRD_BYTES(sys_read32(config->base + SLV_STATUS0));
826
827 for (int i = data->fifo_bytes_read; i < xferred_bytes; i += 4) {
828 uint32_t rx_data = sys_read32(config->base + RX_FIFO);
829 /* Call write received cb for each remaining byte */
830 for (int j = 0; j < MIN(4, xferred_bytes - i); j++) {
831 target_cb->write_received_cb(data->target_config,
832 (rx_data >> (8 * j)));
833 }
834 }
835 /*
836 * Store the transferred byte count, as the threshold interrupt may trigger
837 * again; xferred_bytes counts up to the "total" bytes received.
838 */
839 data->fifo_bytes_read = xferred_bytes;
840 } else {
841 /*
842 * Target writes only fill the first byte of the 32-bit wide
843 * FIFO on older versions of the IP
844 */
845 uint8_t rx_data = (uint8_t)sys_read32(config->base + RX_FIFO);
846
847 target_cb->write_received_cb(data->target_config, rx_data);
848 }
849 }
850
851 static int cdns_i3c_read_rx_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
852 {
853 uint32_t *ptr = buf;
854 uint32_t remain, val;
855
856 for (remain = len; remain >= 4; remain -= 4) {
857 if (cdns_i3c_rx_fifo_empty(config)) {
858 return -EIO;
859 }
860 val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
861 *ptr++ = val;
862 }
863
864 if (remain > 0) {
865 if (cdns_i3c_rx_fifo_empty(config)) {
866 return -EIO;
867 }
868 val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
869 memcpy(ptr, &val, remain);
870 }
871
872 return 0;
873 }
874
875 static int cdns_i3c_read_rx_fifo_ddr_xfer(const struct cdns_i3c_config *config, void *buf,
876 uint32_t len, uint32_t ddr_header)
877 {
878 uint16_t *ptr = buf;
879 uint32_t val;
880 uint32_t preamble;
881 uint8_t crc5 = 0x1F;
882
883 /*
884 * TODO: This function does not support threshold interrupts; it is expected that the
885 * whole packet is within the FIFO and not split across multiple calls to this function.
886 */
887 crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(ddr_header));
888
889 for (int i = 0; i < len; i++) {
890 if (cdns_i3c_rx_fifo_empty(config)) {
891 return -EIO;
892 }
893 val = sys_read32(config->base + RX_FIFO);
894 preamble = (val & DDR_PREAMBLE_MASK);
895
896 if (preamble == DDR_PREAMBLE_DATA_ABORT ||
897 preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
898 *ptr++ = sys_cpu_to_be16((uint16_t)DDR_DATA(val));
899 crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(val));
900 } else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
901 ((val & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
902 uint8_t crc = (uint8_t)DDR_CRC(val);
903
904 if (crc5 != crc) {
905 LOG_ERR("DDR RX crc error");
906 return -EIO;
907 }
908 }
909 }
910
911 return 0;
912 }
913
914 static inline int cdns_i3c_wait_for_idle(const struct device *dev)
915 {
916 const struct cdns_i3c_config *config = dev->config;
917 uint32_t start_time = k_cycle_get_32();
918
919 /**
920 * Spin waiting for device to go idle. It is unlikely that this will
921 * actually take any time unless the last transaction came immediately
922 * after an error condition.
923 */
924 while (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_IDLE)) {
925 if (k_cycle_get_32() - start_time > I3C_IDLE_TIMEOUT_CYC) {
926 return -EAGAIN;
927 }
928 }
929
930 return 0;
931 }
932
933 static void cdns_i3c_set_prescalers(const struct device *dev)
934 {
935 struct cdns_i3c_data *data = dev->data;
936 const struct cdns_i3c_config *config = dev->config;
937 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
938
939 /* These formulas are from section 6.2.1 of the Cadence I3C Master User Guide. */
940 uint32_t prescl_i3c = DIV_ROUND_UP(config->input_frequency,
941 (ctrl_config->scl.i3c * I3C_PRESCL_REG_SCALE)) -
942 1;
943 uint32_t prescl_i2c = DIV_ROUND_UP(config->input_frequency,
944 (ctrl_config->scl.i2c * I2C_PRESCL_REG_SCALE)) -
945 1;
946
947 /* update with actual value */
948 ctrl_config->scl.i3c = config->input_frequency / ((prescl_i3c + 1) * I3C_PRESCL_REG_SCALE);
949 ctrl_config->scl.i2c = config->input_frequency / ((prescl_i2c + 1) * I2C_PRESCL_REG_SCALE);
950
951 LOG_DBG("%s: I3C speed = %u, PRESCL_CTRL0.i3c = 0x%x", dev->name, ctrl_config->scl.i3c,
952 prescl_i3c);
953 LOG_DBG("%s: I2C speed = %u, PRESCL_CTRL0.i2c = 0x%x", dev->name, ctrl_config->scl.i2c,
954 prescl_i2c);
955
956 /* Calculate the OD_LOW value for the desired open-drain T_low period (I3C_BUS_TLOW_OD_MIN_NS). */
957 uint32_t pres_step = 1000000000 / (ctrl_config->scl.i3c * 4);
958 int32_t od_low = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
959
960 if (od_low < 0) {
961 od_low = 0;
962 }
963 LOG_DBG("%s: PRESCL_CTRL1.od_low = 0x%x", dev->name, od_low);
964
965 /* disable in order to update timing */
966 uint32_t ctrl = sys_read32(config->base + CTRL);
967
968 if (ctrl & CTRL_DEV_EN) {
969 sys_write32(~CTRL_DEV_EN & ctrl, config->base + CTRL);
970 }
971
972 sys_write32(PRESCL_CTRL0_I3C(prescl_i3c) | PRESCL_CTRL0_I2C(prescl_i2c),
973 config->base + PRESCL_CTRL0);
974
975 /* Sets the open drain low time relative to the push-pull. */
976 sys_write32(PRESCL_CTRL1_OD_LOW(od_low & PRESCL_CTRL1_OD_LOW_MASK),
977 config->base + PRESCL_CTRL1);
978
979 /* reenable */
980 if (ctrl & CTRL_DEV_EN) {
981 sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
982 }
983 }
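/*
 * Worked example (hypothetical 100 MHz input clock, 12.5 MHz I3C and 400 kHz
 * I2C targets): prescl_i3c = ceil(100000000 / (12500000 * 4)) - 1 = 1 and
 * prescl_i2c = ceil(100000000 / (400000 * 5)) - 1 = 49, which give back
 * exactly 12.5 MHz and 400 kHz. pres_step = 1000000000 / (12500000 * 4) =
 * 20 ns, so od_low = ceil(200 / 20) - 2 = 8.
 */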
984
985 /**
986 * @brief Compute RR0 Value from addr
987 *
988 * @param addr Address of the target
989 *
990 * @return RR0 value
991 */
992 static uint32_t prepare_rr0_dev_address(uint16_t addr)
993 {
994 /* RR0[7:1] = addr[6:0] | parity^[0] */
995 uint32_t ret = cdns_i3c_even_parity_byte(addr);
996
997 if (addr & GENMASK(9, 7)) {
998 /* RR0[15:13] = addr[9:7] */
999 ret |= (addr & GENMASK(9, 7)) << 6;
1000 /* RR0[11] = 10b lvr addr */
1001 ret |= DEV_ID_RR0_LVR_EXT_ADDR;
1002 }
1003
1004 return ret;
1005 }
1006
1007 /**
1008 * @brief Program Retaining Registers with device lists
1009 *
1010 * This will program the retaining register with the controller's own address; this should
1011 * only be called if it is the primary controller.
1012 *
1013 * @param dev Pointer to controller device driver instance.
1014 */
1015 static void cdns_i3c_program_controller_retaining_reg(const struct device *dev)
1016 {
1017 const struct cdns_i3c_config *config = dev->config;
1018 struct cdns_i3c_data *data = dev->data;
1019 /* Set controller retaining register */
1020 uint8_t controller_da = I3C_CONTROLLER_ADDR;
1021
1022 if (!i3c_addr_slots_is_free(&data->common.attached_dev.addr_slots, controller_da)) {
1023 controller_da =
1024 i3c_addr_slots_next_free_find(&data->common.attached_dev.addr_slots, 0);
1025 LOG_DBG("%s: 0x%02x DA selected for controller", dev->name, controller_da);
1026 }
1027 sys_write32(prepare_rr0_dev_address(controller_da) | DEV_ID_RR0_IS_I3C,
1028 config->base + DEV_ID_RR0(0));
1029 /* Mark the address as I3C device */
1030 i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, controller_da);
1031 }
1032
1033 #ifdef CONFIG_I3C_USE_IBI
1034 static int cdns_i3c_ibi_hj_response(const struct device *dev, bool ack)
1035 {
1036 const struct cdns_i3c_config *config = dev->config;
1037
1038 if (ack) {
1039 sys_write32(CTRL_HJ_ACK | sys_read32(config->base + CTRL), config->base + CTRL);
1040 } else {
1041 sys_write32(~CTRL_HJ_ACK & sys_read32(config->base + CTRL), config->base + CTRL);
1042 }
1043
1044 return 0;
1045 }
1046
1047 static int cdns_i3c_controller_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
1048 {
1049 uint32_t sir_map;
1050 uint32_t sir_cfg;
1051 const struct cdns_i3c_config *config = dev->config;
1052 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1053 struct i3c_ccc_events i3c_events;
1054 int ret = 0;
1055
1056 /* Check if the device can issue IBI TIRs or CR */
1057 if (!i3c_device_is_ibi_capable(target) && !i3c_device_is_controller_capable(target)) {
1058 ret = -EINVAL;
1059 return ret;
1060 }
1061
1062 /* TODO: check for duplicate in SIR */
1063
1064 sir_cfg = SIR_MAP_DEV_ROLE(I3C_BCR_DEVICE_ROLE(target->bcr)) |
1065 SIR_MAP_DEV_DA(target->dynamic_addr) |
1066 SIR_MAP_DEV_PL(target->data_length.max_ibi);
1067 /* ACK if there is an IBI TIR callback or if the target is controller capable */
1068 if ((target->ibi_cb != NULL) || i3c_device_is_controller_capable(target)) {
1069 sir_cfg |= SIR_MAP_DEV_ACK;
1070 }
1071 if (target->bcr & I3C_BCR_MAX_DATA_SPEED_LIMIT) {
1072 sir_cfg |= SIR_MAP_DEV_SLOW;
1073 }
1074
1075 LOG_DBG("%s: IBI enabling for 0x%02x (BCR 0x%02x)", dev->name, target->dynamic_addr,
1076 target->bcr);
1077
1078 /* Tell target to enable IBI TIRs and CRs */
1079 i3c_events.events = I3C_CCC_EVT_INTR | I3C_CCC_EVT_CR;
1080 ret = i3c_ccc_do_events_set(target, true, &i3c_events);
1081 if (ret != 0) {
1082 LOG_ERR("%s: Error sending IBI ENEC for 0x%02x (%d)", dev->name,
1083 target->dynamic_addr, ret);
1084 return ret;
1085 }
1086
1087 sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1088 sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1089 sir_map |= SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, sir_cfg);
1090
1091 sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1092
1093 return ret;
1094 }
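/*
 * Usage sketch (illustrative, not part of the driver): applications normally
 * reach this path through the generic API, e.g.
 *
 *   ret = i3c_ibi_enable(desc);
 *
 * where "desc" is a previously obtained struct i3c_device_desc for the
 * target.
 */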
1095
1096 static int cdns_i3c_controller_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
1097 {
1098 uint32_t sir_map;
1099 const struct cdns_i3c_config *config = dev->config;
1100 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1101 struct i3c_ccc_events i3c_events;
1102 int ret = 0;
1103
1104 if (!i3c_device_is_ibi_capable(target)) {
1105 ret = -EINVAL;
1106 return ret;
1107 }
1108
1109 /* Tell target to disable IBI */
1110 i3c_events.events = I3C_CCC_EVT_INTR;
1111 ret = i3c_ccc_do_events_set(target, false, &i3c_events);
1112 if (ret != 0) {
1113 LOG_ERR("%s: Error sending IBI DISEC for 0x%02x (%d)", dev->name,
1114 target->dynamic_addr, ret);
1115 return ret;
1116 }
1117
1118 sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1119 sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1120 sir_map |=
1121 SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1122 sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1123
1124 return ret;
1125 }
1126
1127 static int cdns_i3c_target_ibi_raise_hj(const struct device *dev)
1128 {
1129 const struct cdns_i3c_config *config = dev->config;
1130 struct cdns_i3c_data *data = dev->data;
1131 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1132
1133 /* HJ requests should not be done by primary controllers */
1134 if (!ctrl_config->is_secondary) {
1135 LOG_ERR("%s: controller is primary, HJ not available", dev->name);
1136 return -ENOTSUP;
1137 }
1138 /* Check if target already has a DA assigned to it */
1139 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA) {
1140 LOG_ERR("%s: HJ not available, DA already assigned", dev->name);
1141 return -EACCES;
1142 }
1143 /* Check if a DISEC CCC with the DISHJ field set has been received (HJ requests disabled) */
1144 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HJ_DIS) {
1145 LOG_ERR("%s: HJ requests are currently disabled by DISEC", dev->name);
1146 return -EAGAIN;
1147 }
1148
1149 sys_write32(CTRL_HJ_INIT | sys_read32(config->base + CTRL), config->base + CTRL);
1150 k_sem_reset(&data->ibi_hj_complete);
1151 if (k_sem_take(&data->ibi_hj_complete, K_MSEC(500)) != 0) {
1152 LOG_ERR("%s: timeout waiting for DAA after HJ", dev->name);
1153 return -ETIMEDOUT;
1154 }
1155 return 0;
1156 }
1157
1158 static int cdns_i3c_target_ibi_raise_cr(const struct device *dev)
1159 {
1160 const struct cdns_i3c_config *config = dev->config;
1161 struct cdns_i3c_data *data = dev->data;
1162
1163 /* Check if target does not have a DA assigned to it */
1164 if (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA)) {
1165 LOG_ERR("%s: CR not available, DA not assigned", dev->name);
1166 return -EACCES;
1167 }
1168 /* Check if a DISEC CCC with the DISMR field set has been received (CR requests disabled) */
1169 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_MR_DIS) {
1170 LOG_ERR("%s: CR requests are currently disabled by DISEC", dev->name);
1171 return -EAGAIN;
1172 }
1173
1174 sys_write32(CTRL_MST_INIT | sys_read32(config->base + CTRL), config->base + CTRL);
1175 k_sem_reset(&data->ibi_cr_complete);
1176 if (k_sem_take(&data->ibi_cr_complete, K_MSEC(500)) != 0) {
1177 LOG_ERR("%s: timeout waiting for GETACCCR after CR", dev->name);
1178 return -ETIMEDOUT;
1179 }
1180 return 0;
1181 }
1182
1183 static int cdns_i3c_target_ibi_raise_intr(const struct device *dev, struct i3c_ibi *request)
1184 {
1185 const struct cdns_i3c_config *config = dev->config;
1186 const struct cdns_i3c_data *data = dev->data;
1187 uint32_t ibi_ctrl_val;
1188
1189 LOG_DBG("%s: issuing IBI TIR", dev->name);
1190
1191 /* Check if target does not have a DA assigned to it */
1192 if (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA)) {
1193 LOG_ERR("%s: TIR not available, DA not assigned", dev->name);
1194 return -EACCES;
1195 }
1196 /* Check if a DISEC CCC with the DISINT field set has been received (TIR requests disabled) */
1197 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_IBI_DIS) {
1198 LOG_ERR("%s: TIR requests are currently disabled by DISEC", dev->name);
1199 return -EAGAIN;
1200 }
1201
1202 /*
1203 * Ensure data will fit within FIFO
1204 *
1205 * TODO: This limitation prevents burst transfers greater than the
1206 * FIFO sizes and should be replaced with an implementation that
1207 * utilizes the IBI data threshold interrupts.
1208 */
1209 if (request->payload_len > data->hw_cfg.ibi_mem_depth) {
1210 LOG_ERR("%s: payload too large for IBI TIR", dev->name);
1211 return -ENOMEM;
1212 }
1213
1214 cdns_i3c_write_ibi_fifo(config, request->payload, request->payload_len);
1215
1216 /* Write Payload Length and Start Condition */
1217 ibi_ctrl_val = sys_read32(config->base + SLV_IBI_CTRL);
1218 ibi_ctrl_val |= SLV_IBI_PL(request->payload_len);
1219 ibi_ctrl_val |= SLV_IBI_REQ;
1220 sys_write32(ibi_ctrl_val, config->base + SLV_IBI_CTRL);
1221 return 0;
1222 }
1223
1224 static int cdns_i3c_target_ibi_raise(const struct device *dev, struct i3c_ibi *request)
1225 {
1226 const struct cdns_i3c_config *config = dev->config;
1227 struct cdns_i3c_data *data = dev->data;
1228
1229 __ASSERT_NO_MSG(request != NULL);
1230
1231 /* make sure we are not currently the active controller */
1232 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
1233 return -EACCES;
1234 }
1235
1236 switch (request->ibi_type) {
1237 case I3C_IBI_TARGET_INTR:
1238 /* Check IP revision since older versions of the CDNS IP do not support raising IBI TIRs */
1239 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1240 return cdns_i3c_target_ibi_raise_intr(dev, request);
1241 } else {
1242 return -ENOTSUP;
1243 }
1244 case I3C_IBI_CONTROLLER_ROLE_REQUEST:
1245 return cdns_i3c_target_ibi_raise_cr(dev);
1246 case I3C_IBI_HOTJOIN:
1247 return cdns_i3c_target_ibi_raise_hj(dev);
1248 default:
1249 return -EINVAL;
1250 }
1251 }
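/*
 * Usage sketch (illustrative, not part of the driver): a target application
 * raises an IBI TIR through the generic API, for example
 *
 *   struct i3c_ibi request = {
 *           .ibi_type = I3C_IBI_TARGET_INTR,
 *           .payload = buf,
 *           .payload_len = sizeof(buf),
 *   };
 *   ret = i3c_ibi_raise(dev, &request);
 *
 * where "buf" is a hypothetical payload buffer; the request ends up in
 * cdns_i3c_target_ibi_raise() above.
 */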
1252 #endif
1253
1254 static void cdns_i3c_cancel_transfer(const struct device *dev)
1255 {
1256 struct cdns_i3c_data *data = dev->data;
1257 const struct cdns_i3c_config *config = dev->config;
1258 uint32_t val;
1259 uint32_t retry_count;
1260
1261 /* Disable further interrupts */
1262 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1263
1264 /* Ignore if no pending transfer */
1265 if (data->xfer.num_cmds == 0) {
1266 return;
1267 }
1268
1269 data->xfer.num_cmds = 0;
1270
1271 /* Clear main enable bit to disable further transactions */
1272 sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);
1273
1274 /**
1275 * Spin waiting for device to go idle. It is unlikely that this will
1276 * actually take any time since we only get here if a transaction didn't
1277 * complete in a long time.
1278 */
1279 retry_count = I3C_MAX_IDLE_CANCEL_WAIT_RETRIES;
1280 while (retry_count--) {
1281 val = sys_read32(config->base + MST_STATUS0);
1282 if (val & MST_STATUS0_IDLE) {
1283 break;
1284 }
1285 k_msleep(10);
1286 }
1287 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_IDLE)) {
1288 data->xfer.ret = -ETIMEDOUT;
1289 }
1290
1291 /**
1292 * Flush all queues.
1293 */
1294 sys_write32(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | FLUSH_CMD_RESP,
1295 config->base + FLUSH_CTRL);
1296
1297 /* Re-enable device */
1298 sys_write32(CTRL_DEV_EN | sys_read32(config->base + CTRL), config->base + CTRL);
1299 }
1300
1301 /**
1302 * @brief Start a I3C/I2C Transfer
1303 *
1304 * This is to be called from a I3C/I2C transfer function. This will write
1305 * all data to tx and cmd fifos
1306 *
1307 * @param dev Pointer to controller device driver instance.
1308 */
1309 static void cdns_i3c_start_transfer(const struct device *dev)
1310 {
1311 struct cdns_i3c_data *data = dev->data;
1312 const struct cdns_i3c_config *config = dev->config;
1313 struct cdns_i3c_xfer *xfer = &data->xfer;
1314
1315 /* Ensure no pending command response queue threshold interrupt */
1316 sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR);
1317
1318 /* Make sure RX FIFO is empty. */
1319 while (!cdns_i3c_rx_fifo_empty(config)) {
1320 (void)sys_read32(config->base + RX_FIFO);
1321 }
1322 /* Make sure CMDR FIFO is empty too */
1323 while (!cdns_i3c_cmd_rsp_fifo_empty(config)) {
1324 (void)sys_read32(config->base + CMDR);
1325 }
1326
1327 /* Write all tx data to fifo */
1328 for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1329 if (xfer->cmds[i].hdr == I3C_DATA_RATE_SDR) {
1330 if (!(xfer->cmds[i].cmd0 & CMD0_FIFO_RNW)) {
1331 cdns_i3c_write_tx_fifo(config, xfer->cmds[i].buf,
1332 xfer->cmds[i].len);
1333 }
1334 } else if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1335 /* DDR Xfer requires sending header block*/
1336 cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_header,
1337 DDR_CRC_AND_HEADER_SIZE);
1338 /* If not read operation need to send data + crc of data*/
1339 if (!(DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1340 uint8_t *buf = (uint8_t *)xfer->cmds[i].buf;
1341 uint32_t ddr_message = 0;
1342 uint16_t ddr_data_payload = sys_get_be16(&buf[0]);
1343 /* HDR-DDR Data Words */
1344 ddr_message = (DDR_PREAMBLE_DATA_ABORT |
1345 prepare_ddr_word(ddr_data_payload));
1346 cdns_i3c_write_tx_fifo(config, &ddr_message,
1347 DDR_CRC_AND_HEADER_SIZE);
1348 for (int j = 2; j < ((xfer->cmds[i].len - 2) * 2); j += 2) {
1349 ddr_data_payload = sys_get_be16(&buf[j]);
1350 ddr_message = (DDR_PREAMBLE_DATA_ABORT_ALT |
1351 prepare_ddr_word(ddr_data_payload));
1352 cdns_i3c_write_tx_fifo(config, &ddr_message,
1353 DDR_CRC_AND_HEADER_SIZE);
1354 }
1355 /* HDR-DDR CRC Word */
1356 cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_crc,
1357 DDR_CRC_AND_HEADER_SIZE);
1358 }
1359 } else {
1360 xfer->ret = -ENOTSUP;
1361 return;
1362 }
1363 }
1364
1365 /* Write all data to cmd fifos */
1366 for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1367 /* The command ID is just the msg index. */
1368 xfer->cmds[i].cmd1 |= CMD1_FIFO_CMDID(i);
1369 sys_write32(xfer->cmds[i].cmd1, config->base + CMD1_FIFO);
1370 sys_write32(xfer->cmds[i].cmd0, config->base + CMD0_FIFO);
1371
1372 if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1373 sys_write32(0x00, config->base + CMD1_FIFO);
1374 if ((DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1375 sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(1),
1376 config->base + CMD0_FIFO);
1377 } else {
1378 sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(xfer->cmds[i].len),
1379 config->base + CMD0_FIFO);
1380 }
1381 }
1382 }
1383
1384 /* kickoff transfer */
1385 sys_write32(CTRL_MCS | sys_read32(config->base + CTRL), config->base + CTRL);
1386 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IER);
1387 }
1388
1389 /**
1390 * @brief Send Common Command Code (CCC).
1391 *
1392 * @see i3c_do_ccc
1393 *
1394 * @param dev Pointer to controller device driver instance.
1395 * @param payload Pointer to CCC payload.
1396 *
1397 * @return @see i3c_do_ccc
1398 */
1399 static int cdns_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload)
1400 {
1401 const struct cdns_i3c_config *config = dev->config;
1402 struct cdns_i3c_data *data = dev->data;
1403 struct cdns_i3c_cmd *cmd;
1404 int ret = 0;
1405 uint8_t num_cmds = 0;
1406
1407 /* make sure we are currently the active controller */
1408 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1409 return -EACCES;
1410 }
1411
1412 if (payload == NULL) {
1413 return -EINVAL;
1414 }
1415
1416 /*
1417 * Ensure data will fit within FIFOs.
1418 *
1419 * TODO: This limitation prevents burst transfers greater than the
1420 * FIFO sizes and should be replaced with an implementation that
1421 * utilizes the RX/TX data threshold interrupts.
1422 */
1423 uint32_t num_msgs =
1424 1 + ((payload->ccc.data_len > 0) ? payload->targets.num_targets
1425 : MAX(payload->targets.num_targets - 1, 0));
1426 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1427 LOG_ERR("%s: Too many messages", dev->name);
1428 return -ENOMEM;
1429 }
1430
1431 uint32_t rxsize = 0;
1432 /* defining byte is stored in a separate register for direct CCCs */
1433 uint32_t txsize =
1434 i3c_ccc_is_payload_broadcast(payload) ? ROUND_UP(payload->ccc.data_len, 4) : 0;
1435
1436 for (int i = 0; i < payload->targets.num_targets; i++) {
1437 if (payload->targets.payloads[i].rnw) {
1438 rxsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1439 } else {
1440 txsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1441 }
1442 }
1443 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
1444 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
1445 return -ENOMEM;
1446 }
1447
1448 LOG_DBG("%s: CCC[0x%02x]", dev->name, payload->ccc.id);
1449
1450 k_mutex_lock(&data->bus_lock, K_FOREVER);
1451
1452 /* wait for idle */
1453 ret = cdns_i3c_wait_for_idle(dev);
1454 if (ret != 0) {
1455 goto error;
1456 }
1457
1458 /* if this is a direct CCC */
1459 if (!i3c_ccc_is_payload_broadcast(payload)) {
1460 /* if the CCC has no data bytes, then the target payload must be in
1461 * the same command buffer
1462 */
1463 for (int i = 0; i < payload->targets.num_targets; i++) {
1464 cmd = &data->xfer.cmds[i];
1465 num_cmds++;
1466 cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1467 cmd->cmd0 = CMD0_FIFO_IS_CCC;
1468 /* if there is a defining byte */
1469 if (payload->ccc.data_len == 1) {
1470 /* Only revision 1p7 supports defining byte for direct CCCs */
1471 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1472 cmd->cmd0 |= CMD0_FIFO_IS_DB;
1473 cmd->cmd1 |= CMD1_FIFO_DB(payload->ccc.data[0]);
1474 } else {
1475 LOG_ERR("%s: Defining Byte with Direct CCC not supported "
1476 "with rev %lup%lu",
1477 dev->name, REV_ID_REV_MAJOR(data->hw_cfg.rev_id),
1478 REV_ID_REV_MINOR(data->hw_cfg.rev_id));
1479 ret = -ENOTSUP;
1480 goto error;
1481 }
1482 } else if (payload->ccc.data_len > 1) {
1483 LOG_ERR("%s: Defining Byte length greater than 1", dev->name);
1484 ret = -EINVAL;
1485 goto error;
1486 }
1487 /* For a direct CCC sent to multiple targets, BCH is set only on the
1488 * first command and RSBC is set on every command except the last;
1489 * with a single target, BCH is set and RSBC is left clear.
1490 */
1491 if (i == 0) {
1492 cmd->cmd0 |= CMD0_FIFO_BCH;
1493 }
1494 if (i < (payload->targets.num_targets - 1)) {
1495 cmd->cmd0 |= CMD0_FIFO_RSBC;
1496 }
1497 cmd->buf = payload->targets.payloads[i].data;
1498 cmd->len = payload->targets.payloads[i].data_len;
1499 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(payload->targets.payloads[i].addr) |
1500 CMD0_FIFO_PL_LEN(payload->targets.payloads[i].data_len);
1501 if (payload->targets.payloads[i].rnw) {
1502 cmd->cmd0 |= CMD0_FIFO_RNW;
1503 }
1504 cmd->hdr = I3C_DATA_RATE_SDR;
1505 /*
1506 * write the address of num_xfer and err which is to be updated upon
1507 * message completion
1508 */
1509 cmd->num_xfer = &(payload->targets.payloads[i].num_xfer);
1510 cmd->sdr_err = &(payload->targets.payloads[i].err);
1511 }
1512 } else {
1513 cmd = &data->xfer.cmds[0];
1514 num_cmds++;
1515 cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1516 cmd->cmd0 = CMD0_FIFO_IS_CCC | CMD0_FIFO_BCH;
1517 cmd->hdr = I3C_DATA_RATE_SDR;
1518
1519 if (payload->ccc.data_len > 0) {
1520 /* Write additional data for CCC if needed */
1521 cmd->buf = payload->ccc.data;
1522 cmd->len = payload->ccc.data_len;
1523 cmd->cmd0 |= CMD0_FIFO_PL_LEN(payload->ccc.data_len);
1524 /* write the address of num_xfer which is to be updated upon message
1525 * completion
1526 */
1527 cmd->num_xfer = &(payload->ccc.num_xfer);
1528 } else {
1529 /* no data to transfer */
1530 cmd->len = 0;
1531 cmd->num_xfer = NULL;
1532 }
1533 cmd->sdr_err = &(payload->ccc.err);
1534 }
1535
1536 data->xfer.ret = -ETIMEDOUT;
1537 data->xfer.num_cmds = num_cmds;
1538
1539 cdns_i3c_start_transfer(dev);
1540 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
1541 cdns_i3c_cancel_transfer(dev);
1542 }
1543
1544 if (data->xfer.ret < 0) {
1545 LOG_ERR("%s: CCC[0x%02x] error (%d)", dev->name, payload->ccc.id, data->xfer.ret);
1546 }
1547
1548 ret = data->xfer.ret;
1549
1550 /* TODO: decide if this is the right approach or add a new separate API for CH */
1551 /* Wait for Controller Handoff to finish */
1552 if (payload->ccc.id == I3C_CCC_GETACCCR) {
1553 ret = k_sem_take(&data->ch_complete, K_MSEC(1000));
1554 }
1555 error:
1556 k_mutex_unlock(&data->bus_lock);
1557
1558 return ret;
1559 }
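/*
 * Usage sketch (illustrative): a broadcast CCC with no payload, e.g. RSTDAA,
 * reaches this function through the generic helper as
 *
 *   struct i3c_ccc_payload payload = { .ccc.id = I3C_CCC_RSTDAA };
 *   ret = i3c_do_ccc(dev, &payload);
 */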
1560
1561 /**
1562 * @brief Perform Dynamic Address Assignment.
1563 *
1564 * @see i3c_do_daa
1565 *
1566 * @param dev Pointer to controller device driver instance.
1567 *
1568 * @return @see i3c_do_daa
1569 */
1570 static int cdns_i3c_do_daa(const struct device *dev)
1571 {
1572 struct cdns_i3c_data *data = dev->data;
1573 const struct cdns_i3c_config *config = dev->config;
1574 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1575 uint8_t last_addr = 0;
1576
1577 /* DAA should not be done by secondary controllers */
1578 if (ctrl_config->is_secondary) {
1579 return -EACCES;
1580 }
1581
1582 /* read dev active reg */
1583 uint32_t olddevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1584 /* ignore the controller register */
1585 olddevs |= BIT(0);
1586
1587 /* Assign dynamic addresses to available RRs */
1588 /* Loop through each clear bit */
1589 for (uint8_t i = find_lsb_set(~olddevs); i <= data->max_devs; i++) {
1590 uint8_t rr_idx = i - 1;
1591
1592 if (~olddevs & BIT(rr_idx)) {
1593 /* Read RRx registers */
1594 last_addr = i3c_addr_slots_next_free_find(
1595 &data->common.attached_dev.addr_slots, last_addr + 1);
1596 /* Write RRx registers */
1597 sys_write32(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
1598 config->base + DEV_ID_RR0(rr_idx));
1599 sys_write32(0, config->base + DEV_ID_RR1(rr_idx));
1600 sys_write32(0, config->base + DEV_ID_RR2(rr_idx));
1601 }
1602 }
1603
1604 /* the Cadence I3C IP will assign an address for it from the RR */
1605 struct i3c_ccc_payload entdaa_ccc;
1606
1607 memset(&entdaa_ccc, 0, sizeof(entdaa_ccc));
1608 entdaa_ccc.ccc.id = I3C_CCC_ENTDAA;
1609
1610 int status = cdns_i3c_do_ccc(dev, &entdaa_ccc);
1611
1612 if (status != 0) {
1613 return status;
1614 }
1615
1616 /* read again dev active reg */
1617 uint32_t newdevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1618 /* look for new bits that were set */
1619 newdevs &= ~olddevs;
1620
1621 if (newdevs) {
1622 /* loop through each set bit for new devices */
1623 for (uint8_t i = find_lsb_set(newdevs); i <= find_msb_set(newdevs); i++) {
1624 uint8_t rr_idx = i - 1;
1625
1626 if (newdevs & BIT(rr_idx)) {
1627 /* Read RRx registers */
1628 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(rr_idx));
1629 uint32_t dev_id_rr1 = sys_read32(config->base + DEV_ID_RR1(rr_idx));
1630 uint32_t dev_id_rr2 = sys_read32(config->base + DEV_ID_RR2(rr_idx));
1631
1632 uint64_t pid = ((uint64_t)dev_id_rr1 << 16) + (dev_id_rr2 >> 16);
1633 uint8_t dyn_addr = (dev_id_rr0 & 0xFE) >> 1;
1634 uint8_t bcr = dev_id_rr2 >> 8;
1635 uint8_t dcr = dev_id_rr2 & 0xFF;
1636
1637 const struct i3c_device_id i3c_id = I3C_DEVICE_ID(pid);
1638 struct i3c_device_desc *target = i3c_device_find(dev, &i3c_id);
1639
1640 if (!target) {
1641 /* Target found that is not known, allocate a desc */
1642 target = i3c_device_desc_alloc();
1643 if (target) {
1644 /*
1645 * able to allocate a descriptor
1646 * write all known values
1647 */
1648 *(const struct device **)&target->bus = dev;
1649 *(uint64_t *)&target->pid = pid;
1650 target->dynamic_addr = dyn_addr;
1651 target->bcr = bcr;
1652 target->dcr = dcr;
1653 /* attach it to the slist */
1654 sys_slist_append(
1655 &data->common.attached_dev.devices.i3c,
1656 &target->node);
1657
1658 data->cdns_i3c_i2c_priv_data[rr_idx].id = rr_idx;
1659 target->controller_priv =
1660 &(data->cdns_i3c_i2c_priv_data[rr_idx]);
1661 }
1662
1663 LOG_INF("%s: PID 0x%012llx is not in registered device "
1664 "list, given DA 0x%02x",
1665 dev->name, pid, dyn_addr);
1666 } else {
1667 target->dynamic_addr = dyn_addr;
1668 target->bcr = bcr;
1669 target->dcr = dcr;
1670
1671 data->cdns_i3c_i2c_priv_data[rr_idx].id = rr_idx;
1672 target->controller_priv =
1673 &(data->cdns_i3c_i2c_priv_data[rr_idx]);
1674
1675 LOG_DBG("%s: PID 0x%012llx assigned dynamic address 0x%02x",
1676 dev->name, pid, dyn_addr);
1677 }
1678 i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots,
1679 dyn_addr);
1680 }
1681 }
1682 } else {
1683 LOG_DBG("%s: ENTDAA: No devices found", dev->name);
1684 }
1685
1686 /* mark slot as not free, may already be set if already attached */
1687 data->free_rr_slots &= ~newdevs;
1688
1689 /* Unmask Hot-Join request interrupts. HJ will send DISEC HJ from the CTRL value */
1690 struct i3c_ccc_events i3c_events;
1691
1692 i3c_events.events = I3C_CCC_EVT_HJ;
1693 status = i3c_ccc_do_events_all_set(dev, true, &i3c_events);
1694 if (status != 0) {
1695 LOG_DBG("%s: Broadcast ENEC was NACKed", dev->name);
1696 }
1697
1698 return 0;
1699 }
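
/*
 * Usage sketch (illustrative only, not part of this driver): an application
 * normally triggers DAA through the generic i3c_do_daa() API, which dispatches
 * to cdns_i3c_do_daa() above. The "i3c0" node label is an assumption.
 *
 *   const struct device *bus = DEVICE_DT_GET(DT_NODELABEL(i3c0));
 *
 *   if (device_is_ready(bus)) {
 *       int err = i3c_do_daa(bus);
 *
 *       if (err != 0) {
 *           LOG_ERR("DAA failed (%d)", err);
 *       }
 *   }
 */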
1700
1701 /**
1702 * @brief Configure I2C hardware.
1703 *
1704 * @param dev Pointer to controller device driver instance.
1705 * @param config Value of the configuration parameters.
1706 *
1707 * @retval 0 If successful.
1708 * @retval -EINVAL If invalid configure parameters.
1709 * @retval -EIO General Input/Output errors.
1710 * @retval -ENOSYS If not implemented.
1711 */
1712 static int cdns_i3c_i2c_api_configure(const struct device *dev, uint32_t config)
1713 {
1714 struct cdns_i3c_data *data = dev->data;
1715 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1716
1717 switch (I2C_SPEED_GET(config)) {
1718 case I2C_SPEED_STANDARD:
1719 ctrl_config->scl.i2c = 100000;
1720 break;
1721 case I2C_SPEED_FAST:
1722 ctrl_config->scl.i2c = 400000;
1723 break;
1724 case I2C_SPEED_FAST_PLUS:
1725 ctrl_config->scl.i2c = 1000000;
1726 break;
1727 case I2C_SPEED_HIGH:
1728 ctrl_config->scl.i2c = 3400000;
1729 break;
1730 case I2C_SPEED_ULTRA:
1731 ctrl_config->scl.i2c = 5000000;
1732 break;
1733 default:
1734 break;
1735 }
1736
1737 k_mutex_lock(&data->bus_lock, K_FOREVER);
1738 cdns_i3c_set_prescalers(dev);
1739 k_mutex_unlock(&data->bus_lock);
1740
1741 return 0;
1742 }
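
/*
 * Usage sketch (illustrative only): the speed grades handled by the switch
 * above are selected through the standard i2c_configure() API, e.g. requesting
 * Fast mode results in ctrl_config->scl.i2c = 400000:
 *
 *   uint32_t i2c_cfg = I2C_MODE_CONTROLLER | I2C_SPEED_SET(I2C_SPEED_FAST);
 *
 *   (void)i2c_configure(dev, i2c_cfg);
 */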
1743
1744 /**
1745 * @brief Configure I3C hardware.
1746 *
1747 * @param dev Pointer to controller device driver instance.
1748 * @param type Type of configuration parameters being passed
1749 * in @p config.
1750 * @param config Pointer to the configuration parameters.
1751 *
1752 * @retval 0 If successful.
1753 * @retval -EINVAL If invalid configure parameters.
1754 * @retval -EIO General Input/Output errors.
1755 * @retval -ENOSYS If not implemented.
1756 */
1757 static int cdns_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config)
1758 {
1759 struct cdns_i3c_data *data = dev->data;
1760 struct i3c_config_controller *ctrl_cfg = config;
1761
1762 if ((ctrl_cfg->scl.i2c == 0U) || (ctrl_cfg->scl.i3c == 0U)) {
1763 return -EINVAL;
1764 }
1765
1766 data->common.ctrl_config.scl.i3c = ctrl_cfg->scl.i3c;
1767 data->common.ctrl_config.scl.i2c = ctrl_cfg->scl.i2c;
1768
1769 k_mutex_lock(&data->bus_lock, K_FOREVER);
1770 cdns_i3c_set_prescalers(dev);
1771 k_mutex_unlock(&data->bus_lock);
1772
1773 return 0;
1774 }
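
/*
 * Usage sketch (illustrative only): run-time clock reconfiguration goes
 * through the generic i3c_configure() API with I3C_CONFIG_CONTROLLER; the
 * frequencies below are example values, not requirements:
 *
 *   struct i3c_config_controller cfg = {
 *       .scl = {
 *           .i3c = 12500000,
 *           .i2c = 400000,
 *       },
 *   };
 *
 *   int err = i3c_configure(dev, I3C_CONFIG_CONTROLLER, &cfg);
 */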
1775
1776 /**
1777 * @brief Complete a I3C/I2C Transfer
1778 *
1779 * This is to be called from an ISR when the Command Response FIFO
1780 * is Empty. This will check each Command Response reading the RX
1781 * FIFO if message was a RnW and if any message had an error.
1782 *
1783 * @param dev Pointer to controller device driver instance.
1784 */
1785 static void cdns_i3c_complete_transfer(const struct device *dev)
1786 {
1787 struct cdns_i3c_data *data = dev->data;
1788 const struct cdns_i3c_config *config = dev->config;
1789 uint32_t cmdr;
1790 uint32_t id = 0;
1791 uint32_t xfer = 0;
1792 int ret = 0;
1793 struct cdns_i3c_cmd *cmd;
1794 bool was_full;
1795
1796 /* Used only to determine in the case of a controller abort */
1797 was_full = cdns_i3c_rx_fifo_full(config);
1798
1799 /* Disable further interrupts */
1800 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1801
1802 /* Ignore if no pending transfer */
1803 if (data->xfer.num_cmds == 0) {
1804 return;
1805 }
1806
1807 /* Process all results in fifo */
1808 for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
1809 !(status0 & MST_STATUS0_CMDR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
1810 cmdr = sys_read32(config->base + CMDR);
1811 id = CMDR_CMDID(cmdr);
1812
1813 if (id == CMDR_CMDID_HJACK_DISEC || id == CMDR_CMDID_HJACK_ENTDAA ||
1814 id >= data->xfer.num_cmds) {
1815 continue;
1816 }
1817
1818 cmd = &data->xfer.cmds[id];
1819
1820 xfer = MIN(CMDR_XFER_BYTES(cmdr), cmd->len);
1821 if (cmd->num_xfer != NULL) {
1822 *cmd->num_xfer = xfer;
1823 }
1824 /* Read any rx data into buffer */
1825 if (cmd->cmd0 & CMD0_FIFO_RNW) {
1826 ret = cdns_i3c_read_rx_fifo(config, cmd->buf, xfer);
1827 }
1828
1829 if ((cmd->hdr == I3C_DATA_RATE_HDR_DDR) &&
1830 (DDR_DATA(cmd->ddr_header) & HDR_CMD_RD)) {
1831 ret = cdns_i3c_read_rx_fifo_ddr_xfer(config, cmd->buf, xfer,
1832 cmd->ddr_header);
1833 }
1834
1835 /* Record error */
1836 cmd->error = CMDR_ERROR(cmdr);
1837 }
1838
1839 for (int i = 0; i < data->xfer.num_cmds; i++) {
1840 switch (data->xfer.cmds[i].error) {
1841 case CMDR_NO_ERROR:
1842 if (data->xfer.cmds[i].sdr_err) {
1843 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE_NONE;
1844 }
1845 break;
1846
1847 case CMDR_MST_ABORT:
1848 /*
1849 * A controller abort is forced if the RX FIFO fills up
1850 * There is also the case where the FIFO is full because
1851 * the packet length equals the FIFO depth. Check that the
1852 * requested length is greater than the total transferred
1853 * to confirm that is not the case. Otherwise the abort was
1854 * caused by the buffer length being met without the target
1855 * signaling End of Data (EoD) in the T bit. Do not treat
1856 * that condition as an error because some targets simply
1857 * auto-increment the read address well beyond the buffer
1858 * without ever giving an EoD.
1859 */
1860 if ((was_full) && (data->xfer.cmds[i].len > *data->xfer.cmds[i].num_xfer)) {
1861 ret = -ENOSPC;
1862 } else {
1863 LOG_DBG("%s: Controller Abort due to buffer length being exceeded with "
1864 "no EoD from target",
1865 dev->name);
1866 }
1867 if (data->xfer.cmds[i].sdr_err) {
1868 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE_NONE;
1869 }
1870 break;
1871
1872 case CMDR_M0_ERROR: {
1873 uint8_t ccc = data->xfer.cmds[i].cmd1 & 0xFF;
1874 /*
1875 * M0 is an illegally formatted CCC, e.g. the Controller
1876 * receives 1 byte instead of 2 with the GETMWL CCC. This can
1877 * be problematic for CCCs that have a variable length, such
1878 * as GETMXDS and GETCAPS. Verify that the number of bytes received
1879 * matches what the specification allows and ignore the error. The IP
1880 * will still retransmit the same CCC and there is nothing that can be
1881 * done to prevent this. It is still up to the application to read
1882 * `num_xfer` to determine the number of bytes returned.
1883 */
1884 if (ccc == I3C_CCC_GETMXDS) {
1885 /*
1886 * Whether GETMXDS returns format 1 or format 2 cannot be known
1887 * ahead of time.
1888 */
1889 if ((*data->xfer.cmds[i].num_xfer !=
1890 SIZEOF_FIELD(union i3c_ccc_getmxds, fmt1)) &&
1891 (*data->xfer.cmds[i].num_xfer !=
1892 SIZEOF_FIELD(union i3c_ccc_getmxds, fmt2))) {
1893 ret = -EIO;
1894 }
1895 } else if (ccc == I3C_CCC_GETCAPS) {
1896 /* GETCAPS can only return 1-4 bytes */
1897 if (*data->xfer.cmds[i].num_xfer > sizeof(union i3c_ccc_getcaps)) {
1898 ret = -EIO;
1899 }
1900 } else {
1901 if (data->xfer.cmds[i].sdr_err) {
1902 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE0;
1903 }
1904 ret = -EIO;
1905 }
1906 break;
1907 }
1908
1909 case CMDR_M1_ERROR:
1910 if (data->xfer.cmds[i].sdr_err) {
1911 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE1;
1912 }
1913 ret = -EIO;
1914 break;
1915 case CMDR_M2_ERROR:
1916 if (data->xfer.cmds[i].sdr_err) {
1917 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE2;
1918 }
1919 ret = -EIO;
1920 break;
1921
1922 case CMDR_DDR_PREAMBLE_ERROR:
1923 case CMDR_DDR_PARITY_ERROR:
1924 case CMDR_NACK_RESP:
1925 case CMDR_DDR_DROPPED:
1926 if (data->xfer.cmds[i].sdr_err) {
1927 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE_UNKNOWN;
1928 }
1929 ret = -EIO;
1930 break;
1931
1932 case CMDR_DDR_RX_FIFO_OVF:
1933 case CMDR_DDR_TX_FIFO_UNF:
1934 if (data->xfer.cmds[i].sdr_err) {
1935 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE_UNKNOWN;
1936 }
1937 ret = -ENOSPC;
1938 break;
1939
1940 case CMDR_INVALID_DA:
1941 default:
1942 if (data->xfer.cmds[i].sdr_err) {
1943 *data->xfer.cmds[i].sdr_err = I3C_ERROR_CE_UNKNOWN;
1944 }
1945 ret = -EINVAL;
1946 break;
1947 }
1948 }
1949
1950 data->xfer.ret = ret;
1951
1952 /* Indicate no transfer is pending */
1953 data->xfer.num_cmds = 0;
1954
1955 k_sem_give(&data->xfer.complete);
1956 }
1957
1958 /**
1959 * @brief Transfer messages in I2C mode.
1960 *
1961 * @param dev Pointer to device driver instance.
1962 * @param i2c_dev Pointer to the I2C device descriptor.
1963 * @param msgs Pointer to I2C messages.
1964 * @param num_msgs Number of messages to transfer.
1965 *
1966 * @retval 0 If successful.
1967 * @retval -EIO General input / output error.
1968 * @retval -EINVAL Address not registered
1969 */
1970 static int cdns_i3c_i2c_transfer(const struct device *dev, struct i3c_i2c_device_desc *i2c_dev,
1971 struct i2c_msg *msgs, uint8_t num_msgs)
1972 {
1973 const struct cdns_i3c_config *config = dev->config;
1974 struct cdns_i3c_data *data = dev->data;
1975 uint32_t txsize = 0;
1976 uint32_t rxsize = 0;
1977 int ret;
1978
1979 /* make sure we are currently the active controller */
1980 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1981 return -EACCES;
1982 }
1983
1984 if (num_msgs == 0) {
1985 return 0;
1986 }
1987
1988 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1989 LOG_ERR("%s: Too many messages", dev->name);
1990 return -ENOMEM;
1991 }
1992
1993 /*
1994 * Ensure data will fit within FIFOs
1995 */
1996 for (unsigned int i = 0; i < num_msgs; i++) {
1997 if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
1998 rxsize += ROUND_UP(msgs[i].len, 4);
1999 } else {
2000 txsize += ROUND_UP(msgs[i].len, 4);
2001 }
2002 }
2003 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
2004 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
2005 return -ENOMEM;
2006 }
2007
2008 k_mutex_lock(&data->bus_lock, K_FOREVER);
2009
2010 /* wait for idle */
2011 ret = cdns_i3c_wait_for_idle(dev);
2012 if (ret != 0) {
2013 goto error;
2014 }
2015
2016 for (unsigned int i = 0; i < num_msgs; i++) {
2017 struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
2018
2019 cmd->len = msgs[i].len;
2020 cmd->buf = msgs[i].buf;
2021 /* not an i3c transfer, but must be set to sdr */
2022 cmd->hdr = I3C_DATA_RATE_SDR;
2023
2024 cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
2025 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(i2c_dev->addr);
2026 cmd->cmd0 |= CMD0_FIFO_PL_LEN(msgs[i].len);
2027
2028 /* Send repeated start on all transfers except the last or those marked STOP. */
2029 if ((i < (num_msgs - 1)) && ((msgs[i].flags & I2C_MSG_STOP) == 0)) {
2030 cmd->cmd0 |= CMD0_FIFO_RSBC;
2031 }
2032
2033 if (msgs[i].flags & I2C_MSG_ADDR_10_BITS) {
2034 cmd->cmd0 |= CMD0_FIFO_IS_10B;
2035 }
2036
2037 if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
2038 cmd->cmd0 |= CMD0_FIFO_RNW;
2039 }
2040
2041 /* num_xfer and sdr_err are don't-cares for i2c transfers */
2042 cmd->num_xfer = NULL;
2043 cmd->sdr_err = NULL;
2044 }
2045
2046 data->xfer.ret = -ETIMEDOUT;
2047 data->xfer.num_cmds = num_msgs;
2048
2049 cdns_i3c_start_transfer(dev);
2050 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
2051 cdns_i3c_cancel_transfer(dev);
2052 }
2053
2054 ret = data->xfer.ret;
2055 error:
2056 k_mutex_unlock(&data->bus_lock);
2057
2058 return ret;
2059 }
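
/*
 * Usage sketch (illustrative only): a register read of a legacy I2C device on
 * this bus reaches cdns_i3c_i2c_transfer() via i2c_transfer(); the register
 * and target address below are made up:
 *
 *   uint8_t reg = 0x10;
 *   uint8_t val;
 *   struct i2c_msg msgs[2] = {
 *       { .buf = &reg, .len = 1, .flags = I2C_MSG_WRITE },
 *       { .buf = &val, .len = 1,
 *         .flags = I2C_MSG_READ | I2C_MSG_RESTART | I2C_MSG_STOP },
 *   };
 *
 *   int err = i2c_transfer(dev, msgs, 2, 0x50);
 */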
2060
2061 static int cdns_i3c_master_get_rr_slot(const struct device *dev, uint8_t dyn_addr)
2062 {
2063 struct cdns_i3c_data *data = dev->data;
2064 const struct cdns_i3c_config *config = dev->config;
2065 uint8_t rr_idx, i;
2066 uint32_t rr, activedevs;
2067
2068 /* If it does not have a dynamic address, then assign it a free one */
2069 if (dyn_addr == 0) {
2070 if (!data->free_rr_slots) {
2071 return -ENOSPC;
2072 }
2073
2074 return find_lsb_set(data->free_rr_slots) - 1;
2075 }
2076
2077 /* Device already has a Dynamic Address, so assume it is already in the RRs */
2078 activedevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
2079 /* skip itself */
2080 activedevs &= ~BIT(0);
2081
2082 /* loop through each set bit for new devices */
2083 for (i = find_lsb_set(activedevs); i <= find_msb_set(activedevs); i++) {
2084 rr_idx = i - 1;
2085 if (activedevs & BIT(rr_idx)) {
2086 rr = sys_read32(config->base + DEV_ID_RR0(rr_idx));
2087 if ((rr & DEV_ID_RR0_IS_I3C) && (DEV_ID_RR0_GET_DEV_ADDR(rr) == dyn_addr)) {
2088 return rr_idx;
2089 }
2090 }
2091 }
2092
2093 return -EINVAL;
2094 }
2095
2096 static int cdns_i3c_attach_device(const struct device *dev, struct i3c_device_desc *desc)
2097 {
2098 /*
2099 * Mark devices as active. Devices found during DAA are marked active then and
2100 * are given the exact DA programmed in their RR; otherwise they are set as active
2101 * here. If the dynamic address is set, it is assumed that it was already initialized
2102 * by the primary controller. When assigned through ENTDAA, the dynamic address, bcr,
2103 * dcr, and pid are all written to the RR along with setting the device as active. If
2104 * the device has a static address, it is assumed that it will be programmed with
2105 * SETDASA and needs to be marked as active before SETDASA is sent out.
2106 */
2107 if ((desc->static_addr != 0) || (desc->dynamic_addr != 0)) {
2108 const struct cdns_i3c_config *config = dev->config;
2109 struct cdns_i3c_data *data = dev->data;
2110
2111 int slot = cdns_i3c_master_get_rr_slot(dev, desc->dynamic_addr);
2112
2113 if (slot < 0) {
2114 LOG_ERR("%s: no space for i3c device: %s", dev->name, desc->dev->name);
2115 return slot;
2116 }
2117
2118 k_mutex_lock(&data->bus_lock, K_FOREVER);
2119
2120 sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
2121 config->base + DEVS_CTRL);
2122
2123 data->cdns_i3c_i2c_priv_data[slot].id = slot;
2124 desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
2125 data->free_rr_slots &= ~BIT(slot);
2126
2127 uint32_t dev_id_rr0 =
2128 DEV_ID_RR0_IS_I3C |
2129 prepare_rr0_dev_address(desc->dynamic_addr ? desc->dynamic_addr
2130 : desc->static_addr);
2131 uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
2132 uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF);
2133
2134 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
2135 sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(slot));
2136 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
2137
2138 k_mutex_unlock(&data->bus_lock);
2139 }
2140
2141 return 0;
2142 }
2143
2144 static int cdns_i3c_reattach_device(const struct device *dev, struct i3c_device_desc *desc,
2145 uint8_t old_dyn_addr)
2146 {
2147 const struct cdns_i3c_config *config = dev->config;
2148 struct cdns_i3c_data *data = dev->data;
2149 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
2150
2151 if (cdns_i3c_device_data == NULL) {
2152 LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
2153 return -EINVAL;
2154 }
2155
2156 k_mutex_lock(&data->bus_lock, K_FOREVER);
2157
2158 uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(desc->dynamic_addr);
2159 uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
2160 uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF) | DEV_ID_RR2_BCR(desc->bcr) |
2161 DEV_ID_RR2_DCR(desc->dcr);
2162
2163 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(cdns_i3c_device_data->id));
2164 sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(cdns_i3c_device_data->id));
2165 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(cdns_i3c_device_data->id));
2166
2167 k_mutex_unlock(&data->bus_lock);
2168
2169 return 0;
2170 }
2171
2172 static int cdns_i3c_detach_device(const struct device *dev, struct i3c_device_desc *desc)
2173 {
2174 const struct cdns_i3c_config *config = dev->config;
2175 struct cdns_i3c_data *data = dev->data;
2176 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
2177
2178 if (cdns_i3c_device_data == NULL) {
2179 LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
2180 return -EINVAL;
2181 }
2182
2183 k_mutex_lock(&data->bus_lock, K_FOREVER);
2184
2185 sys_write32(sys_read32(config->base + DEVS_CTRL) |
2186 DEVS_CTRL_DEV_CLR(cdns_i3c_device_data->id),
2187 config->base + DEVS_CTRL);
2188 data->free_rr_slots |= BIT(cdns_i3c_device_data->id);
2189 desc->controller_priv = NULL;
2190
2191 k_mutex_unlock(&data->bus_lock);
2192
2193 return 0;
2194 }
2195
2196 static int cdns_i3c_i2c_attach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2197 {
2198 const struct cdns_i3c_config *config = dev->config;
2199 struct cdns_i3c_data *data = dev->data;
2200
2201 int slot = cdns_i3c_master_get_rr_slot(dev, 0);
2202
2203 if (slot < 0) {
2204 LOG_ERR("%s: no space for i2c device: addr 0x%02x", dev->name, desc->addr);
2205 return slot;
2206 }
2207
2208 k_mutex_lock(&data->bus_lock, K_FOREVER);
2209
2210 uint32_t dev_id_rr0 = prepare_rr0_dev_address(desc->addr);
2211 uint32_t dev_id_rr2 = DEV_ID_RR2_LVR(desc->lvr);
2212
2213 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
2214 sys_write32(0, config->base + DEV_ID_RR1(slot));
2215 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
2216
2217 data->cdns_i3c_i2c_priv_data[slot].id = slot;
2218 desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
2219 data->free_rr_slots &= ~BIT(slot);
2220
2221 sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
2222 config->base + DEVS_CTRL);
2223
2224 k_mutex_unlock(&data->bus_lock);
2225
2226 return 0;
2227 }
2228
2229 static int cdns_i3c_i2c_detach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2230 {
2231 const struct cdns_i3c_config *config = dev->config;
2232 struct cdns_i3c_data *data = dev->data;
2233 struct cdns_i3c_i2c_dev_data *cdns_i2c_device_data = desc->controller_priv;
2234
2235 if (cdns_i2c_device_data == NULL) {
2236 LOG_ERR("%s: device not attached", dev->name);
2237 return -EINVAL;
2238 }
2239
2240 k_mutex_lock(&data->bus_lock, K_FOREVER);
2241
2242 sys_write32(sys_read32(config->base + DEVS_CTRL) |
2243 DEVS_CTRL_DEV_CLR(cdns_i2c_device_data->id),
2244 config->base + DEVS_CTRL);
2245 data->free_rr_slots |= BIT(cdns_i2c_device_data->id);
2246 desc->controller_priv = NULL;
2247
2248 k_mutex_unlock(&data->bus_lock);
2249
2250 return 0;
2251 }
2252
2253 /**
2254 * @brief Transfer messages in I3C mode.
2255 *
2256 * @see i3c_transfer
2257 *
2258 * @param dev Pointer to device driver instance.
2259 * @param target Pointer to target device descriptor.
2260 * @param msgs Pointer to I3C messages.
2261 * @param num_msgs Number of messages to transfers.
2262 *
2263 * @return @see i3c_transfer
2264 */
2265 static int cdns_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
2266 struct i3c_msg *msgs, uint8_t num_msgs)
2267 {
2268 const struct cdns_i3c_config *config = dev->config;
2269 struct cdns_i3c_data *data = dev->data;
2270 int txsize = 0;
2271 int rxsize = 0;
2272 int ret;
2273
2274 /* make sure we are currently the active controller */
2275 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
2276 return -EACCES;
2277 }
2278
2279 if (num_msgs == 0) {
2280 return 0;
2281 }
2282
2283 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
2284 LOG_ERR("%s: Too many messages", dev->name);
2285 return -ENOMEM;
2286 }
2287
2288 /*
2289 * Ensure data will fit within FIFOs.
2290 *
2291 * TODO: This limitation prevents burst transfers greater than the
2292 * FIFO sizes and should be replaced with an implementation that
2293 * utilizes the RX/TX data interrupts.
2294 */
2295 for (int i = 0; i < num_msgs; i++) {
2296 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2297 rxsize += ROUND_UP(msgs[i].len, 4);
2298 } else {
2299 txsize += ROUND_UP(msgs[i].len, 4);
2300 }
2301 }
2302 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
2303 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
2304 return -ENOMEM;
2305 }
2306
2307 k_mutex_lock(&data->bus_lock, K_FOREVER);
2308
2309 /* wait for idle */
2310 ret = cdns_i3c_wait_for_idle(dev);
2311 if (ret != 0) {
2312 goto error;
2313 }
2314
2315 /*
2316 * Prepare transfer commands. Currently there is only a single transfer
2317 * in-flight but it would be possible to keep a queue of transfers. If so,
2318 * this preparation could be completed outside of the bus lock allowing
2319 * greater parallelism.
2320 */
2321 bool send_broadcast = true;
2322
2323 for (int i = 0; i < num_msgs; i++) {
2324 struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
2325 uint32_t pl = msgs[i].len;
2326 /* check hdr mode */
2327 if ((!(msgs[i].flags & I3C_MSG_HDR)) ||
2328 ((msgs[i].flags & I3C_MSG_HDR) && (msgs[i].hdr_mode == 0))) {
2329 /* HDR message flag is not set or if hdr flag is set but no hdr mode is set
2330 */
2331 cmd->len = pl;
2332 cmd->buf = msgs[i].buf;
2333
2334 cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
2335 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(target->dynamic_addr);
2336 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2337 cmd->cmd0 |= CMD0_FIFO_RNW;
2338 /*
2339 * For I3C_XMIT_MODE_NO_ADDR reads in SDR mode,
2340 * CMD0_FIFO_PL_LEN specifies the abort limit, not the number of bytes to read
2341 */
2342 cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl + 1);
2343 } else {
2344 cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl);
2345 }
2346
2347 /* Send broadcast header on first transfer or after a STOP. */
2348 if (!(msgs[i].flags & I3C_MSG_NBCH) && (send_broadcast)) {
2349 cmd->cmd0 |= CMD0_FIFO_BCH;
2350 send_broadcast = false;
2351 }
2352
2353 /*
2354 * Send repeated start on all transfers except the last or those marked
2355 * STOP.
2356 */
2357 if ((i < (num_msgs - 1)) && ((msgs[i].flags & I3C_MSG_STOP) == 0)) {
2358 cmd->cmd0 |= CMD0_FIFO_RSBC;
2359 } else {
2360 send_broadcast = true;
2361 }
2362
2363 /*
2364 * write the address of num_xfer which is to be updated upon message
2365 * completion
2366 */
2367 cmd->num_xfer = &(msgs[i].num_xfer);
2368 cmd->sdr_err = &(msgs[i].err);
2369 cmd->hdr = I3C_DATA_RATE_SDR;
2370 } else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) &&
2371 (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && (msgs[i].flags & I3C_MSG_HDR)) {
2372 uint16_t ddr_header_payload;
2373
2374 /* DDR sends data out in 16b, so len must be a multiple of 2 */
2375 if (!((pl % 2) == 0)) {
2376 ret = -EINVAL;
2377 goto error;
2378 }
2379 /* HDR message flag is set and hdr mode is DDR */
2380 cmd->buf = msgs[i].buf;
2381 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2382 /* HDR-DDR Read */
2383 ddr_header_payload = HDR_CMD_RD |
2384 HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2385 (target->dynamic_addr << 1);
2386 /* Parity Adjustment Bit for Reads */
2387 ddr_header_payload =
2388 prepare_ddr_cmd_parity_adjustment_bit(ddr_header_payload);
2389 /* HDR-DDR Command Word */
2390 cmd->ddr_header =
2391 DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2392 } else {
2393 uint8_t crc5 = 0x1F;
2394 /* HDR-DDR Write */
2395 ddr_header_payload = HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2396 (target->dynamic_addr << 1);
2397 /* HDR-DDR Command Word */
2398 cmd->ddr_header =
2399 DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2400 /* calculate crc5 */
2401 crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2402 for (int j = 0; j < pl; j += 2) {
2403 crc5 = i3c_cdns_crc5(
2404 crc5,
2405 sys_get_be16((void *)((uintptr_t)cmd->buf + j)));
2406 }
2407 cmd->ddr_crc = DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | (crc5 << 9) |
2408 DDR_CRC_WR_SETUP;
2409 }
2410 /* Length of DDR Transfer is length of payload (in 16b) + header and CRC
2411 * blocks
2412 */
2413 cmd->len = ((pl / 2) + 2);
2414
2415 /* prep command FIFO for ENTHDR0 */
2416 cmd->cmd0 = CMD0_FIFO_IS_CCC;
2417 cmd->cmd1 = I3C_CCC_ENTHDR0;
2418 /* write the address of num_xfer which is to be updated upon message
2419 * completion
2420 */
2421 cmd->num_xfer = &(msgs[i].num_xfer);
2422 cmd->sdr_err = &(msgs[i].err);
2423 cmd->hdr = I3C_DATA_RATE_HDR_DDR;
2424 } else {
2425 LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, msgs[i].hdr_mode);
2426 ret = -ENOTSUP;
2427 goto error;
2428 }
2429 }
2430
2431 data->xfer.ret = -ETIMEDOUT;
2432 data->xfer.num_cmds = num_msgs;
2433
2434 cdns_i3c_start_transfer(dev);
2435 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
2436 LOG_ERR("%s: transfer timed out", dev->name);
2437 cdns_i3c_cancel_transfer(dev);
2438 }
2439
2440 ret = data->xfer.ret;
2441 error:
2442 k_mutex_unlock(&data->bus_lock);
2443
2444 return ret;
2445 }
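
/*
 * Usage sketch (illustrative only): private transfers reach this function
 * through i3c_transfer() on an attached device descriptor. An SDR write
 * followed by a repeated-start read could look like:
 *
 *   uint8_t cmd = 0x01;
 *   uint8_t resp[4];
 *   struct i3c_msg msgs[2] = {
 *       { .buf = &cmd, .len = 1, .flags = I3C_MSG_WRITE },
 *       { .buf = resp, .len = sizeof(resp), .flags = I3C_MSG_READ | I3C_MSG_STOP },
 *   };
 *
 *   int err = i3c_transfer(target, msgs, 2);
 *
 * For HDR-DDR (when supported), the caller additionally sets I3C_MSG_HDR,
 * hdr_mode = I3C_MSG_HDR_DDR and an hdr_cmd_code, with an even payload length.
 */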
2446
2447 #ifdef CONFIG_I3C_USE_IBI
2448 static int cdns_i3c_read_ibi_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
2449 {
2450 uint32_t *ptr = buf;
2451 uint32_t remain, val;
2452
2453 for (remain = len; remain >= 4; remain -= 4) {
2454 if (cdns_i3c_ibi_fifo_empty(config)) {
2455 return -EIO;
2456 }
2457 val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2458 *ptr++ = val;
2459 }
2460
2461 if (remain > 0) {
2462 if (cdns_i3c_ibi_fifo_empty(config)) {
2463 return -EIO;
2464 }
2465 val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2466 memcpy(ptr, &val, remain);
2467 }
2468
2469 return 0;
2470 }
2471
2472 static void cdns_i3c_handle_ibi(const struct device *dev, uint32_t ibir)
2473 {
2474 const struct cdns_i3c_config *config = dev->config;
2475 struct cdns_i3c_data *data = dev->data;
2476
2477 /* The slave ID returned here is the device ID in the SIR map NOT the device ID
2478 * in the RR map.
2479 */
2480 uint8_t slave_id = IBIR_SLVID(ibir);
2481
2482 if (slave_id == IBIR_SLVID_INV) {
2483 /* DA does not match any value among SIR map */
2484 return;
2485 }
2486
2487 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(slave_id + 1));
2488 uint8_t dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(dev_id_rr0);
2489 struct i3c_device_desc *desc = i3c_dev_list_i3c_addr_find(dev, dyn_addr);
2490
2491 /*
2492 * Check for NAK or error conditions.
2493 *
2494 * Note: The logging is for debugging only and will be compiled out in most cases.
2495 * However, if the log level for this module is DEBUG and the log mode is IMMEDIATE
2496 * or MINIMAL, this may cause problems because this code runs inside an ISR.
2497 */
2498 if (!(IBIR_ACKED & ibir)) {
2499 LOG_DBG("%s: NAK for slave ID %u", dev->name, (unsigned int)slave_id);
2500 return;
2501 }
2502 if (ibir & IBIR_ERROR) {
2503 /* Controller issued an Abort */
2504 LOG_ERR("%s: IBI Data overflow", dev->name);
2505 }
2506
2507 /* Read out any payload bytes */
2508 uint8_t ibi_len = IBIR_XFER_BYTES(ibir);
2509
2510 if (ibi_len > 0) {
2511 if (ibi_len - data->ibi_buf.ibi_data_cnt > 0) {
2512 if (cdns_i3c_read_ibi_fifo(
2513 config, &data->ibi_buf.ibi_data[data->ibi_buf.ibi_data_cnt],
2514 ibi_len - data->ibi_buf.ibi_data_cnt) < 0) {
2515 LOG_ERR("%s: Failed to get payload", dev->name);
2516 }
2517 }
2518 data->ibi_buf.ibi_data_cnt = 0;
2519 }
2520
2521 if (i3c_ibi_work_enqueue_target_irq(desc, data->ibi_buf.ibi_data, ibi_len) != 0) {
2522 LOG_ERR("%s: Error enqueue IBI IRQ work", dev->name);
2523 }
2524 }
2525
2526 static void cdns_i3c_handle_cr(const struct device *dev, uint32_t ibir)
2527 {
2528 const struct cdns_i3c_config *config = dev->config;
2529
2530 /* The slave ID returned here is the device ID in the SIR map NOT the device ID
2531 * in the RR map.
2532 */
2533 uint8_t slave_id = IBIR_SLVID(ibir);
2534
2535 if (slave_id == IBIR_SLVID_INV) {
2536 /* DA does not match any value among SIR map */
2537 return;
2538 }
2539
2540 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(slave_id + 1));
2541 uint8_t dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(dev_id_rr0);
2542 struct i3c_device_desc *desc = i3c_dev_list_i3c_addr_find(dev, dyn_addr);
2543
2544 /*
2545 * Check for NAK or error conditions.
2546 *
2547 * Note: The logging is for debugging only and will be compiled out in most cases.
2548 * However, if the log level for this module is DEBUG and the log mode is IMMEDIATE
2549 * or MINIMAL, this may cause problems because this code runs inside an ISR.
2550 */
2551 if (!(IBIR_ACKED & ibir)) {
2552 LOG_DBG("%s: NAK for slave ID %u", dev->name, (unsigned int)slave_id);
2553 return;
2554 }
2555 if (ibir & IBIR_ERROR) {
2556 LOG_ERR("%s: Data overflow", dev->name);
2557 return;
2558 }
2559
2560 if (i3c_ibi_work_enqueue_controller_request(desc) != 0) {
2561 LOG_ERR("%s: Error enqueue IBI IRQ work", dev->name);
2562 }
2563 }
2564
2565 static void cdns_i3c_handle_hj(const struct device *dev, uint32_t ibir)
2566 {
2567 if (!(IBIR_ACKED & ibir)) {
2568 LOG_DBG("%s: NAK for HJ", dev->name);
2569 return;
2570 }
2571
2572 /* TODO: disable CTRL_HJ_DISEC and process auto-ENTDAA*/
2573 if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
2574 LOG_ERR("%s: Error enqueue IBI HJ work", dev->name);
2575 }
2576 }
2577
2578 static void cnds_i3c_master_demux_ibis(const struct device *dev)
2579 {
2580 const struct cdns_i3c_config *config = dev->config;
2581
2582 for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
2583 !(status0 & MST_STATUS0_IBIR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
2584 uint32_t ibir = sys_read32(config->base + IBIR);
2585
2586 switch (IBIR_TYPE(ibir)) {
2587 case IBIR_TYPE_IBI:
2588 cdns_i3c_handle_ibi(dev, ibir);
2589 break;
2590 case IBIR_TYPE_HJ:
2591 cdns_i3c_handle_hj(dev, ibir);
2592 break;
2593 case IBIR_TYPE_MR:
2594 cdns_i3c_handle_cr(dev, ibir);
2595 break;
2596 default:
2597 break;
2598 }
2599 }
2600 }
2601
2602 static void cdns_i3c_target_ibi_hj_complete(const struct device *dev)
2603 {
2604 struct cdns_i3c_data *data = dev->data;
2605
2606 k_sem_give(&data->ibi_hj_complete);
2607 }
2608
2609 static void cdns_i3c_target_ibi_cr_complete(const struct device *dev)
2610 {
2611 struct cdns_i3c_data *data = dev->data;
2612
2613 k_sem_give(&data->ibi_cr_complete);
2614 }
2615 #endif
2616
2617 static void cdns_i3c_target_sdr_tx_thr_int_handler(const struct device *dev,
2618 const struct i3c_target_callbacks *target_cb)
2619 {
2620 int status = 0;
2621 struct cdns_i3c_data *data = dev->data;
2622 const struct cdns_i3c_config *config = dev->config;
2623
2624 if (target_cb != NULL && target_cb->read_processed_cb) {
2625 /* with REV_ID 1p7 or later, as a target, the fifos use the full word, otherwise
2626 * only the first byte is used.
2627 */
2628 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2629 /* while tx fifo is not full and there is still data available */
2630 while ((!(sys_read32(config->base + SLV_STATUS1) &
2631 SLV_STATUS1_SDR_TX_FULL)) &&
2632 (status == 0)) {
2633 /* call function pointer for read */
2634 uint32_t tx_data = 0;
2635 bool data_valid = false;
2636
2637 for (int j = 0; j < 4; j++) {
2638 uint8_t byte;
2639 /* will return negative if no data left to transmit and 0
2640 * if data available
2641 */
2642 status = target_cb->read_processed_cb(data->target_config,
2643 &byte);
2644 if (status == 0) {
2645 data_valid = true;
2646 tx_data |= (byte << (j * 8));
2647 }
2648 }
2649 if (data_valid) {
2650 cdns_i3c_write_tx_fifo(config, &tx_data, sizeof(uint32_t));
2651 }
2652 }
2653 } else {
2654 /* while tx fifo is not full and there is still data available */
2655 while ((!(sys_read32(config->base + SLV_STATUS1) &
2656 SLV_STATUS1_SDR_TX_FULL)) &&
2657 (status == 0)) {
2658 uint8_t byte;
2659 /* will return negative if no data left to transmit and 0 if
2660 * data available
2661 */
2662 status = target_cb->read_processed_cb(data->target_config, &byte);
2663 if (status == 0) {
2664 cdns_i3c_write_tx_fifo(config, &byte, sizeof(uint8_t));
2665 }
2666 }
2667 }
2668 }
2669 }
2670
2671 static void cdns_i3c_irq_handler(const struct device *dev)
2672 {
2673 const struct cdns_i3c_config *config = dev->config;
2674 struct cdns_i3c_data *data = dev->data;
2675 uint32_t int_st = sys_read32(config->base + MST_ISR);
2676
2677 sys_write32(int_st, config->base + MST_ICR);
2678
2679 /* Core halted */
2680 if (int_st & MST_INT_HALTED) {
2681 LOG_WRN("Core Halted, 2 read aborts");
2682 }
2683
2684 /* Command queue empty */
2685 if (int_st & MST_INT_CMDD_EMP) {
2686 cdns_i3c_complete_transfer(dev);
2687 }
2688
2689 /* In-band interrupt */
2690 if (int_st & MST_INT_IBIR_THR) {
2691 #ifdef CONFIG_I3C_USE_IBI
2692 cnds_i3c_master_demux_ibis(dev);
2693 #else
2694 LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled", dev->name);
2695 #endif
2696 }
2697
2698 /* In-band interrupt data threshold */
2699 if (int_st & MST_INT_IBID_THR) {
2700 #ifdef CONFIG_I3C_USE_IBI
2701 /* pop data out of the IBI FIFO */
2702 while (!cdns_i3c_ibi_fifo_empty(config)) {
2703 uint32_t *ptr =
2704 (uint32_t *)&data->ibi_buf.ibi_data[data->ibi_buf.ibi_data_cnt];
2705 *ptr = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2706 data->ibi_buf.ibi_data_cnt += 4;
2707 }
2708 #else
2709 LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled", dev->name);
2710 #endif
2711 }
2712
2713 /* In-band interrupt response overflow */
2714 if (int_st & MST_INT_IBIR_OVF) {
2715 LOG_ERR("%s: controller ibir overflow", dev->name);
2716 }
2717
2718 /* Controller transmit buffer overflow */
2719 if (int_st & MST_INT_TX_OVF) {
2720 LOG_ERR("%s: controller tx buffer overflow", dev->name);
2721 }
2722
2723 /* Controller receive buffer underflow */
2724 if (int_st & MST_INT_RX_UNF) {
2725 LOG_ERR("%s: controller rx buffer underflow", dev->name);
2726 }
2727
2728 if (int_st & MST_INT_MR_DONE) {
2729 LOG_DBG("%s: controller CR Handoff done", dev->name);
2730 k_sem_give(&data->ch_complete);
2731 }
2732
2733 uint32_t int_sl = sys_read32(config->base + SLV_ISR);
2734 const struct i3c_target_callbacks *target_cb =
2735 data->target_config ? data->target_config->callbacks : NULL;
2736 /* Clear interrupts */
2737 sys_write32(int_sl, config->base + SLV_ICR);
2738
2739 /* SLV SDR rx fifo threshold */
2740 if (int_sl & SLV_INT_SDR_RX_THR) {
2741 /* while rx fifo is not empty */
2742 while (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_RX_EMPTY)) {
2743 if (target_cb != NULL && target_cb->write_received_cb != NULL) {
2744 cdns_i3c_target_read_rx_fifo(dev);
2745 }
2746 }
2747 }
2748
2749 /* SLV SDR tx fifo threshold */
2750 if (int_sl & SLV_INT_SDR_TX_THR) {
2751 cdns_i3c_target_sdr_tx_thr_int_handler(dev, target_cb);
2752 }
2753
2754 /* SLV SDR rx complete */
2755 if (int_sl & SLV_INT_SDR_RD_COMP) {
2756 /* a read needs to be done on slv_status 0 else a NACK will happen */
2757 (void)sys_read32(config->base + SLV_STATUS0);
2758 /* call stop function pointer */
2759 if (target_cb != NULL && target_cb->stop_cb) {
2760 target_cb->stop_cb(data->target_config);
2761 }
2762 }
2763
2764 /* SLV SDR tx complete */
2765 if (int_sl & SLV_INT_SDR_WR_COMP) {
2766 /* a read needs to be done on slv_status 0 else a NACK will happen */
2767 (void)sys_read32(config->base + SLV_STATUS0);
2768 /* clear bytes read parameter */
2769 data->fifo_bytes_read = 0;
2770 /* call stop function pointer */
2771 if (target_cb != NULL && target_cb->stop_cb) {
2772 target_cb->stop_cb(data->target_config);
2773 }
2774 }
2775
2776 /* DA has been updated */
2777 if (int_sl & SLV_INT_DA_UPD) {
2778 LOG_INF("%s: DA updated to 0x%02lx", dev->name,
2779 SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1)));
2780 #ifdef CONFIG_I3C_USE_IBI
2781 cdns_i3c_target_ibi_hj_complete(dev);
2782 #endif
2783 }
2784
2785 /* HJ complete and DA has been assigned or HJ NACK'ed or DISEC disabled HJ */
2786 if (int_sl & SLV_INT_HJ_DONE) {
2787
2788 }
2789
2790 /* Controllership has been given to us */
2791 if (int_sl & SLV_INT_MR_DONE) {
2792 #ifdef CONFIG_I3C_USE_IBI
2793 cdns_i3c_target_ibi_cr_complete(dev);
2794 i3c_ibi_work_enqueue_cb(dev, i3c_sec_handoffed);
2795 if (target_cb != NULL && target_cb->controller_handoff_cb) {
2796 target_cb->controller_handoff_cb(data->target_config);
2797 }
2798 #endif
2799 }
2800
2801 /* EISC or DISEC has been received */
2802 if (int_sl & SLV_INT_EVENT_UP) {
2803 }
2804
2805 /* sdr transfer aborted by controller */
2806 if (int_sl & SLV_INT_M_RD_ABORT) {
2807 /* TODO: consider flushing tx buffer? */
2808 }
2809
2810 /* SLV SDR rx fifo underflow */
2811 if (int_sl & SLV_INT_SDR_RX_UNF) {
2812 LOG_ERR("%s: slave sdr rx buffer underflow", dev->name);
2813 }
2814
2815 /* SLV SDR tx fifo overflow */
2816 if (int_sl & SLV_INT_SDR_TX_OVF) {
2817 LOG_ERR("%s: slave sdr tx buffer overflow", dev->name);
2818 }
2819
2820 if (int_sl & SLV_INT_DDR_RX_THR) {
2821 }
2822
2823 /* SLV DDR WR COMPLETE */
2824 if (int_sl & SLV_INT_DDR_WR_COMP) {
2825 /* initial value of CRC5 for HDR-DDR is 0x1F */
2826 uint8_t crc5 = 0x1F;
2827
2828 while (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_RX_EMPTY)) {
2829 uint32_t ddr_rx_data = sys_read32(config->base + SLV_DDR_RX_FIFO);
2830 uint32_t preamble = (ddr_rx_data & DDR_PREAMBLE_MASK);
2831
2832 if (preamble == DDR_PREAMBLE_DATA_ABORT ||
2833 preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
2834 uint16_t ddr_payload = DDR_DATA(ddr_rx_data);
2835
2836 if (cdns_i3c_ddr_parity(ddr_payload) !=
2837 (ddr_rx_data & (DDR_ODD_PARITY | DDR_EVEN_PARITY))) {
2838 LOG_ERR("%s: Received incorrect DDR Parity", dev->name);
2839 }
2840 /* calculate a running crc */
2841 crc5 = i3c_cdns_crc5(crc5, ddr_payload);
2842
2843 if (target_cb != NULL && target_cb->write_received_cb != NULL) {
2844 /* DDR receives 2B for each payload */
2845 target_cb->write_received_cb(
2846 data->target_config,
2847 (uint8_t)((ddr_payload >> 8) & 0xFF));
2848 target_cb->write_received_cb(data->target_config,
2849 (uint8_t)(ddr_payload));
2850 }
2851
2852 } else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
2853 ((ddr_rx_data & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
2854 /* should come through here last */
2855 if (crc5 != DDR_CRC(ddr_rx_data)) {
2856 LOG_ERR("%s: Received incorrect DDR CRC5", dev->name);
2857 }
2858 } else if (preamble == DDR_PREAMBLE_CMD_CRC) {
2859 /* should come through here first */
2860 uint16_t ddr_header_payload = DDR_DATA(ddr_rx_data);
2861
2862 crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2863 }
2864 }
2865
2866 if (target_cb != NULL && target_cb->stop_cb != NULL) {
2867 target_cb->stop_cb(data->target_config);
2868 }
2869 }
2870
2871 /* SLV DDR RD COMPLETE */
2872 if (int_sl & SLV_INT_DDR_RD_COMP) {
2873 /* a read needs to be done on slv_status 0 else a NACK will happen */
2874 (void)sys_read32(config->base + SLV_STATUS0);
2875 /* call stop function pointer */
2876 if (target_cb != NULL && target_cb->stop_cb) {
2877 target_cb->stop_cb(data->target_config);
2878 }
2879 }
2880
2881 /* SLV DDR TX THR */
2882 if (int_sl & SLV_INT_DDR_TX_THR) {
2883 int status = 0;
2884
2885 if (target_cb != NULL && target_cb->read_processed_cb) {
2886
2887 while ((!(sys_read32(config->base + SLV_STATUS1) &
2888 SLV_STATUS1_DDR_TX_FULL)) &&
2889 (status == 0)) {
2890 /* call function pointer for read */
2891 uint8_t byte;
2892 /* will return negative if no data left to transmit
2893 * and 0 if data available
2894 */
2895 status = target_cb->read_processed_cb(data->target_config, &byte);
2896 if (status == 0) {
2897 cdns_i3c_write_ddr_tx_fifo(config, &byte, sizeof(byte));
2898 }
2899 }
2900 }
2901 }
2902
2903 /* DEFTGTS */
2904 if (int_sl & SLV_INT_DEFSLVS) {
2905 /* Execute outside of the ISR context */
2906 k_work_submit(&data->deftgts_work);
2907 }
2908 }
2909
2910 static void cdns_i3c_read_hw_cfg(const struct device *dev)
2911 {
2912 const struct cdns_i3c_config *config = dev->config;
2913 struct cdns_i3c_data *data = dev->data;
2914
2915 uint32_t devid = sys_read32(config->base + DEV_ID);
2916 uint32_t revid = sys_read32(config->base + REV_ID);
2917
2918 LOG_DBG("%s: Device info:\r\n"
2919 " vid: 0x%03lX, pid: 0x%03lX\r\n"
2920 " revision: major = %lu, minor = %lu\r\n"
2921 " device ID: 0x%04X",
2922 dev->name, REV_ID_VID(revid), REV_ID_PID(revid), REV_ID_REV_MAJOR(revid),
2923 REV_ID_REV_MINOR(revid), devid);
2924
2925 /*
2926 * Depths are specified as number of words (32bit), convert to bytes
2927 */
2928 uint32_t cfg0 = sys_read32(config->base + CONF_STATUS0);
2929 uint32_t cfg1 = sys_read32(config->base + CONF_STATUS1);
2930
2931 data->hw_cfg.rev_id = revid;
2932 data->hw_cfg.cmdr_mem_depth = CONF_STATUS0_CMDR_DEPTH(cfg0) * 4;
2933 data->hw_cfg.cmd_mem_depth = CONF_STATUS1_CMD_DEPTH(cfg1) * 4;
2934 data->hw_cfg.rx_mem_depth = CONF_STATUS1_RX_DEPTH(cfg1) * 4;
2935 data->hw_cfg.tx_mem_depth = CONF_STATUS1_TX_DEPTH(cfg1) * 4;
2936 data->hw_cfg.ddr_rx_mem_depth = CONF_STATUS1_SLV_DDR_RX_DEPTH(cfg1) * 4;
2937 data->hw_cfg.ddr_tx_mem_depth = CONF_STATUS1_SLV_DDR_TX_DEPTH(cfg1) * 4;
2938 data->hw_cfg.ibir_mem_depth = CONF_STATUS0_IBIR_DEPTH(cfg0) * 4;
2939 data->hw_cfg.ibi_mem_depth = CONF_STATUS1_IBI_DEPTH(cfg1) * 4;
2940
2941 LOG_DBG("%s: FIFO info:\r\n"
2942 " cmd_mem_depth = %u\r\n"
2943 " cmdr_mem_depth = %u\r\n"
2944 " rx_mem_depth = %u\r\n"
2945 " tx_mem_depth = %u\r\n"
2946 " ddr_rx_mem_depth = %u\r\n"
2947 " ddr_tx_mem_depth = %u\r\n"
2948 " ibi_mem_depth = %u\r\n"
2949 " ibir_mem_depth = %u",
2950 dev->name, data->hw_cfg.cmd_mem_depth, data->hw_cfg.cmdr_mem_depth,
2951 data->hw_cfg.rx_mem_depth, data->hw_cfg.tx_mem_depth, data->hw_cfg.ddr_rx_mem_depth,
2952 data->hw_cfg.ddr_tx_mem_depth, data->hw_cfg.ibi_mem_depth,
2953 data->hw_cfg.ibir_mem_depth);
2954
2955 /* Regardless of the cmd depth size we are limited by our cmd array length. */
2956 data->hw_cfg.cmd_mem_depth = MIN(data->hw_cfg.cmd_mem_depth, ARRAY_SIZE(data->xfer.cmds));
2957 }
2958
2959 /**
2960 * @brief Get configuration of the I3C hardware.
2961 *
2962 * This provides a way to get the current configuration of the I3C hardware.
2963 *
2964 * This can return cached config or probed hardware parameters, but it has to
2965 * be up to date with current configuration.
2966 *
2967 * @param[in] dev Pointer to controller device driver instance.
2968 * @param[in] type Type of configuration parameters being passed
2969 * in @p config.
2970 * @param[in,out] config Pointer to the configuration parameters.
2971 *
2972 * Note that if @p type is @c I3C_CONFIG_CUSTOM, @p config must contain
2973 * the ID of the parameter to be retrieved.
2974 *
2975 * @retval 0 If successful.
2976 * @retval -EIO General Input/Output errors.
2977 * @retval -ENOSYS If not implemented.
2978 */
2979 static int cdns_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config)
2980 {
2981 const struct cdns_i3c_config *dev_config = dev->config;
2982 struct cdns_i3c_data *data = dev->data;
2983
2984 __ASSERT_NO_MSG(config != NULL);
2985
2986 if (type == I3C_CONFIG_CONTROLLER) {
2987 (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config));
2988 } else if (type == I3C_CONFIG_TARGET) {
2989 struct i3c_config_target *target_config = (struct i3c_config_target *)config;
2990 /* Read RR_0 registers for itself */
2991 uint32_t dev_id_rr0 = sys_read32(dev_config->base + DEV_ID_RR0(0));
2992 uint32_t dev_id_rr1 = sys_read32(dev_config->base + DEV_ID_RR1(0));
2993 uint32_t dev_id_rr2 = sys_read32(dev_config->base + DEV_ID_RR2(0));
2994 uint32_t slv_status1 = sys_read32(dev_config->base + SLV_STATUS1);
2995
2996 /* if we are currently a target */
2997 target_config->enabled =
2998 !(sys_read32(dev_config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE);
2999 if (data->common.ctrl_config.is_secondary) {
3000 target_config->dynamic_addr = SLV_STATUS1_DA(slv_status1);
3001 } else {
3002 target_config->dynamic_addr = (dev_id_rr0 & 0xFE) >> 1;
3003 }
3004 target_config->static_addr = 0;
3005 target_config->pid = ((uint64_t)dev_id_rr1 << 16) + (dev_id_rr2 >> 16);
3006 target_config->pid_random = !!(slv_status1 & SLV_STATUS1_VEN_TM);
3007 target_config->bcr = dev_id_rr2 >> 8;
3008 target_config->dcr = dev_id_rr2 & 0xFF;
3009 /* Version 1p7 supports reading MRL/MWL */
3010 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
3011 target_config->max_read_len =
3012 SLV_STATUS2_MRL(sys_read32(dev_config->base + SLV_STATUS2));
3013 target_config->max_write_len =
3014 SLV_STATUS3_MWL(sys_read32(dev_config->base + SLV_STATUS3));
3015 } else {
3016 target_config->max_read_len = 0;
3017 target_config->max_write_len = 0;
3018 }
3019 target_config->supported_hdr = data->common.ctrl_config.supported_hdr;
3020 } else {
3021 return -EINVAL;
3022 }
3023
3024 return 0;
3025 }
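
/*
 * Usage sketch (illustrative only): a secondary controller can inspect its own
 * target-side state, e.g. whether a dynamic address has been assigned, through
 * the generic i3c_config_get() API:
 *
 *   struct i3c_config_target tgt_cfg;
 *
 *   if ((i3c_config_get(dev, I3C_CONFIG_TARGET, &tgt_cfg) == 0) && tgt_cfg.enabled) {
 *       LOG_INF("Operating as target, DA 0x%02x", tgt_cfg.dynamic_addr);
 *   }
 */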
3026
3027 static int cdns_i3c_target_tx_ddr_write(const struct device *dev, uint8_t *buf, uint16_t len)
3028 {
3029 const struct cdns_i3c_config *config = dev->config;
3030 struct cdns_i3c_data *data = dev->data;
3031 uint32_t i, preamble;
3032 uint32_t data_word;
3033 uint8_t crc5 = 0x1F;
3034
3035 /* check if there is space available in the tx fifo */
3036 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL) {
3037 return -ENOSPC;
3038 }
3039
3040 /* DDR sends data out in 16b, so len must be a multiple of 2 */
3041 if (!((len % 2) == 0)) {
3042 return -EINVAL;
3043 }
3044
3045 /* Header shall be known in advance to calculate crc5 */
3046 uint8_t slave_da = SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1));
3047 uint16_t ddr_payload_header = HDR_CMD_RD | (slave_da << 1);
3048
3049 ddr_payload_header = prepare_ddr_cmd_parity_adjustment_bit(ddr_payload_header);
3050 crc5 = i3c_cdns_crc5(crc5, ddr_payload_header);
3051
3052 /* write as much as you can to the fifo */
3053 for (i = 0;
3054 i < len && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL));
3055 i += 2) {
3056 /* Use the ALT preamble for all but the first packet */
3057 preamble = (i > 0) ? DDR_PREAMBLE_DATA_ABORT_ALT : DDR_PREAMBLE_DATA_ABORT;
3058 data_word = (preamble | prepare_ddr_word(sys_get_be16(&buf[i])));
3059 crc5 = i3c_cdns_crc5(crc5, sys_get_be16(&buf[i]));
3060 sys_write32(data_word, config->base + SLV_DDR_TX_FIFO);
3061 }
3062 /* end of data buffer, write crc packet (if we are still not full) */
3063 if ((i == len) && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL))) {
3064 sys_write32(DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | crc5 << 9,
3065 config->base + SLV_DDR_TX_FIFO);
3066 }
3067
3068 /* setup THR interrupt */
3069 uint32_t thr_ctrl = sys_read32(config->base + SLV_DDR_TX_RX_THR_CTRL);
3070
3071 /*
3072 * Interrupt at half of the data or FIFO depth to give it enough time to be
3073 * processed. The ISR will then call back into the function pointer
3074 * `read_processed_cb` to collect more data to transmit
3075 */
3076 thr_ctrl &= ~TX_THR_MASK;
3077 thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
3078
3079 sys_write32(thr_ctrl, config->base + SLV_DDR_TX_RX_THR_CTRL);
3080 /* return total bytes written */
3081 return i;
3082 }
3083
3084 /**
3085 * @brief Writes to the Target's TX FIFO
3086 *
3087 * The Cadence I3C will then ACK read requests to its TX FIFO from a
3088 * Controller.
3089 *
3090 * @param dev Pointer to the device structure for an I3C controller
3091 * driver configured in target mode.
3092 * @param buf Pointer to the buffer
3093 * @param len Length of the buffer
3094 *
3095 * @retval Total number of bytes written
3096 * @retval -EACCES Not in Target Mode
3097 * @retval -ENOSPC No space in Tx FIFO
3098 */
3099 static int cdns_i3c_target_tx_write(const struct device *dev, uint8_t *buf, uint16_t len,
3100 uint8_t hdr_mode)
3101 {
3102 const struct cdns_i3c_config *config = dev->config;
3103 struct cdns_i3c_data *data = dev->data;
3104 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
3105 const uint32_t *buf_32 = (uint32_t *)buf;
3106 uint32_t i = 0;
3107 uint32_t val = 0;
3108 uint16_t remain = len;
3109
3110 /* check if we are currently a target */
3111 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
3112 return -EACCES;
3113 }
3114
3115 /* check if there is space available in the tx fifo */
3116 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL) {
3117 return -ENOSPC;
3118 }
3119
3120 k_mutex_lock(&data->bus_lock, K_FOREVER);
3121
3122 /* rev 1p7 requires the length be written to the SLV_CTRL reg */
3123 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
3124 sys_write32(len, config->base + SLV_CTRL);
3125 }
3126 if (hdr_mode == I3C_MSG_HDR_DDR) {
3127 if (ctrl_config->supported_hdr & I3C_MSG_HDR_DDR) {
3128 i = cdns_i3c_target_tx_ddr_write(dev, buf, len);
3129 /* TODO: DDR THR interrupt support not implemented yet*/
3130 } else {
3131 LOG_ERR("%s: HDR-DDR not supported", dev->name);
3132 i = -ENOTSUP;
3133 }
3134 } else if (hdr_mode == 0) {
3135 /* write as much as you can to the fifo */
3136 while (i < len &&
3137 (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL))) {
3138 /* with rev 1p7 or later, while as a target, the fifos use the full word,
3139 * otherwise only the first byte is used
3140 */
3141 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
3142 remain = len - i;
3143 if (remain >= 4) {
3144 val = *buf_32++;
3145 } else if (remain > 0) {
3146 val = 0;
3147 memcpy(&val, buf_32, remain);
3148 }
3149 sys_write32(val, config->base + TX_FIFO);
3150 i += 4;
3151 } else {
3152 sys_write32((uint32_t)buf[i], config->base + TX_FIFO);
3153 i++;
3154 }
3155 }
3156
3157 /* setup THR interrupt */
3158 uint32_t thr_ctrl = sys_read32(config->base + TX_RX_THR_CTRL);
3159
3160 /*
3161 * Interrupt at half of the data or FIFO depth to give it enough time to be
3162 * processed. The ISR will then call back into the function pointer
3163 * `read_processed_cb` to collect more data to transmit
3164 */
3165 thr_ctrl &= ~TX_THR_MASK;
3166 thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
3167 sys_write32(thr_ctrl, config->base + TX_RX_THR_CTRL);
3168 } else {
3169 LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, hdr_mode);
3170 i = -ENOTSUP;
3171 }
3172
3173 k_mutex_unlock(&data->bus_lock);
3174
3175 /* return total bytes written */
3176 return i;
3177 }
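
/*
 * Usage sketch (illustrative only, assuming the generic i3c_target_tx_write()
 * wrapper mirrors the driver signature above): a target application pre-loads
 * the TX FIFO so a later private read from the active controller can be ACKed.
 * Passing 0 for hdr_mode selects the SDR path:
 *
 *   uint8_t reply[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
 *   int written = i3c_target_tx_write(dev, reply, sizeof(reply), 0);
 *
 *   if (written < 0) {
 *       LOG_ERR("TX preload failed (%d)", written);
 *   }
 */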
3178
3179 /**
3180 * @brief Instructs the I3C Target device to register itself to the I3C Controller
3181 *
3182 * This routine instructs the I3C Target device to register itself to the I3C
3183 * Controller via its parent controller's i3c_target_register() API.
3184 *
3185 * @param dev Pointer to target device driver instance.
3186 * @param cfg Config struct with functions and parameters used by the I3C driver
3187 * to send bus events
3188 *
3189 * @return @see i3c_target_register.
3190 */
3191 static int cdns_i3c_target_register(const struct device *dev, struct i3c_target_config *cfg)
3192 {
3193 struct cdns_i3c_data *data = dev->data;
3194
3195 data->target_config = cfg;
3196 return 0;
3197 }
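
/*
 * Usage sketch (illustrative only): the callback table consumed by the ISR
 * above (write_received_cb, read_processed_cb, stop_cb) is supplied by the
 * application and registered once; the my_* handlers are application-defined:
 *
 *   static const struct i3c_target_callbacks my_cbs = {
 *       .write_received_cb = my_write_received,
 *       .read_processed_cb = my_read_processed,
 *       .stop_cb = my_stop,
 *   };
 *   static struct i3c_target_config my_cfg = { .callbacks = &my_cbs };
 *
 *   (void)i3c_target_register(dev, &my_cfg);
 */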
3198
3199 /**
3200 * @brief Unregisters the provided config as Target device
3201 *
3202 * This routine disables I3C target mode for the 'dev' I3C bus driver using
3203 * the provided 'config' struct containing the functions and parameters
3204 * to send bus events.
3205 *
3206 * @param dev Pointer to target device driver instance.
3207 * @param cfg Config struct with functions and parameters used by the I3C driver
3208 * to send bus events
3209 *
3210 * @return @see i3c_target_unregister.
3211 */
3212 static int cdns_i3c_target_unregister(const struct device *dev, struct i3c_target_config *cfg)
3213 {
3214 /* no way to disable? maybe write DA to 0? */
3215 return 0;
3216 }
3217
3218 /**
3219 * @brief Find a registered I3C target device.
3220 *
3221 * This returns the I3C device descriptor of the I3C device
3222 * matching the incoming @p id.
3223 *
3224 * @param dev Pointer to controller device driver instance.
3225 * @param id Pointer to I3C device ID.
3226 *
3227 * @return @see i3c_device_find.
3228 */
3229 static struct i3c_device_desc *cdns_i3c_device_find(const struct device *dev,
3230 const struct i3c_device_id *id)
3231 {
3232 const struct cdns_i3c_config *config = dev->config;
3233
3234 return i3c_dev_list_find(&config->common.dev_list, id);
3235 }
3236
3237 /**
3238 * Find a registered I2C target device.
3239 *
3240 * Controller only API.
3241 *
3242 * This returns the I2C device descriptor of the I2C device
3243 * matching the device address @p addr.
3244 *
3245 * @param dev Pointer to controller device driver instance.
3246 * @param addr I2C target device address.
3247 *
3248 * @return @see i3c_i2c_device_find.
3249 */
3250 static struct i3c_i2c_device_desc *cdns_i3c_i2c_device_find(const struct device *dev, uint16_t addr)
3251 {
3252 return i3c_dev_list_i2c_addr_find(dev, addr);
3253 }
3254
3255 /**
3256 * @brief Transfer messages in I2C mode.
3257 *
3258 * @see i2c_transfer
3259 *
3260 * @param dev Pointer to device driver instance.
3261 * @param target Pointer to target device descriptor.
3262 * @param msgs Pointer to I2C messages.
3263 * @param num_msgs Number of messages to transfer.
3264 *
3265 * @return @see i2c_transfer
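 *
 * A minimal sketch of reaching a legacy I2C device behind this controller
 * (illustrative; the node label and 0x50 address are hypothetical, and the
 * I2C device must be declared as a child of the controller in devicetree so
 * the driver can find its descriptor):
 *
 * @code{.c}
 * const struct device *bus = DEVICE_DT_GET(DT_NODELABEL(i3c0));
 * uint8_t reg = 0x00;
 * uint8_t val;
 *
 * int ret = i2c_write_read(bus, 0x50, &reg, sizeof(reg), &val, sizeof(val));
 * @endcode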
3266 */
3267 static int cdns_i3c_i2c_api_transfer(const struct device *dev, struct i2c_msg *msgs,
3268 uint8_t num_msgs, uint16_t addr)
3269 {
3270 struct i3c_i2c_device_desc *i2c_dev = cdns_i3c_i2c_device_find(dev, addr);
3271 int ret;
3272
3273 if (i2c_dev == NULL) {
3274 ret = -ENODEV;
3275 } else {
3276 ret = cdns_i3c_i2c_transfer(dev, i2c_dev, msgs, num_msgs);
3277 }
3278
3279 return ret;
3280 }
3281
3282 /**
3283 * ACK or NACK Controller Handoffs
3284 *
3285 * Configures whether this device will accept (ACK) or decline (NACK)
3286 * controller role handoff requests.
3287 *
3288 * @param dev Pointer to device driver instance.
3289 * @param accept True to accept controller handoffs, False to decline
3290 *
3291 * @return @see i3c_target_controller_handoff
3292 */
3293 static int cdns_i3c_target_controller_handoff(const struct device *dev, bool accept)
3294 {
3295 const struct cdns_i3c_config *config = dev->config;
3296 uint32_t ctrl = sys_read32(config->base + CTRL);
3297
3298 if (accept) {
3299 sys_write32(ctrl | CTRL_MST_ACK, config->base + CTRL);
3300 } else {
3301 sys_write32(ctrl & ~CTRL_MST_ACK, config->base + CTRL);
3302 }
3303
3304 return 0;
3305 }
3306
3307 /**
3308 * Determine I3C bus mode from the i2c devices on the bus
3309 *
3310 * Reads the LVR of all I2C devices and returns the I3C bus
3311 * mode.
3312 *
3313 * @param dev_list Pointer to device list
3314 *
3315 * @return @see enum i3c_bus_mode.
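 *
 * Illustrative precedence (hypothetical device mix): LVR indexes {0, 2} on
 * the bus resolve to I3C_BUS_MODE_MIXED_SLOW, while indexes {0, 1} resolve
 * to I3C_BUS_MODE_MIXED_LIMITED, since the most restrictive device wins.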
3316 */
3317 static enum i3c_bus_mode i3c_bus_mode(const struct i3c_dev_list *dev_list)
3318 {
3319 enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
3320
3321 for (int i = 0; i < dev_list->num_i2c; i++) {
3322 switch (I3C_LVR_I2C_DEV_IDX(dev_list->i2c[i].lvr)) {
3323 case I3C_LVR_I2C_DEV_IDX_0:
3324 if (mode < I3C_BUS_MODE_MIXED_FAST) {
3325 mode = I3C_BUS_MODE_MIXED_FAST;
3326 }
3327 break;
3328 case I3C_LVR_I2C_DEV_IDX_1:
3329 if (mode < I3C_BUS_MODE_MIXED_LIMITED) {
3330 mode = I3C_BUS_MODE_MIXED_LIMITED;
3331 }
3332 break;
3333 case I3C_LVR_I2C_DEV_IDX_2:
3334 if (mode < I3C_BUS_MODE_MIXED_SLOW) {
3335 mode = I3C_BUS_MODE_MIXED_SLOW;
3336 }
3337 break;
3338 default:
3339 mode = I3C_BUS_MODE_INVALID;
3340 break;
3341 }
3342 }
3343 return mode;
3344 }
3345
3346 /**
3347 * Determine THD_DEL value for CTRL register
3348 *
3349 * Should be MIN(t_cf, t_cr) + 3ns
3350 *
3351 * @param dev Pointer to device driver instance.
3352 *
3353 * @return Value to be written to THD_DEL
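 *
 * Illustrative arithmetic only (hypothetical numbers): with a 100 MHz input
 * clock (10 ns period) and a required hold time of 10 ns, thd_delay is
 * DIV_ROUND_UP(10, 10) = 1 and the returned field value is THD_DELAY_MAX - 1.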
3354 */
3355 static uint8_t cdns_i3c_sda_data_hold(const struct device *dev)
3356 {
3357 const struct cdns_i3c_config *config = dev->config;
3358 uint32_t input_clock_frequency = config->input_frequency;
3359 uint8_t thd_delay =
3360 DIV_ROUND_UP(I3C_HD_PP_DEFAULT_NS, (NSEC_PER_SEC / input_clock_frequency));
3361
3362 if (thd_delay > THD_DELAY_MAX) {
3363 thd_delay = THD_DELAY_MAX;
3364 }
3365
3366 return (THD_DELAY_MAX - thd_delay);
3367 }
3368
3369 static void i3c_cdns_deftgts_work_fn(struct k_work *work)
3370 {
3371 const struct cdns_i3c_config *config;
3372 struct cdns_i3c_data *data;
3373 const struct device *dev;
3374 uint32_t devs;
3375 uint8_t count;
3376 uint8_t n = 0;
3377
3378 data = CONTAINER_OF(work, struct cdns_i3c_data, deftgts_work);
3379 dev = data->dev;
3380 config = dev->config;
3381 data = dev->data;
3382
3383 devs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
3384 data->free_rr_slots = GENMASK(data->max_devs, 1) & ~devs;
3385
3386 /*
3387 * Count the number of set bits in devs. The IP skips writing itself to the RR if
3388 * it was listed in DEFTGTS. Also, if the IP never had a DA, the deftgts interrupt
3389 * will never fire. Subtract 1 as the active controller is not included in `count`.
3390 */
3391 count = POPCOUNT(devs) - 1;
3392
3393 /* Free memory if it was previously allocated */
3394 if (data->common.deftgts) {
3395 free(data->common.deftgts);
3396 data->common.deftgts = NULL;
3397 }
3398
3399 /* Allocate memory for deftgts */
3400 data->common.deftgts =
3401 malloc(sizeof(uint8_t) + sizeof(struct i3c_ccc_deftgts_active_controller) +
3402 (count * sizeof(struct i3c_ccc_deftgts_target)));
3403 if (!data->common.deftgts) {
3404 LOG_ERR("%s: Failed to allocate memory for DEFTGTS", dev->name);
3405 return;
3406 }
3407
3408 data->common.deftgts->count = count;
3409
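/*
 * Illustrative layout of the buffer filled below (assuming the packed 4-byte
 * entries of the DEFTGTS CCC structures), for a hypothetical bus with two
 * targets besides the active controller:
 *
 *   byte 0       count (= 2)
 *   bytes 1-4    active controller: addr, dcr, bcr, static_addr
 *   bytes 5-8    target 0: addr, dcr/lvr, bcr, static_addr
 *   bytes 9-12   target 1: addr, dcr/lvr, bcr, static_addr
 */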
3410 for (uint8_t i = find_lsb_set(devs); i <= find_msb_set(devs); i++) {
3411 uint8_t rr_idx = i - 1;
3412
3413 if (devs & BIT(rr_idx)) {
3414 /* Read RRx registers */
3415 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(rr_idx));
3416 uint32_t dev_id_rr2 = sys_read32(config->base + DEV_ID_RR2(rr_idx));
3417
3418 uint8_t addr = (dev_id_rr0 & 0xFE) >> 1;
3419 uint8_t bcr = dev_id_rr2 >> 8;
3420 uint8_t dcr_lvr = dev_id_rr2 & 0xFF;
3421 bool is_i3c = !!(dev_id_rr0 & DEV_ID_RR0_IS_I3C);
3422
3424 /* RR index 1 is always expected to be the active controller */
3425 if (rr_idx == 1) {
3426 data->common.deftgts->active_controller.addr = addr;
3427 data->common.deftgts->active_controller.dcr = dcr_lvr;
3428 data->common.deftgts->active_controller.bcr = bcr;
3429 data->common.deftgts->active_controller.static_addr = 0;
3430 } else if (is_i3c) {
3431 data->common.deftgts->targets[n].addr = addr;
3432 data->common.deftgts->targets[n].dcr = dcr_lvr;
3433 data->common.deftgts->targets[n].bcr = bcr;
3434 data->common.deftgts->targets[n].static_addr = 0;
3435 n++;
3436 } else {
3437 data->common.deftgts->targets[n].addr = 0;
3438 data->common.deftgts->targets[n].lvr = dcr_lvr;
3439 data->common.deftgts->targets[n].bcr = 0;
3440 data->common.deftgts->targets[n].static_addr = addr;
3441 n++;
3442 }
3443 }
3444 }
3445 data->common.deftgts_refreshed = true;
3446 LOG_HEXDUMP_DBG((uint8_t *)data->common.deftgts,
3447 sizeof(uint8_t) + sizeof(struct i3c_ccc_deftgts_active_controller) +
3448 (data->common.deftgts->count * sizeof(struct i3c_ccc_deftgts_target)),
3449 "DEFTGTS Received");
3450 }
3451
3452 /**
3453 * @brief Initialize the hardware.
3454 *
3455 * @param dev Pointer to controller device driver instance.
3456 */
3457 static int cdns_i3c_bus_init(const struct device *dev)
3458 {
3459 struct cdns_i3c_data *data = dev->data;
3460 const struct cdns_i3c_config *config = dev->config;
3461 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
3462
3463 data->dev = dev;
3464
3465 cdns_i3c_read_hw_cfg(dev);
3466
3467 /* Clear all retaining regs */
3468 sys_write32(DEVS_CTRL_DEV_CLR_ALL, config->base + DEVS_CTRL);
3469
3470 uint32_t conf0 = sys_read32(config->base + CONF_STATUS0);
3471 uint32_t conf1 = sys_read32(config->base + CONF_STATUS1);
3472 data->max_devs = CONF_STATUS0_DEVS_NUM(conf0);
3473 data->free_rr_slots = GENMASK(data->max_devs, 1);
3474
3475 /* The DDR supported bit moved in revision 1p7, which also added the dev role field */
3476 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
3477 ctrl_config->supported_hdr =
3478 (conf1 & CONF_STATUS1_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
3479 ctrl_config->is_secondary =
3480 (CONF_STATUS0_DEV_ROLE(conf0) == CONF_STATUS0_DEV_ROLE_SEC_MASTER) ? true
3481 : false;
3482 } else {
3483 ctrl_config->supported_hdr =
3484 (conf0 & CONF_STATUS0_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
3485 ctrl_config->is_secondary = (conf0 & CONF_STATUS0_SEC_MASTER) ? true : false;
3486 }
3487 k_mutex_init(&data->bus_lock);
3488 k_sem_init(&data->xfer.complete, 0, 1);
3489 k_sem_init(&data->ch_complete, 0, 1);
3490 k_work_init(&data->deftgts_work, i3c_cdns_deftgts_work_fn);
3491 #ifdef CONFIG_I3C_USE_IBI
3492 k_sem_init(&data->ibi_hj_complete, 0, 1);
3493 k_sem_init(&data->ibi_cr_complete, 0, 1);
3494 #endif
3495
3496 cdns_i3c_interrupts_disable(config);
3497 cdns_i3c_interrupts_clear(config);
3498
3499 config->irq_config_func(dev);
3500
3501 /* Ensure the bus is disabled. */
3502 sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);
3503
3504 /* determine prescaler timings for i3c and i2c scl */
3505 cdns_i3c_set_prescalers(dev);
3506
3507 enum i3c_bus_mode mode = i3c_bus_mode(&config->common.dev_list);
3508
3509 LOG_DBG("%s: i3c bus mode %d", dev->name, mode);
3510 int cdns_mode;
3511
3512 switch (mode) {
3513 case I3C_BUS_MODE_PURE:
3514 cdns_mode = CTRL_PURE_BUS_MODE;
3515 break;
3516 case I3C_BUS_MODE_MIXED_FAST:
3517 cdns_mode = CTRL_MIXED_FAST_BUS_MODE;
3518 break;
3519 case I3C_BUS_MODE_MIXED_LIMITED:
3520 case I3C_BUS_MODE_MIXED_SLOW:
3521 cdns_mode = CTRL_MIXED_SLOW_BUS_MODE;
3522 break;
3523 default:
3524 return -EINVAL;
3525 }
3526
3527 /*
3528 * When a Hot-Join request happens, disable all events coming from this device.
3529 * We will issue ENTDAA afterwards from the threaded IRQ handler.
3530 * Set HJ ACK later after bus init to prevent targets from indirect DAA enforcement.
3531 *
3532 * Set the I3C Bus Mode based on the LVR of the I2C devices
3533 */
3534 uint32_t ctrl =
3535 CTRL_HJ_DISEC | CTRL_MCS_EN | CTRL_MST_ACK | (CTRL_BUS_MODE_MASK & cdns_mode);
3536
3537 /*
3538 * Cadence I3C release r104v1p0 and above support configuration of the SDA data hold time
3539 */
3540 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 4)) {
3541 ctrl |= CTRL_THD_DELAY(cdns_i3c_sda_data_hold(dev));
3542 }
3543
3544 /*
3545 * Cadence I3C release r105v1p0 and above support I3C v1.1 timing change
3546 * for tCASr_min = tCAS_min / 2, otherwise tCASr_min = tCAS_min (as
3547 * per MIPI spec v1.0)
3548 */
3549 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 5)) {
3550 ctrl |= CTRL_I3C_11_SUPP;
3551 }
3552
3553 /* write ctrl register value */
3554 sys_write32(ctrl, config->base + CTRL);
3555
3556 /* enable Core */
3557 sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
3558
3559 /* Set fifo thresholds. */
3560 sys_write32(CMD_THR(I3C_CMDD_THR) | IBI_THR(config->ibid_thr) | CMDR_THR(I3C_CMDR_THR) |
3561 IBIR_THR(I3C_IBIR_THR),
3562 config->base + CMD_IBI_THR_CTRL);
3563
3564 /* Set TX/RX interrupt thresholds. */
3565 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
3566 sys_write32(TX_THR(I3C_TX_THR) | RX_THR(data->hw_cfg.rx_mem_depth),
3567 config->base + TX_RX_THR_CTRL);
3568 } else {
3569 sys_write32(TX_THR(1) | RX_THR(1), config->base + TX_RX_THR_CTRL);
3570 sys_write32(SLV_DDR_TX_THR(0) | SLV_DDR_RX_THR(1),
3571 config->base + SLV_DDR_TX_RX_THR_CTRL);
3572 }
3573
3574 /* enable target interrupts */
3575 sys_write32(SLV_INT_DA_UPD | SLV_INT_SDR_RD_COMP | SLV_INT_SDR_WR_COMP |
3576 SLV_INT_SDR_RX_THR | SLV_INT_SDR_TX_THR | SLV_INT_SDR_RX_UNF |
3577 SLV_INT_SDR_TX_OVF | SLV_INT_HJ_DONE | SLV_INT_MR_DONE |
3578 SLV_INT_DEFSLVS | SLV_INT_DDR_WR_COMP | SLV_INT_DDR_RD_COMP |
3579 SLV_INT_DDR_RX_THR | SLV_INT_DDR_TX_THR,
3580 config->base + SLV_IER);
3581
3582 /* Enable controller interrupts. */
3583 sys_write32(MST_INT_IBIR_THR | MST_INT_RX_UNF | MST_INT_HALTED | MST_INT_MR_DONE |
3584 MST_INT_TX_OVF | MST_INT_IBIR_OVF | MST_INT_IBID_THR,
3585 config->base + MST_IER);
3586
3587 int ret = i3c_addr_slots_init(dev);
3588
3589 if (ret != 0) {
3590 return ret;
3591 }
3592
3593 /* only primary controllers are responsible for initializing the bus */
3594 if (!ctrl_config->is_secondary) {
3595 /* Program retaining regs. */
3596 cdns_i3c_program_controller_retaining_reg(dev);
3597 /* Busy-wait for the bus to go idle. */
3598 k_busy_wait(201);
3599 /* Perform bus initialization */
3600 ret = i3c_bus_init(dev, &config->common.dev_list);
3601 #ifdef CONFIG_I3C_USE_IBI
3602 /* Bus Initialization Complete, allow HJ ACKs */
3603 sys_write32(CTRL_HJ_ACK | sys_read32(config->base + CTRL), config->base + CTRL);
3604 #endif
3605 }
3606
3607 return 0;
3608 }
3609
3610 static DEVICE_API(i3c, api) = {
3611 .i2c_api.configure = cdns_i3c_i2c_api_configure,
3612 .i2c_api.transfer = cdns_i3c_i2c_api_transfer,
3613 #ifdef CONFIG_I2C_RTIO
3614 .i2c_api.iodev_submit = i2c_iodev_submit_fallback,
3615 #endif
3616
3617 .configure = cdns_i3c_configure,
3618 .config_get = cdns_i3c_config_get,
3619
3620 .attach_i3c_device = cdns_i3c_attach_device,
3621 .reattach_i3c_device = cdns_i3c_reattach_device,
3622 .detach_i3c_device = cdns_i3c_detach_device,
3623 .attach_i2c_device = cdns_i3c_i2c_attach_device,
3624 .detach_i2c_device = cdns_i3c_i2c_detach_device,
3625
3626 .do_daa = cdns_i3c_do_daa,
3627 .do_ccc = cdns_i3c_do_ccc,
3628
3629 .i3c_device_find = cdns_i3c_device_find,
3630
3631 .i3c_xfers = cdns_i3c_transfer,
3632
3633 .target_tx_write = cdns_i3c_target_tx_write,
3634 .target_register = cdns_i3c_target_register,
3635 .target_unregister = cdns_i3c_target_unregister,
3636 .target_controller_handoff = cdns_i3c_target_controller_handoff,
3637
3638 #ifdef CONFIG_I3C_USE_IBI
3639 .ibi_hj_response = cdns_i3c_ibi_hj_response,
3640 .ibi_enable = cdns_i3c_controller_ibi_enable,
3641 .ibi_disable = cdns_i3c_controller_ibi_disable,
3642 .ibi_raise = cdns_i3c_target_ibi_raise,
3643 #endif
3644
3645 #ifdef CONFIG_I3C_RTIO
3646 .iodev_submit = i3c_iodev_submit_fallback,
3647 #endif
3648 };
3649
3650 #define CADENCE_I3C_INSTANTIATE(n) \
3651 static void cdns_i3c_config_func_##n(const struct device *dev); \
3652 static struct i3c_device_desc cdns_i3c_device_array_##n[] = I3C_DEVICE_ARRAY_DT_INST(n); \
3653 static struct i3c_i2c_device_desc cdns_i3c_i2c_device_array_##n[] = \
3654 I3C_I2C_DEVICE_ARRAY_DT_INST(n); \
3655 static const struct cdns_i3c_config i3c_config_##n = { \
3656 .base = DT_INST_REG_ADDR(n), \
3657 .input_frequency = DT_INST_PROP(n, input_clock_frequency), \
3658 .irq_config_func = cdns_i3c_config_func_##n, \
3659 .ibid_thr = DT_INST_PROP(n, ibid_thr), \
3660 .common.dev_list.i3c = cdns_i3c_device_array_##n, \
3661 .common.dev_list.num_i3c = ARRAY_SIZE(cdns_i3c_device_array_##n), \
3662 .common.dev_list.i2c = cdns_i3c_i2c_device_array_##n, \
3663 .common.dev_list.num_i2c = ARRAY_SIZE(cdns_i3c_i2c_device_array_##n), \
3664 }; \
3665 static struct cdns_i3c_data i3c_data_##n = { \
3666 .common.ctrl_config.scl.i3c = DT_INST_PROP_OR(n, i3c_scl_hz, 0), \
3667 .common.ctrl_config.scl.i2c = DT_INST_PROP_OR(n, i2c_scl_hz, 0), \
3668 }; \
3669 DEVICE_DT_INST_DEFINE(n, cdns_i3c_bus_init, NULL, &i3c_data_##n, &i3c_config_##n, \
3670 POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, &api); \
3671 static void cdns_i3c_config_func_##n(const struct device *dev) \
3672 { \
3673 IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), cdns_i3c_irq_handler, \
3674 DEVICE_DT_INST_GET(n), 0); \
3675 irq_enable(DT_INST_IRQN(n)); \
3676 };
3677
3678 #define DT_DRV_COMPAT cdns_i3c
3679 DT_INST_FOREACH_STATUS_OKAY(CADENCE_I3C_INSTANTIATE)
3680
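/*
 * A minimal sketch of obtaining an instance created by the macro above from
 * application code (the node label is hypothetical):
 *
 * const struct device *i3c_dev = DEVICE_DT_GET(DT_NODELABEL(i3c0));
 *
 * if (!device_is_ready(i3c_dev)) {
 *         return -ENODEV;
 * }
 */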