1 /*
2 * Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <errno.h>
8
9 #include <zephyr/drivers/i3c.h>
10 #include <zephyr/init.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/byteorder.h>
14 #include <zephyr/sys/sys_io.h>
15 #include <zephyr/sys/util.h>
16
17 #define DEV_ID 0x0
18 #define DEV_ID_I3C_MASTER 0x5034
19
20 #define CONF_STATUS0 0x4
21 #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
22 #define CONF_STATUS0_ECC_CHK BIT(28)
23 #define CONF_STATUS0_INTEG_CHK BIT(27)
24 #define CONF_STATUS0_CSR_DAP_CHK BIT(26)
25 #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
26 #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
27 #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
28 #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
29 #define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 6))
30 /* CONF_STATUS0_SUPPORTS_DDR moved to CONF_STATUS1 in rev >= 1p7 */
31 #define CONF_STATUS0_SUPPORTS_DDR BIT(5)
32 #define CONF_STATUS0_SEC_MASTER BIT(4)
33 /* And it was replaced with a Dev Role mask */
34 #define CONF_STATUS0_DEV_ROLE(x) (((x) & GENMASK(5, 4)) >> 4)
35 #define CONF_STATUS0_DEV_ROLE_MAIN_MASTER 0
36 #define CONF_STATUS0_DEV_ROLE_SEC_MASTER 1
37 #define CONF_STATUS0_DEV_ROLE_SLAVE 2
38 #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
39
40 #define CONF_STATUS1 0x8
41 #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
42 #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
43 #define CONF_STATUS1_SLV_DDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
44 #define CONF_STATUS1_SLV_DDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
45 #define CONF_STATUS1_SUPPORTS_DDR BIT(14)
46 #define CONF_STATUS1_ALT_MODE BIT(13)
47 #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
48 #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
49 #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
50
51 #define REV_ID 0xc
52 #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
53 #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
54 #define REV_ID_REV(id) ((id) & GENMASK(7, 0))
55 #define REV_ID_VERSION(m, n) ((m << 5) | (n))
56 #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 5)) >> 5)
57 #define REV_ID_REV_MINOR(id) ((id) & GENMASK(4, 0))
58
59 #define CTRL 0x10
60 #define CTRL_DEV_EN BIT(31)
61 #define CTRL_HALT_EN BIT(30)
62 #define CTRL_MCS BIT(29)
63 #define CTRL_MCS_EN BIT(28)
64 #define CTRL_I3C_11_SUPP BIT(26)
65 #define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
66 #define CTRL_TC_EN BIT(9)
67 #define CTRL_HJ_DISEC BIT(8)
68 #define CTRL_MST_ACK BIT(7)
69 #define CTRL_HJ_ACK BIT(6)
70 #define CTRL_HJ_INIT BIT(5)
71 #define CTRL_MST_INIT BIT(4)
72 #define CTRL_AHDR_OPT BIT(3)
73 #define CTRL_PURE_BUS_MODE 0
74 #define CTRL_MIXED_FAST_BUS_MODE 2
75 #define CTRL_MIXED_SLOW_BUS_MODE 3
76 #define CTRL_BUS_MODE_MASK GENMASK(1, 0)
77 #define THD_DELAY_MAX 3
78
79 #define PRESCL_CTRL0 0x14
80 #define PRESCL_CTRL0_I2C(x) ((x) << 16)
81 #define PRESCL_CTRL0_I3C(x) (x)
82 #define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
83 #define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
84
85 #define PRESCL_CTRL1 0x18
86 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
87 #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
88 #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
89 #define PRESCL_CTRL1_OD_LOW(x) (x)
90
91 #define SLV_STATUS4 0x1C
92 #define SLV_STATUS4_BUSCON_FILL_LVL GENMASK(16, 8)
93 #define SLV_STATUS5_BUSCON_DATA GENMASK(7, 0)
94
95 #define MST_IER 0x20
96 #define MST_IDR 0x24
97 #define MST_IMR 0x28
98 #define MST_ICR 0x2c
99 #define MST_ISR 0x30
100 #define MST_INT_HALTED BIT(18)
101 #define MST_INT_MR_DONE BIT(17)
102 #define MST_INT_IMM_COMP BIT(16)
103 #define MST_INT_TX_THR BIT(15)
104 #define MST_INT_TX_OVF BIT(14)
105 #define MST_INT_C_REF_ROV BIT(13)
106 #define MST_INT_IBID_THR BIT(12)
107 #define MST_INT_IBID_UNF BIT(11)
108 #define MST_INT_IBIR_THR BIT(10)
109 #define MST_INT_IBIR_UNF BIT(9)
110 #define MST_INT_IBIR_OVF BIT(8)
111 #define MST_INT_RX_THR BIT(7)
112 #define MST_INT_RX_UNF BIT(6)
113 #define MST_INT_CMDD_EMP BIT(5)
114 #define MST_INT_CMDD_THR BIT(4)
115 #define MST_INT_CMDD_OVF BIT(3)
116 #define MST_INT_CMDR_THR BIT(2)
117 #define MST_INT_CMDR_UNF BIT(1)
118 #define MST_INT_CMDR_OVF BIT(0)
119 #define MST_INT_MASK GENMASK(18, 0)
120
121 #define MST_STATUS0 0x34
122 #define MST_STATUS0_IDLE BIT(18)
123 #define MST_STATUS0_HALTED BIT(17)
124 #define MST_STATUS0_MASTER_MODE BIT(16)
125 #define MST_STATUS0_TX_FULL BIT(13)
126 #define MST_STATUS0_IBID_FULL BIT(12)
127 #define MST_STATUS0_IBIR_FULL BIT(11)
128 #define MST_STATUS0_RX_FULL BIT(10)
129 #define MST_STATUS0_CMDD_FULL BIT(9)
130 #define MST_STATUS0_CMDR_FULL BIT(8)
131 #define MST_STATUS0_TX_EMP BIT(5)
132 #define MST_STATUS0_IBID_EMP BIT(4)
133 #define MST_STATUS0_IBIR_EMP BIT(3)
134 #define MST_STATUS0_RX_EMP BIT(2)
135 #define MST_STATUS0_CMDD_EMP BIT(1)
136 #define MST_STATUS0_CMDR_EMP BIT(0)
137
138 #define CMDR 0x38
139 #define CMDR_NO_ERROR 0
140 #define CMDR_DDR_PREAMBLE_ERROR 1
141 #define CMDR_DDR_PARITY_ERROR 2
142 #define CMDR_DDR_RX_FIFO_OVF 3
143 #define CMDR_DDR_TX_FIFO_UNF 4
144 #define CMDR_M0_ERROR 5
145 #define CMDR_M1_ERROR 6
146 #define CMDR_M2_ERROR 7
147 #define CMDR_MST_ABORT 8
148 #define CMDR_NACK_RESP 9
149 #define CMDR_INVALID_DA 10
150 #define CMDR_DDR_DROPPED 11
151 #define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
152 #define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
153 #define CMDR_CMDID_HJACK_DISEC 0xfe
154 #define CMDR_CMDID_HJACK_ENTDAA 0xff
155 #define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
156
157 #define IBIR 0x3c
158 #define IBIR_ACKED BIT(12)
159 #define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
160 #define IBIR_SLVID_INV 0xF
161 #define IBIR_ERROR BIT(7)
162 #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
163 #define IBIR_TYPE_IBI 0
164 #define IBIR_TYPE_HJ 1
165 #define IBIR_TYPE_MR 2
166 #define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
167
168 #define SLV_IER 0x40
169 #define SLV_IDR 0x44
170 #define SLV_IMR 0x48
171 #define SLV_ICR 0x4c
172 #define SLV_ISR 0x50
173 #define SLV_INT_CHIP_RST BIT(31)
174 #define SLV_INT_PERIPH_RST BIT(30)
175 #define SLV_INT_FLUSH_DONE BIT(29)
176 #define SLV_INT_RST_DAA BIT(28)
177 #define SLV_INT_BUSCON_UP BIT(26)
178 #define SLV_INT_MRL_UP BIT(25)
179 #define SLV_INT_MWL_UP BIT(24)
180 #define SLV_INT_IBI_THR BIT(23)
181 #define SLV_INT_IBI_DONE BIT(22)
182 #define SLV_INT_DEFSLVS BIT(21)
183 #define SLV_INT_TM BIT(20)
184 #define SLV_INT_ERROR BIT(19)
185 #define SLV_INT_EVENT_UP BIT(18)
186 #define SLV_INT_HJ_DONE BIT(17)
187 #define SLV_INT_MR_DONE BIT(16)
188 #define SLV_INT_DA_UPD BIT(15)
189 #define SLV_INT_SDR_FAIL BIT(14)
190 #define SLV_INT_DDR_FAIL BIT(13)
191 #define SLV_INT_M_RD_ABORT BIT(12)
192 #define SLV_INT_DDR_RX_THR BIT(11)
193 #define SLV_INT_DDR_TX_THR BIT(10)
194 #define SLV_INT_SDR_RX_THR BIT(9)
195 #define SLV_INT_SDR_TX_THR BIT(8)
196 #define SLV_INT_DDR_RX_UNF BIT(7)
197 #define SLV_INT_DDR_TX_OVF BIT(6)
198 #define SLV_INT_SDR_RX_UNF BIT(5)
199 #define SLV_INT_SDR_TX_OVF BIT(4)
200 #define SLV_INT_DDR_RD_COMP BIT(3)
201 #define SLV_INT_DDR_WR_COMP BIT(2)
202 #define SLV_INT_SDR_RD_COMP BIT(1)
203 #define SLV_INT_SDR_WR_COMP BIT(0)
204
205 #define SLV_STATUS0 0x54
206 #define SLV_STATUS0_IBI_XFRD_BYTEs(s) (((s) & GENMASK(31, 24)) >> 24)
207 #define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
208 #define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
209
210 #define SLV_STATUS1 0x58
211 #define SLV_STATUS1_SCL_IN_RST BIT(31)
212 #define SLV_STATUS1_HJ_IN_USE BIT(30)
213 #define SLV_STATUS1_NACK_NXT_PW BIT(29)
214 #define SLV_STATUS1_NACK_NXT_PR BIT(28)
215 #define SLV_STATUS1_MR_PEND BIT(27)
216 #define SLV_STATUS1_HJ_PEND BIT(26)
217 #define SLV_STATUS1_IBI_PEND BIT(25)
218 #define SLV_STATUS1_IBI_DIS BIT(24)
219 #define SLV_STATUS1_BUS_VAR BIT(23)
220 #define SLV_STATUS1_TCAM0_DIS BIT(22)
221 #define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
222 #define SLV_STATUS1_VEN_TM BIT(19)
223 #define SLV_STATUS1_HJ_DIS BIT(18)
224 #define SLV_STATUS1_MR_DIS BIT(17)
225 #define SLV_STATUS1_PROT_ERR BIT(16)
226 #define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
227 #define SLV_STATUS1_HAS_DA BIT(8)
228 #define SLV_STATUS1_DDR_RX_FULL BIT(7)
229 #define SLV_STATUS1_DDR_TX_FULL BIT(6)
230 #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
231 #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
232 #define SLV_STATUS1_SDR_RX_FULL BIT(3)
233 #define SLV_STATUS1_SDR_TX_FULL BIT(2)
234 #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
235 #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
236
237 #define SLV_IBI_CTRL 0x5c
238 #define SLV_IBI_TCAM_EVNT(x) ((x) << 27)
239 #define SLV_IBI_PL(x) ((x) << 16)
240 #define SLV_IBI_TCAM0 BIT(9)
241 #define SLV_IBI_REQ BIT(8)
242 #define SLV_IBI_AUTO_CLR_IBI 1
243 #define SLV_IBI_AUTO_CLR_PR 2
244 #define SLV_IBI_AUTO_CLR_IBI_OR_PR 3
245 #define SLV_IBI_CLEAR_TRIGGER(x) ((x) << 4)
246
247 #define CMD0_FIFO 0x60
248 #define CMD0_FIFO_IS_DDR BIT(31)
249 #define CMD0_FIFO_IS_CCC BIT(30)
250 #define CMD0_FIFO_BCH BIT(29)
251 #define XMIT_BURST_STATIC_SUBADDR 0
252 #define XMIT_SINGLE_INC_SUBADDR 1
253 #define XMIT_SINGLE_STATIC_SUBADDR 2
254 #define XMIT_BURST_WITHOUT_SUBADDR 3
255 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
256 #define CMD0_FIFO_SBCA BIT(26)
257 #define CMD0_FIFO_RSBC BIT(25)
258 #define CMD0_FIFO_IS_10B BIT(24)
259 #define CMD0_FIFO_PL_LEN(l) ((l) << 12)
260 #define CMD0_FIFO_IS_DB BIT(11)
261 #define CMD0_FIFO_PL_LEN_MAX 4095
262 #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
263 #define CMD0_FIFO_RNW BIT(0)
264
265 #define CMD1_FIFO 0x64
266 #define CMD1_FIFO_CMDID(id) ((id) << 24)
267 #define CMD1_FIFO_DB(db) (((db) & GENMASK(7, 0)) << 16)
268 #define CMD1_FIFO_CSRADDR(a) (a)
269 #define CMD1_FIFO_CCC(id) (id)
270
271 #define TX_FIFO 0x68
272
273 #define TX_FIFO_STATUS 0x6C
274
275 #define IMD_CMD0 0x70
276 #define IMD_CMD0_PL_LEN(l) ((l) << 12)
277 #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
278 #define IMD_CMD0_RNW BIT(0)
279
280 #define IMD_CMD1 0x74
281 #define IMD_CMD1_CCC(id) (id)
282
283 #define IMD_DATA 0x78
284 #define RX_FIFO 0x80
285 #define IBI_DATA_FIFO 0x84
286 #define SLV_DDR_TX_FIFO 0x88
287 #define SLV_DDR_RX_FIFO 0x8c
288 #define DDR_PREAMBLE_MASK GENMASK(19, 18)
289 #define DDR_PREAMBLE_CMD_CRC (0x1 << 18)
290 #define DDR_PREAMBLE_DATA_ABORT (0x2 << 18)
291 #define DDR_PREAMBLE_DATA_ABORT_ALT (0x3 << 18)
292 #define DDR_DATA(x) (((x) & GENMASK(17, 2)) >> 2)
293 #define DDR_EVEN_PARITY BIT(0)
294 #define DDR_ODD_PARITY BIT(1)
295 #define DDR_CRC_AND_HEADER_SIZE 0x4
296 #define DDR_CONVERT_BUF_LEN(x) (4 * (x))
297
298 #define HDR_CMD_RD BIT(15)
299 #define HDR_CMD_CODE(c) (((c) & GENMASK(6, 0)) << 8)
300 #define DDR_CRC_TOKEN (0xC << 14)
301 #define DDR_CRC_TOKEN_MASK GENMASK(17, 14)
302 #define DDR_CRC(t) (((t) & (GENMASK(13, 9))) >> 9)
303
304 #define CMD_IBI_THR_CTRL 0x90
305 #define IBIR_THR(t) ((t) << 24)
306 #define CMDR_THR(t) ((t) << 16)
307 #define CMDR_THR_MASK (GENMASK(20, 16))
308 #define IBI_THR(t) ((t) << 8)
309 #define CMD_THR(t) (t)
310
311 #define TX_RX_THR_CTRL 0x94
312 #define RX_THR(t) ((t) << 16)
313 #define RX_THR_MASK (GENMASK(31, 16))
314 #define TX_THR(t) (t)
315 #define TX_THR_MASK (GENMASK(15, 0))
316
317 #define SLV_DDR_TX_RX_THR_CTRL 0x98
318 #define SLV_DDR_RX_THR(t) ((t) << 16)
319 #define SLV_DDR_TX_THR(t) (t)
320
321 #define FLUSH_CTRL 0x9c
322 #define FLUSH_IBI_RESP BIT(24)
323 #define FLUSH_CMD_RESP BIT(23)
324 #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
325 #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
326 #define FLUSH_IMM_FIFO BIT(20)
327 #define FLUSH_IBI_FIFO BIT(19)
328 #define FLUSH_RX_FIFO BIT(18)
329 #define FLUSH_TX_FIFO BIT(17)
330 #define FLUSH_CMD_FIFO BIT(16)
331
332 #define SLV_CTRL 0xA0
333
334 #define SLV_PROT_ERR_TYPE 0xA4
335 #define SLV_ERR6_IBI BIT(9)
336 #define SLV_ERR6_PR BIT(8)
337 #define SLV_ERR_GETCCC BIT(7)
338 #define SLV_ERR5 BIT(6)
339 #define SLV_ERR4 BIT(5)
340 #define SLV_ERR3 BIT(4)
341 #define SLV_ERR2_PW BIT(3)
342 #define SLV_ERR2_SETCCC BIT(2)
343 #define SLV_ERR1 BIT(1)
344 #define SLV_ERR0 BIT(0)
345
346 #define SLV_STATUS2 0xA8
347
348 #define SLV_STATUS3 0xAC
349 #define SLV_STATUS3_BC_FSM(s) (((s) & GENMASK(26, 16)) >> 16)
350 #define SLV_STATUS3_MWL(s) ((s) & GENMASK(15, 0))
351
352 #define TTO_PRESCL_CTRL0 0xb0
353 #define TTO_PRESCL_CTRL0_PRESCL_I2C(x) ((x) << 16)
354 #define TTO_PRESCL_CTRL0_PRESCL_I3C(x) (x)
355
356 #define TTO_PRESCL_CTRL1 0xb4
357 #define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
358 #define TTO_PRESCL_CTRL1_DIVA(x) (x)
359 #define TTO_PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
360 #define TTO_PRESCL_CTRL1_OD_LOW(x) (x)
361
362 #define DEVS_CTRL 0xb8
363 #define DEVS_CTRL_DEV_CLR_SHIFT 16
364 #define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
365 #define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
366 #define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
367 #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
368 #define MAX_DEVS 16
369
370 #define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
371 #define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
372 #define DEV_ID_RR0_HDR_CAP BIT(10)
373 #define DEV_ID_RR0_IS_I3C BIT(9)
374 #define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(7, 1) | GENMASK(15, 13))
375 #define DEV_ID_RR0_SET_DEV_ADDR(a) (((a << 1) & GENMASK(7, 1)) | (((a) & GENMASK(9, 7)) << 13))
376 #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | (((x) >> 6) & GENMASK(9, 7)))
377
378 #define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
379 #define DEV_ID_RR1_PID_MSB(pid) (pid)
380
381 #define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
382 #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
383 #define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
384 #define DEV_ID_RR2_DCR(dcr) (dcr)
385 #define DEV_ID_RR2_LVR(lvr) (lvr)
386
387 #define SIR_MAP(x) (0x180 + ((x) * 4))
388 #define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
389 #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
390 #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
391 #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
392 #define DEV_ROLE_SLAVE 0
393 #define DEV_ROLE_MASTER 1
394 #define SIR_MAP_DEV_ROLE(role) ((role) << 14)
395 #define SIR_MAP_DEV_SLOW BIT(13)
396 #define SIR_MAP_DEV_PL(l) ((l) << 8)
397 #define SIR_MAP_PL_MAX GENMASK(4, 0)
398 #define SIR_MAP_DEV_DA(a) ((a) << 1)
399 #define SIR_MAP_DEV_ACK BIT(0)
400
401 #define GRPADDR_LIST 0x198
402
403 #define GRPADDR_CS 0x19C
404
405 #define GPIR_WORD(x) (0x200 + ((x) * 4))
406 #define GPI_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
407
408 #define GPOR_WORD(x) (0x220 + ((x) * 4))
409 #define GPO_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
410
411 #define ASF_INT_STATUS 0x300
412 #define ASF_INT_RAW_STATUS 0x304
413 #define ASF_INT_MASK 0x308
414 #define ASF_INT_TEST 0x30c
415 #define ASF_INT_FATAL_SELECT 0x310
416 #define ASF_INTEGRITY_ERR BIT(6)
417 #define ASF_PROTOCOL_ERR BIT(5)
418 #define ASF_TRANS_TIMEOUT_ERR BIT(4)
419 #define ASF_CSR_ERR BIT(3)
420 #define ASF_DAP_ERR BIT(2)
421 #define ASF_SRAM_UNCORR_ERR BIT(1)
422 #define ASF_SRAM_CORR_ERR BIT(0)
423
424 #define ASF_SRAM_CORR_FAULT_STATUS 0x320
425 #define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
426 #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
427 #define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
428
429 #define ASF_SRAM_FAULT_STATS 0x328
430 #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
431 #define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
432
433 #define ASF_TRANS_TOUT_CTRL 0x330
434 #define ASF_TRANS_TOUT_EN BIT(31)
435 #define ASF_TRANS_TOUT_VAL(x) (x)
436
437 #define ASF_TRANS_TOUT_FAULT_MASK 0x334
438 #define ASF_TRANS_TOUT_FAULT_STATUS 0x338
439 #define ASF_TRANS_TOUT_FAULT_APB BIT(3)
440 #define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
441 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
442 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
443
444 #define ASF_PROTO_FAULT_MASK 0x340
445 #define ASF_PROTO_FAULT_STATUS 0x344
446 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
447 #define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
448 #define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
449 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
450 #define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
451 #define ASF_PROTO_FAULT_M(x) BIT(x)
452
453 /*******************************************************************************
454 * Local Constants Definition
455 ******************************************************************************/
456
457 /* TODO: this needs to be configurable in the dts...somehow */
458 #define I3C_CONTROLLER_ADDR 0x08
459
460 /* Maximum i3c devices that the IP can be built with */
461 #define I3C_MAX_DEVS 11
462 #define I3C_MAX_MSGS 10
463 #define I3C_SIR_DEFAULT_DA 0x7F
464 #define I3C_MAX_IDLE_CANCEL_WAIT_RETRIES 50
465 #define I3C_PRESCL_REG_SCALE (4)
466 #define I2C_PRESCL_REG_SCALE (5)
467 #define I3C_WAIT_FOR_IDLE_STATE_US 100
468 #define I3C_IDLE_TIMEOUT_CYC \
469 (I3C_WAIT_FOR_IDLE_STATE_US * (sys_clock_hw_cycles_per_sec() / USEC_PER_SEC))
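/*
 * Worked example (illustrative, assuming a 100 MHz hardware cycle clock):
 * I3C_IDLE_TIMEOUT_CYC = 100 us * (100000000 / 1000000) = 10000 cycles. The
 * actual value depends on sys_clock_hw_cycles_per_sec() for the platform.
 */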
470
471 /* Target T_LOW period in open-drain mode. */
472 #define I3C_BUS_TLOW_OD_MIN_NS 200
473
474 /* MIPI I3C v1.1.1 Spec defines tsco max as 12ns */
475 #define I3C_TSCO_DEFAULT_NS 10
476
477 /* Interrupt thresholds. */
478 /* command response fifo threshold */
479 #define I3C_CMDR_THR 1
480 /* command tx fifo threshold - unused */
481 #define I3C_CMDD_THR 1
482 /* in-band-interrupt data fifo threshold - unused */
483 #define I3C_IBID_THR 1
484 /* in-band-interrupt response queue threshold */
485 #define I3C_IBIR_THR 1
486 /* tx data threshold - unused */
487 #define I3C_TX_THR 1
488
489 #define LOG_MODULE_NAME I3C_CADENCE
490 LOG_MODULE_REGISTER(I3C_CADENCE, CONFIG_I3C_CADENCE_LOG_LEVEL);
491
492 /*******************************************************************************
493 * Local Types Definition
494 ******************************************************************************/
495
496 /** Describes peripheral HW configuration determined from CONFx registers. */
497 struct cdns_i3c_hw_config {
498 /* Revision ID */
499 uint32_t rev_id;
500 /* The maximum command queue depth. */
501 uint32_t cmd_mem_depth;
502 /* The maximum command response queue depth. */
503 uint32_t cmdr_mem_depth;
504 /* The maximum RX FIFO depth. */
505 uint32_t rx_mem_depth;
506 /* The maximum TX FIFO depth. */
507 uint32_t tx_mem_depth;
508 /* The maximum DDR RX FIFO depth. */
509 uint32_t ddr_rx_mem_depth;
510 /* The maximum DDR TX FIFO depth. */
511 uint32_t ddr_tx_mem_depth;
512 /* The maximum IBIR FIFO depth. */
513 uint32_t ibir_mem_depth;
514 /* The maximum IBI FIFO depth. */
515 uint32_t ibi_mem_depth;
516 };
517
518 /* Cadence I3C/I2C Device Private Data */
519 struct cdns_i3c_i2c_dev_data {
520 /* Device id within the retaining registers. This is set after bus initialization by the
521 * controller.
522 */
523 uint8_t id;
524 };
525
526 /* Single command/transfer */
527 struct cdns_i3c_cmd {
528 uint32_t cmd0;
529 uint32_t cmd1;
530 uint32_t ddr_header;
531 uint32_t ddr_crc;
532 uint32_t len;
533 uint32_t *num_xfer;
534 void *buf;
535 uint32_t error;
536 enum i3c_data_rate hdr;
537 };
538
539 /* Transfer data */
540 struct cdns_i3c_xfer {
541 struct k_sem complete;
542 int ret;
543 int num_cmds;
544 struct cdns_i3c_cmd cmds[I3C_MAX_MSGS];
545 };
546
547 /* Driver config */
548 struct cdns_i3c_config {
549 struct i3c_driver_config common;
550 /** base address of the controller */
551 uintptr_t base;
552 /** input frequency to the I3C Cadence */
553 uint32_t input_frequency;
554 /** Interrupt configuration function. */
555 void (*irq_config_func)(const struct device *dev);
556 };
557
558 /* Driver instance data */
559 struct cdns_i3c_data {
560 struct i3c_driver_data common;
561 struct cdns_i3c_hw_config hw_cfg;
562 struct k_mutex bus_lock;
563 struct cdns_i3c_i2c_dev_data cdns_i3c_i2c_priv_data[I3C_MAX_DEVS];
564 struct cdns_i3c_xfer xfer;
565 struct i3c_target_config *target_config;
566 struct k_sem ibi_hj_complete;
567 uint32_t free_rr_slots;
568 uint16_t fifo_bytes_read;
569 uint8_t max_devs;
570 };
571
572 /*******************************************************************************
573 * Global Variables Declaration
574 ******************************************************************************/
575
576 /*******************************************************************************
577 * Local Functions Declaration
578 ******************************************************************************/
579
580 /*******************************************************************************
581 * Private Functions Code
582 ******************************************************************************/
583
584 static uint8_t i3c_cdns_crc5(uint8_t crc5, uint16_t word)
585 {
586 uint8_t crc0;
587 int i;
588
589 /*
590 * crc0 = next_data_bit ^ crc[4]
591 * next crc[4:0] = { crc[3:2], crc[1]^crc0, crc[0], crc0 }
592 *                     (1)         (2)        (3)    (4)
593 */
594 for (i = 15; i >= 0; --i) {
595 crc0 = ((word >> i) ^ (crc5 >> 4)) & 0x1;
596 crc5 = ((crc5 << 1) & 0x1a) | (((crc5 >> 1) ^ crc0) << 2) | crc0;
597 }
598
599 return crc5 & 0x1f;
600 }
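/*
 * This is the CRC-5 used by I3C HDR-DDR (polynomial x^5 + x^2 + 1, seed 0x1F),
 * fed MSB-first with each 16-bit word. A minimal usage sketch over a two-word
 * payload (word0/word1 are illustrative names, not driver variables):
 *
 *   uint8_t crc = 0x1F;
 *   crc = i3c_cdns_crc5(crc, word0);
 *   crc = i3c_cdns_crc5(crc, word1);
 */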
601
602 static uint8_t cdns_i3c_ddr_parity(uint16_t payload)
603 {
604 uint16_t pb;
605 uint8_t parity;
606
607 /* Calculate odd parity. */
608 pb = (payload >> 15) ^ (payload >> 13) ^ (payload >> 11) ^ (payload >> 9) ^ (payload >> 7) ^
609 (payload >> 5) ^ (payload >> 3) ^ (payload >> 1);
610 parity = (pb & 1) << 1;
611 /* Calculate even and 1 parity */
612 pb = (payload >> 14) ^ (payload >> 12) ^ (payload >> 10) ^ (payload >> 8) ^ (payload >> 6) ^
613 (payload >> 4) ^ (payload >> 2) ^ payload ^ 1;
614 parity |= (pb & 1);
615
616 return parity;
617 }
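/*
 * The two returned bits map onto the DDR parity preamble defined above: bit 1
 * (DDR_ODD_PARITY) covers the odd-numbered payload bits, and bit 0
 * (DDR_EVEN_PARITY) covers the even-numbered payload bits XORed with 1.
 * Illustrative check: payload 0x0001 yields PA1 = 0 and PA0 = 1 ^ 1 = 0, so the
 * function returns 0.
 */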
618
619 /* This prepares the DDR word from the payload by adding on parity. This
620 * does not write the preamble.
621 */
622 static uint32_t prepare_ddr_word(uint16_t payload)
623 {
624 return (uint32_t)payload << 2 | cdns_i3c_ddr_parity(payload);
625 }
626
627 /* This ensures that PA0 contains 1'b1 which allows for easier Bus Turnaround */
628 static uint16_t prepare_ddr_cmd_parity_adjustment_bit(uint16_t word)
629 {
630 uint16_t pb;
631
632 pb = (word >> 14) ^ (word >> 12) ^ (word >> 10) ^ (word >> 8) ^ (word >> 6) ^ (word >> 4) ^
633 (word >> 2);
634
635 if (pb & 1) {
636 word |= BIT(0);
637 }
638
639 return word;
640 }
641
642 /* Computes a 7-bit address with its parity bit appended */
643 /* Returns [7:1] 7-bit addr, [0] parity bit (set so the 8-bit result has odd parity) */
644 static uint8_t cdns_i3c_even_parity_byte(uint8_t byte)
645 {
646 uint8_t parity = 0;
647 uint8_t b = byte;
648
649 while (b) {
650 parity = !parity;
651 b = b & (b - 1);
652 }
653 b = (byte << 1) | !parity;
654
655 return b;
656 }
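/*
 * Illustrative example: for the default controller address 0x08 (one set bit,
 * odd population count) the appended parity bit is 0, so the function returns
 * (0x08 << 1) | 0 = 0x10; the 8-bit result always carries odd parity overall.
 */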
657
658 /* Check if command response fifo is empty */
659 static inline bool cdns_i3c_cmd_rsp_fifo_empty(const struct cdns_i3c_config *config)
660 {
661 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
662
663 return ((mst_st & MST_STATUS0_CMDR_EMP) ? true : false);
664 }
665
666 /* Check if command fifo is empty */
667 static inline bool cdns_i3c_cmd_fifo_empty(const struct cdns_i3c_config *config)
668 {
669 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
670
671 return ((mst_st & MST_STATUS0_CMDD_EMP) ? true : false);
672 }
673
674 /* Check if command fifo is full */
675 static inline bool cdns_i3c_cmd_fifo_full(const struct cdns_i3c_config *config)
676 {
677 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
678
679 return ((mst_st & MST_STATUS0_CMDD_FULL) ? true : false);
680 }
681
682 /* Check if ibi response fifo is empty */
683 static inline bool cdns_i3c_ibi_rsp_fifo_empty(const struct cdns_i3c_config *config)
684 {
685 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
686
687 return ((mst_st & MST_STATUS0_IBIR_EMP) ? true : false);
688 }
689
690 /* Check if tx fifo is full */
691 static inline bool cdns_i3c_tx_fifo_full(const struct cdns_i3c_config *config)
692 {
693 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
694
695 return ((mst_st & MST_STATUS0_TX_FULL) ? true : false);
696 }
697
698 /* Check if rx fifo is full */
699 static inline bool cdns_i3c_rx_fifo_full(const struct cdns_i3c_config *config)
700 {
701 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
702
703 return ((mst_st & MST_STATUS0_RX_FULL) ? true : false);
704 }
705
706 /* Check if rx fifo is empty */
707 static inline bool cdns_i3c_rx_fifo_empty(const struct cdns_i3c_config *config)
708 {
709 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
710
711 return ((mst_st & MST_STATUS0_RX_EMP) ? true : false);
712 }
713
714 /* Check if ibi fifo is empty */
715 static inline bool cdns_i3c_ibi_fifo_empty(const struct cdns_i3c_config *config)
716 {
717 uint32_t mst_st = sys_read32(config->base + MST_STATUS0);
718
719 return ((mst_st & MST_STATUS0_IBID_EMP) ? true : false);
720 }
721
722 /* Interrupt handling */
723 static inline void cdns_i3c_interrupts_disable(const struct cdns_i3c_config *config)
724 {
725 sys_write32(MST_INT_MASK, config->base + MST_IDR);
726 }
727
728 static inline void cdns_i3c_interrupts_clear(const struct cdns_i3c_config *config)
729 {
730 sys_write32(MST_INT_MASK, config->base + MST_ICR);
731 }
732
733 /* FIFO mgmt */
734 static void cdns_i3c_write_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
735 uint32_t len)
736 {
737 const uint32_t *ptr = buf;
738 uint32_t remain, val;
739
740 for (remain = len; remain >= 4; remain -= 4) {
741 val = *ptr++;
742 sys_write32(val, config->base + TX_FIFO);
743 }
744
745 if (remain > 0) {
746 val = 0;
747 memcpy(&val, ptr, remain);
748 sys_write32(val, config->base + TX_FIFO);
749 }
750 }
751
752 static void cdns_i3c_write_ddr_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
753 uint32_t len)
754 {
755 const uint32_t *ptr = buf;
756 uint32_t remain, val;
757
758 for (remain = len; remain >= 4; remain -= 4) {
759 val = *ptr++;
760 sys_write32(val, config->base + SLV_DDR_TX_FIFO);
761 }
762
763 if (remain > 0) {
764 val = 0;
765 memcpy(&val, ptr, remain);
766 sys_write32(val, config->base + SLV_DDR_TX_FIFO);
767 }
768 }
769
770 #ifdef CONFIG_I3C_USE_IBI
771 static void cdns_i3c_write_ibi_fifo(const struct cdns_i3c_config *config, const void *buf,
772 uint32_t len)
773 {
774 const uint32_t *ptr = buf;
775 uint32_t remain, val;
776
777 for (remain = len; remain >= 4; remain -= 4) {
778 val = *ptr++;
779 sys_write32(val, config->base + IBI_DATA_FIFO);
780 }
781
782 if (remain > 0) {
783 val = 0;
784 memcpy(&val, ptr, remain);
785 sys_write32(val, config->base + IBI_DATA_FIFO);
786 }
787 }
788 #endif /* CONFIG_I3C_USE_IBI */
789
790 static void cdns_i3c_target_read_rx_fifo(const struct device *dev)
791 {
792 const struct cdns_i3c_config *config = dev->config;
793 struct cdns_i3c_data *data = dev->data;
794 const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
795
796 /* Version 1p7 uses the full 32b FIFO width */
797 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
798 uint16_t xferred_bytes =
799 SLV_STATUS0_XFRD_BYTES(sys_read32(config->base + SLV_STATUS0));
800
801 for (int i = data->fifo_bytes_read; i < xferred_bytes; i += 4) {
802 uint32_t rx_data = sys_read32(config->base + RX_FIFO);
803 /* Call write received cb for each remaining byte */
804 for (int j = 0; j < MIN(4, xferred_bytes - i); j++) {
805 target_cb->write_received_cb(data->target_config,
806 (rx_data >> (8 * j)));
807 }
808 }
809 /*
810 * Store the number of bytes already read: the threshold interrupt may trigger again,
811 * since xferred_bytes counts up to the "total" bytes received
812 */
813 data->fifo_bytes_read = xferred_bytes;
814 } else {
815 /*
816 * On older versions, target writes only fill the first byte of the
817 * 32-bit wide FIFO
818 */
819 uint8_t rx_data = (uint8_t)sys_read32(config->base + RX_FIFO);
820
821 target_cb->write_received_cb(data->target_config, rx_data);
822 }
823 }
824
825 static int cdns_i3c_read_rx_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
826 {
827 uint32_t *ptr = buf;
828 uint32_t remain, val;
829
830 for (remain = len; remain >= 4; remain -= 4) {
831 if (cdns_i3c_rx_fifo_empty(config)) {
832 return -EIO;
833 }
834 val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
835 *ptr++ = val;
836 }
837
838 if (remain > 0) {
839 if (cdns_i3c_rx_fifo_empty(config)) {
840 return -EIO;
841 }
842 val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
843 memcpy(ptr, &val, remain);
844 }
845
846 return 0;
847 }
848
849 static int cdns_i3c_read_rx_fifo_ddr_xfer(const struct cdns_i3c_config *config, void *buf,
850 uint32_t len, uint32_t ddr_header)
851 {
852 uint16_t *ptr = buf;
853 uint32_t val;
854 uint32_t preamble;
855 uint8_t crc5 = 0x1F;
856
857 /*
858 * TODO: This function does not support threshold interrupts; it expects the
859 * whole packet to be within the FIFO and not split across multiple calls to this function.
860 */
861 crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(ddr_header));
862
863 for (int i = 0; i < len; i++) {
864 if (cdns_i3c_rx_fifo_empty(config)) {
865 return -EIO;
866 }
867 val = sys_read32(config->base + RX_FIFO);
868 preamble = (val & DDR_PREAMBLE_MASK);
869
870 if (preamble == DDR_PREAMBLE_DATA_ABORT ||
871 preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
872 *ptr++ = sys_cpu_to_be16((uint16_t)DDR_DATA(val));
873 crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(val));
874 } else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
875 ((val & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
876 uint8_t crc = (uint8_t)DDR_CRC(val);
877
878 if (crc5 != crc) {
879 LOG_ERR("DDR RX crc error");
880 return -EIO;
881 }
882 }
883 }
884
885 return 0;
886 }
887
888 static inline int cdns_i3c_wait_for_idle(const struct device *dev)
889 {
890 const struct cdns_i3c_config *config = dev->config;
891 uint32_t start_time = k_cycle_get_32();
892
893 /**
894 * Spin waiting for device to go idle. It is unlikely that this will
895 * actually take any time unless the last transaction came immediately
896 * after an error condition.
897 */
898 while (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_IDLE)) {
899 if (k_cycle_get_32() - start_time > I3C_IDLE_TIMEOUT_CYC) {
900 return -EAGAIN;
901 }
902 }
903
904 return 0;
905 }
906
907 static void cdns_i3c_set_prescalers(const struct device *dev)
908 {
909 struct cdns_i3c_data *data = dev->data;
910 const struct cdns_i3c_config *config = dev->config;
911 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
912
913 /* These formulas are from section 6.2.1 of the Cadence I3C Master User Guide. */
914 uint32_t prescl_i3c = DIV_ROUND_UP(config->input_frequency,
915 (ctrl_config->scl.i3c * I3C_PRESCL_REG_SCALE)) -
916 1;
917 uint32_t prescl_i2c = DIV_ROUND_UP(config->input_frequency,
918 (ctrl_config->scl.i2c * I2C_PRESCL_REG_SCALE)) -
919 1;
920
921 /* update with actual value */
922 ctrl_config->scl.i3c = config->input_frequency / ((prescl_i3c + 1) * I3C_PRESCL_REG_SCALE);
923 ctrl_config->scl.i2c = config->input_frequency / ((prescl_i2c + 1) * I2C_PRESCL_REG_SCALE);
924
925 LOG_DBG("%s: I3C speed = %u, PRESCL_CTRL0.i3c = 0x%x", dev->name, ctrl_config->scl.i3c,
926 prescl_i3c);
927 LOG_DBG("%s: I2C speed = %u, PRESCL_CTRL0.i2c = 0x%x", dev->name, ctrl_config->scl.i2c,
928 prescl_i2c);
929
930 /* Calculate the OD_LOW value for the target open-drain T_low period (I3C_BUS_TLOW_OD_MIN_NS). */
931 uint32_t pres_step = 1000000000 / (ctrl_config->scl.i3c * 4);
932 int32_t od_low = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
933
934 if (od_low < 0) {
935 od_low = 0;
936 }
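/*
 * Worked example (illustrative, assuming input_frequency = 100 MHz and a
 * requested I3C SCL of 12.5 MHz): prescl_i3c = ceil(100 MHz / (12.5 MHz * 4)) - 1 = 1,
 * so the actual SCL is 100 MHz / ((1 + 1) * 4) = 12.5 MHz. Then
 * pres_step = 1e9 / (12.5 MHz * 4) = 20 ns and od_low = ceil(200 / 20) - 2 = 8.
 */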
937 LOG_DBG("%s: PRESCL_CTRL1.od_low = 0x%x", dev->name, od_low);
938
939 /* disable in order to update timing */
940 uint32_t ctrl = sys_read32(config->base + CTRL);
941
942 if (ctrl & CTRL_DEV_EN) {
943 sys_write32(~CTRL_DEV_EN & ctrl, config->base + CTRL);
944 }
945
946 sys_write32(PRESCL_CTRL0_I3C(prescl_i3c) | PRESCL_CTRL0_I2C(prescl_i2c),
947 config->base + PRESCL_CTRL0);
948
949 /* Sets the open drain low time relative to the push-pull. */
950 sys_write32(PRESCL_CTRL1_OD_LOW(od_low & PRESCL_CTRL1_OD_LOW_MASK),
951 config->base + PRESCL_CTRL1);
952
953 /* reenable */
954 if (ctrl & CTRL_DEV_EN) {
955 sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
956 }
957 }
958
959 /**
960 * @brief Compute RR0 Value from addr
961 *
962 * @param addr Address of the target
963 *
964 * @return RR0 value
965 */
966 static uint32_t prepare_rr0_dev_address(uint16_t addr)
967 {
968 /* RR0[7:1] = addr[6:0], RR0[0] = parity bit */
969 uint32_t ret = cdns_i3c_even_parity_byte(addr);
970
971 if (addr & GENMASK(9, 7)) {
972 /* RR0[15:13] = addr[9:7] */
973 ret |= (addr & GENMASK(9, 7)) << 6;
974 /* RR0[11] = 10b lvr addr */
975 ret |= DEV_ID_RR0_LVR_EXT_ADDR;
976 }
977
978 return ret;
979 }
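/*
 * Illustrative example: a purely 7-bit address such as 0x08 has no bits above
 * bit 6, so RR0 is just cdns_i3c_even_parity_byte(0x08) = 0x10 (address in
 * bits [7:1], parity bit clear in bit [0]).
 */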
980
981 /**
982 * @brief Program Retaining Registers with device lists
983 *
984 * This will program the retaining register with the controller's own address
985 *
986 * @param dev Pointer to controller device driver instance.
987 */
988 static void cdns_i3c_program_controller_retaining_reg(const struct device *dev)
989 {
990 const struct cdns_i3c_config *config = dev->config;
991 struct cdns_i3c_data *data = dev->data;
992 /* Set controller retaining register */
993 uint8_t controller_da = I3C_CONTROLLER_ADDR;
994
995 if (!i3c_addr_slots_is_free(&data->common.attached_dev.addr_slots, controller_da)) {
996 controller_da =
997 i3c_addr_slots_next_free_find(&data->common.attached_dev.addr_slots, 0);
998 LOG_DBG("%s: 0x%02x DA selected for controller", dev->name, controller_da);
999 }
1000 sys_write32(prepare_rr0_dev_address(controller_da), config->base + DEV_ID_RR0(0));
1001 /* Mark the address as I3C device */
1002 i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, controller_da);
1003 }
1004
1005 #ifdef CONFIG_I3C_USE_IBI
1006 static int cdns_i3c_controller_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
1007 {
1008 uint32_t sir_map;
1009 uint32_t sir_cfg;
1010 const struct cdns_i3c_config *config = dev->config;
1011 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1012 struct i3c_ccc_events i3c_events;
1013 int ret = 0;
1014
1015 if (!i3c_device_is_ibi_capable(target)) {
1016 ret = -EINVAL;
1017 return ret;
1018 }
1019
1020 /* TODO: check for duplicate in SIR */
1021
1022 sir_cfg = SIR_MAP_DEV_ROLE(I3C_BCR_DEVICE_ROLE(target->bcr)) |
1023 SIR_MAP_DEV_DA(target->dynamic_addr) |
1024 SIR_MAP_DEV_PL(target->data_length.max_ibi);
1025 if (target->ibi_cb != NULL) {
1026 sir_cfg |= SIR_MAP_DEV_ACK;
1027 }
1028 if (target->bcr & I3C_BCR_MAX_DATA_SPEED_LIMIT) {
1029 sir_cfg |= SIR_MAP_DEV_SLOW;
1030 }
1031
1032 LOG_DBG("%s: IBI enabling for 0x%02x (BCR 0x%02x)", dev->name, target->dynamic_addr,
1033 target->bcr);
1034
1035 /* Tell target to enable IBI */
1036 i3c_events.events = I3C_CCC_EVT_INTR;
1037 ret = i3c_ccc_do_events_set(target, true, &i3c_events);
1038 if (ret != 0) {
1039 LOG_ERR("%s: Error sending IBI ENEC for 0x%02x (%d)", dev->name,
1040 target->dynamic_addr, ret);
1041 return ret;
1042 }
1043
1044 sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1045 sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1046 sir_map |= SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, sir_cfg);
1047
1048 sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1049
1050 return ret;
1051 }
1052
1053 static int cdns_i3c_controller_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
1054 {
1055 uint32_t sir_map;
1056 const struct cdns_i3c_config *config = dev->config;
1057 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
1058 struct i3c_ccc_events i3c_events;
1059 int ret = 0;
1060
1061 if (!i3c_device_is_ibi_capable(target)) {
1062 ret = -EINVAL;
1063 return ret;
1064 }
1065
1066 /* Tell target to disable IBI */
1067 i3c_events.events = I3C_CCC_EVT_INTR;
1068 ret = i3c_ccc_do_events_set(target, false, &i3c_events);
1069 if (ret != 0) {
1070 LOG_ERR("%s: Error sending IBI DISEC for 0x%02x (%d)", dev->name,
1071 target->dynamic_addr, ret);
1072 return ret;
1073 }
1074
1075 sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1076 sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
1077 sir_map |=
1078 SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1079 sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
1080
1081 return ret;
1082 }
1083
1084 static int cdns_i3c_target_ibi_raise_hj(const struct device *dev)
1085 {
1086 const struct cdns_i3c_config *config = dev->config;
1087 struct cdns_i3c_data *data = dev->data;
1088 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1089
1090 /* HJ requests should not be done by primary controllers */
1091 if (!ctrl_config->is_secondary) {
1092 LOG_ERR("%s: controller is primary, HJ not available", dev->name);
1093 return -ENOTSUP;
1094 }
1095 /* Check if target already has a DA assigned to it */
1096 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA) {
1097 LOG_ERR("%s: HJ not available, DA already assigned", dev->name);
1098 return -EACCES;
1099 }
1100 /* Check if a DISEC CCC with the DISHJ bit set has been received (HJ requests disabled) */
1101 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HJ_DIS) {
1102 LOG_ERR("%s: HJ requests are currently disabled by DISEC", dev->name);
1103 return -EAGAIN;
1104 }
1105
1106 sys_write32(CTRL_HJ_INIT | sys_read32(config->base + CTRL), config->base + CTRL);
1107 k_sem_reset(&data->ibi_hj_complete);
1108 if (k_sem_take(&data->ibi_hj_complete, K_MSEC(500)) != 0) {
1109 LOG_ERR("%s: timeout waiting for DAA after HJ", dev->name);
1110 return -ETIMEDOUT;
1111 }
1112 return 0;
1113 }
1114
1115 static int cdns_i3c_target_ibi_raise_intr(const struct device *dev, struct i3c_ibi *request)
1116 {
1117 const struct cdns_i3c_config *config = dev->config;
1118 const struct cdns_i3c_data *data = dev->data;
1119 uint32_t ibi_ctrl_val;
1120
1121 LOG_DBG("%s: issuing IBI TIR", dev->name);
1122
1123 /*
1124 * Ensure data will fit within FIFO
1125 *
1126 * TODO: This limitation prevents burst transfers greater than the
1127 * FIFO sizes and should be replaced with an implementation that
1128 * utilizes the IBI data threshold interrupts.
1129 */
1130 if (request->payload_len > data->hw_cfg.ibi_mem_depth) {
1131 LOG_ERR("%s: payload too large for IBI TIR", dev->name);
1132 return -ENOMEM;
1133 }
1134
1135 cdns_i3c_write_ibi_fifo(config, request->payload, request->payload_len);
1136
1137 /* Write Payload Length and Start Condition */
1138 ibi_ctrl_val = sys_read32(config->base + SLV_IBI_CTRL);
1139 ibi_ctrl_val |= SLV_IBI_PL(request->payload_len);
1140 ibi_ctrl_val |= SLV_IBI_REQ;
1141 sys_write32(ibi_ctrl_val, config->base + SLV_IBI_CTRL);
1142 return 0;
1143 }
1144
1145 static int cdns_i3c_target_ibi_raise(const struct device *dev, struct i3c_ibi *request)
1146 {
1147 struct cdns_i3c_data *data = dev->data;
1148
1149 if (request == NULL) {
1150 return -EINVAL;
1151 }
1152
1153 switch (request->ibi_type) {
1154 case I3C_IBI_TARGET_INTR:
1155 /* Check IP Revision since older versions of CDNS IP do not support IBI interrupt*/
1156 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1157 return cdns_i3c_target_ibi_raise_intr(dev, request);
1158 } else {
1159 return -ENOTSUP;
1160 }
1161 case I3C_IBI_CONTROLLER_ROLE_REQUEST:
1162 /* TODO: Cadence I3C can support CR, but not implemented yet */
1163 return -ENOTSUP;
1164 case I3C_IBI_HOTJOIN:
1165 return cdns_i3c_target_ibi_raise_hj(dev);
1166 default:
1167 return -EINVAL;
1168 }
1169 }
1170 #endif
1171
1172 static void cdns_i3c_cancel_transfer(const struct device *dev)
1173 {
1174 struct cdns_i3c_data *data = dev->data;
1175 const struct cdns_i3c_config *config = dev->config;
1176 uint32_t val;
1177 uint32_t retry_count;
1178
1179 /* Disable further interrupts */
1180 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1181
1182 /* Ignore if no pending transfer */
1183 if (data->xfer.num_cmds == 0) {
1184 return;
1185 }
1186
1187 data->xfer.num_cmds = 0;
1188
1189 /* Clear main enable bit to disable further transactions */
1190 sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);
1191
1192 /**
1193 * Spin waiting for device to go idle. It is unlikely that this will
1194 * actually take any time since we only get here if a transaction didn't
1195 * complete in a long time.
1196 */
1197 retry_count = I3C_MAX_IDLE_CANCEL_WAIT_RETRIES;
1198 while (retry_count--) {
1199 val = sys_read32(config->base + MST_STATUS0);
1200 if (val & MST_STATUS0_IDLE) {
1201 break;
1202 }
1203 k_msleep(10);
1204 }
1205 if (!(val & MST_STATUS0_IDLE)) {
1206 data->xfer.ret = -ETIMEDOUT;
1207 }
1208
1209 /**
1210 * Flush all queues.
1211 */
1212 sys_write32(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | FLUSH_CMD_RESP,
1213 config->base + FLUSH_CTRL);
1214
1215 /* Re-enable device */
1216 sys_write32(CTRL_DEV_EN | sys_read32(config->base + CTRL), config->base + CTRL);
1217 }
1218
1219 /**
1220 * @brief Start a I3C/I2C Transfer
1221 *
1222 * This is to be called from an I3C/I2C transfer function. This will write
1223 * all data to tx and cmd fifos
1224 *
1225 * @param dev Pointer to controller device driver instance.
1226 */
1227 static void cdns_i3c_start_transfer(const struct device *dev)
1228 {
1229 struct cdns_i3c_data *data = dev->data;
1230 const struct cdns_i3c_config *config = dev->config;
1231 struct cdns_i3c_xfer *xfer = &data->xfer;
1232
1233 /* Ensure no pending command response queue threshold interrupt */
1234 sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR);
1235
1236 /* Make sure RX FIFO is empty. */
1237 while (!cdns_i3c_rx_fifo_empty(config)) {
1238 (void)sys_read32(config->base + RX_FIFO);
1239 }
1240 /* Make sure CMDR FIFO is empty too */
1241 while (!cdns_i3c_cmd_rsp_fifo_empty(config)) {
1242 (void)sys_read32(config->base + CMDR);
1243 }
1244
1245 /* Write all tx data to fifo */
1246 for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1247 if (xfer->cmds[i].hdr == I3C_DATA_RATE_SDR) {
1248 if (!(xfer->cmds[i].cmd0 & CMD0_FIFO_RNW)) {
1249 cdns_i3c_write_tx_fifo(config, xfer->cmds[i].buf,
1250 xfer->cmds[i].len);
1251 }
1252 } else if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1253 /* DDR Xfer requires sending header block*/
1254 cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_header,
1255 DDR_CRC_AND_HEADER_SIZE);
1256 /* If not read operation need to send data + crc of data*/
1257 if (!(DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1258 uint8_t *buf = (uint8_t *)xfer->cmds[i].buf;
1259 uint32_t ddr_message = 0;
1260 uint16_t ddr_data_payload = sys_get_be16(&buf[0]);
1261 /* HDR-DDR Data Words */
1262 ddr_message = (DDR_PREAMBLE_DATA_ABORT |
1263 prepare_ddr_word(ddr_data_payload));
1264 cdns_i3c_write_tx_fifo(config, &ddr_message,
1265 DDR_CRC_AND_HEADER_SIZE);
1266 for (int j = 2; j < ((xfer->cmds[i].len - 2) * 2); j += 2) {
1267 ddr_data_payload = sys_get_be16(&buf[j]);
1268 ddr_message = (DDR_PREAMBLE_DATA_ABORT_ALT |
1269 prepare_ddr_word(ddr_data_payload));
1270 cdns_i3c_write_tx_fifo(config, &ddr_message,
1271 DDR_CRC_AND_HEADER_SIZE);
1272 }
1273 /* HDR-DDR CRC Word */
1274 cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_crc,
1275 DDR_CRC_AND_HEADER_SIZE);
1276 }
1277 } else {
1278 xfer->ret = -ENOTSUP;
1279 return;
1280 }
1281 }
1282
1283 /* Write all data to cmd fifos */
1284 for (unsigned int i = 0; i < xfer->num_cmds; i++) {
1285 /* The command ID is just the msg index. */
1286 xfer->cmds[i].cmd1 |= CMD1_FIFO_CMDID(i);
1287 sys_write32(xfer->cmds[i].cmd1, config->base + CMD1_FIFO);
1288 sys_write32(xfer->cmds[i].cmd0, config->base + CMD0_FIFO);
1289
1290 if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) {
1291 sys_write32(0x00, config->base + CMD1_FIFO);
1292 if ((DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) {
1293 sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(1),
1294 config->base + CMD0_FIFO);
1295 } else {
1296 sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(xfer->cmds[i].len),
1297 config->base + CMD0_FIFO);
1298 }
1299 }
1300 }
1301
1302 /* kickoff transfer */
1303 sys_write32(CTRL_MCS | sys_read32(config->base + CTRL), config->base + CTRL);
1304 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IER);
1305 }
1306
1307 /**
1308 * @brief Send Common Command Code (CCC).
1309 *
1310 * @see i3c_do_ccc
1311 *
1312 * @param dev Pointer to controller device driver instance.
1313 * @param payload Pointer to CCC payload.
1314 *
1315 * @return @see i3c_do_ccc
1316 */
1317 static int cdns_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload)
1318 {
1319 const struct cdns_i3c_config *config = dev->config;
1320 struct cdns_i3c_data *data = dev->data;
1321 struct cdns_i3c_cmd *cmd;
1322 int ret = 0;
1323 uint8_t num_cmds = 0;
1324
1325 /* make sure we are currently the active controller */
1326 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1327 return -EACCES;
1328 }
1329
1330 if (payload == NULL) {
1331 return -EINVAL;
1332 }
1333
1334 /*
1335 * Ensure data will fit within FIFOs.
1336 *
1337 * TODO: This limitation prevents burst transfers greater than the
1338 * FIFO sizes and should be replaced with an implementation that
1339 * utilizes the RX/TX data threshold interrupts.
1340 */
1341 uint32_t num_msgs =
1342 1 + ((payload->ccc.data_len > 0) ? payload->targets.num_targets
1343 : MAX(payload->targets.num_targets - 1, 0));
1344 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1345 LOG_ERR("%s: Too many messages", dev->name);
1346 return -ENOMEM;
1347 }
1348
1349 uint32_t rxsize = 0;
1350 /* defining byte is stored in a separate register for direct CCCs */
1351 uint32_t txsize =
1352 i3c_ccc_is_payload_broadcast(payload) ? ROUND_UP(payload->ccc.data_len, 4) : 0;
1353
1354 for (int i = 0; i < payload->targets.num_targets; i++) {
1355 if (payload->targets.payloads[i].rnw) {
1356 rxsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1357 } else {
1358 txsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
1359 }
1360 }
1361 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
1362 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
1363 return -ENOMEM;
1364 }
1365
1366 LOG_DBG("%s: CCC[0x%02x]", dev->name, payload->ccc.id);
1367
1368 k_mutex_lock(&data->bus_lock, K_FOREVER);
1369
1370 /* wait for idle */
1371 ret = cdns_i3c_wait_for_idle(dev);
1372 if (ret != 0) {
1373 goto error;
1374 }
1375
1376 /* if this is a direct CCC */
1377 if (!i3c_ccc_is_payload_broadcast(payload)) {
1378 /* if the CCC has no data bytes, then the target payload must be in
1379 * the same command buffer
1380 */
1381 for (int i = 0; i < payload->targets.num_targets; i++) {
1382 cmd = &data->xfer.cmds[i];
1383 num_cmds++;
1384 cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1385 cmd->cmd0 = CMD0_FIFO_IS_CCC;
1386 /* if there is a defining byte */
1387 if (payload->ccc.data_len == 1) {
1388 /* Only revision 1p7 supports defining byte for direct CCCs */
1389 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
1390 cmd->cmd0 |= CMD0_FIFO_IS_DB;
1391 cmd->cmd1 |= CMD1_FIFO_DB(payload->ccc.data[0]);
1392 } else {
1393 LOG_ERR("%s: Defining Byte with Direct CCC not supported "
1394 "with rev %lup%lu",
1395 dev->name, REV_ID_REV_MAJOR(data->hw_cfg.rev_id),
1396 REV_ID_REV_MINOR(data->hw_cfg.rev_id));
1397 ret = -ENOTSUP;
1398 goto error;
1399 }
1400 } else if (payload->ccc.data_len > 1) {
1401 LOG_ERR("%s: Defining Byte length greater than 1", dev->name);
1402 ret = -EINVAL;
1403 goto error;
1404 }
1405 /* For a chained (short) direct CCC sent to multiple targets, BCH is set
1406 * only on the first target's command and RSBC is set on every command
1407 * except the last; a single target therefore gets BCH only
1408 */
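/* e.g. with three targets: cmd 0 gets BCH | RSBC, cmd 1 gets RSBC only, cmd 2 gets neither */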
1409 if (i == 0) {
1410 cmd->cmd0 |= CMD0_FIFO_BCH;
1411 }
1412 if (i < (payload->targets.num_targets - 1)) {
1413 cmd->cmd0 |= CMD0_FIFO_RSBC;
1414 }
1415 cmd->buf = payload->targets.payloads[i].data;
1416 cmd->len = payload->targets.payloads[i].data_len;
1417 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(payload->targets.payloads[i].addr) |
1418 CMD0_FIFO_PL_LEN(payload->targets.payloads[i].data_len);
1419 if (payload->targets.payloads[i].rnw) {
1420 cmd->cmd0 |= CMD0_FIFO_RNW;
1421 }
1422 cmd->hdr = I3C_DATA_RATE_SDR;
1423 /*
1424 * write the address of num_xfer which is to be updated upon message
1425 * completion
1426 */
1427 cmd->num_xfer = &(payload->targets.payloads[i].num_xfer);
1428 }
1429 } else {
1430 cmd = &data->xfer.cmds[0];
1431 num_cmds++;
1432 cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
1433 cmd->cmd0 = CMD0_FIFO_IS_CCC | CMD0_FIFO_BCH;
1434 cmd->hdr = I3C_DATA_RATE_SDR;
1435
1436 if (payload->ccc.data_len > 0) {
1437 /* Write additional data for CCC if needed */
1438 cmd->buf = payload->ccc.data;
1439 cmd->len = payload->ccc.data_len;
1440 cmd->cmd0 |= CMD0_FIFO_PL_LEN(payload->ccc.data_len);
1441 /* write the address of num_xfer which is to be updated upon message
1442 * completion
1443 */
1444 cmd->num_xfer = &(payload->ccc.num_xfer);
1445 } else {
1446 /* no data to transfer */
1447 cmd->len = 0;
1448 cmd->num_xfer = NULL;
1449 }
1450 }
1451
1452 data->xfer.ret = -ETIMEDOUT;
1453 data->xfer.num_cmds = num_cmds;
1454
1455 cdns_i3c_start_transfer(dev);
1456 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
1457 cdns_i3c_cancel_transfer(dev);
1458 }
1459
1460 if (data->xfer.ret < 0) {
1461 LOG_ERR("%s: CCC[0x%02x] error (%d)", dev->name, payload->ccc.id, data->xfer.ret);
1462 }
1463
1464 ret = data->xfer.ret;
1465 error:
1466 k_mutex_unlock(&data->bus_lock);
1467
1468 return ret;
1469 }
1470
1471 /**
1472 * @brief Perform Dynamic Address Assignment.
1473 *
1474 * @see i3c_do_daa
1475 *
1476 * @param dev Pointer to controller device driver instance.
1477 *
1478 * @return @see i3c_do_daa
1479 */
1480 static int cdns_i3c_do_daa(const struct device *dev)
1481 {
1482 struct cdns_i3c_data *data = dev->data;
1483 const struct cdns_i3c_config *config = dev->config;
1484 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1485
1486 /* DAA should not be done by secondary controllers */
1487 if (ctrl_config->is_secondary) {
1488 return -EACCES;
1489 }
1490
1491 /* read dev active reg */
1492 uint32_t olddevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1493 /* ignore the controller register */
1494 olddevs |= BIT(0);
1495
1496 /* the Cadence I3C IP will assign an address for it from the RR */
1497 struct i3c_ccc_payload entdaa_ccc;
1498
1499 memset(&entdaa_ccc, 0, sizeof(entdaa_ccc));
1500 entdaa_ccc.ccc.id = I3C_CCC_ENTDAA;
1501
1502 int status = cdns_i3c_do_ccc(dev, &entdaa_ccc);
1503
1504 if (status != 0) {
1505 return status;
1506 }
1507
1508 /* read again dev active reg */
1509 uint32_t newdevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1510 /* look for new bits that were set */
1511 newdevs &= ~olddevs;
1512
1513 if (newdevs) {
1514 /* loop through each set bit for new devices */
1515 for (uint8_t i = find_lsb_set(newdevs); i <= find_msb_set(newdevs); i++) {
1516 uint8_t rr_idx = i - 1;
1517
1518 if (newdevs & BIT(rr_idx)) {
1519 /* Read RRx registers */
1520 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(rr_idx));
1521 uint32_t dev_id_rr1 = sys_read32(config->base + DEV_ID_RR1(rr_idx));
1522 uint32_t dev_id_rr2 = sys_read32(config->base + DEV_ID_RR2(rr_idx));
1523
1524 uint64_t pid = ((uint64_t)dev_id_rr1 << 16) + (dev_id_rr2 >> 16);
1525 uint8_t dyn_addr = (dev_id_rr0 & 0xFE) >> 1;
1526 uint8_t bcr = dev_id_rr2 >> 8;
1527 uint8_t dcr = dev_id_rr2 & 0xFF;
1528
1529 const struct i3c_device_id i3c_id = I3C_DEVICE_ID(pid);
1530 struct i3c_device_desc *target = i3c_device_find(dev, &i3c_id);
1531
1532 if (target == NULL) {
1533 LOG_INF("%s: PID 0x%012llx is not in registered device "
1534 "list, given DA 0x%02x",
1535 dev->name, pid, dyn_addr);
1536 i3c_addr_slots_mark_i3c(
1537 &data->common.attached_dev.addr_slots, dyn_addr);
1538 } else {
1539 target->dynamic_addr = dyn_addr;
1540 target->bcr = bcr;
1541 target->dcr = dcr;
1542
1543 LOG_DBG("%s: PID 0x%012llx assigned dynamic address 0x%02x",
1544 dev->name, pid, dyn_addr);
1545 }
1546 }
1547 }
1548 } else {
1549 LOG_DBG("%s: ENTDAA: No devices found", dev->name);
1550 }
1551
1552 /* mark slot as not free, may already be set if already attached */
1553 data->free_rr_slots &= ~newdevs;
1554
1555 /* Unmask Hot-Join request interrupts. HJ will send DISEC HJ from the CTRL value */
1556 struct i3c_ccc_events i3c_events;
1557
1558 i3c_events.events = I3C_CCC_EVT_HJ;
1559 status = i3c_ccc_do_events_all_set(dev, true, &i3c_events);
1560 if (status != 0) {
1561 LOG_DBG("%s: Broadcast ENEC was NACK", dev->name);
1562 }
1563
1564 return 0;
1565 }
1566
1567 /**
1568 * @brief Configure I2C hardware.
1569 *
1570 * @param dev Pointer to controller device driver instance.
1571 * @param config Value of the configuration parameters.
1572 *
1573 * @retval 0 If successful.
1574 * @retval -EINVAL If invalid configure parameters.
1575 * @retval -EIO General Input/Output errors.
1576 * @retval -ENOSYS If not implemented.
1577 */
1578 static int cdns_i3c_i2c_api_configure(const struct device *dev, uint32_t config)
1579 {
1580 struct cdns_i3c_data *data = dev->data;
1581 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
1582
1583 switch (I2C_SPEED_GET(config)) {
1584 case I2C_SPEED_STANDARD:
1585 ctrl_config->scl.i2c = 100000;
1586 break;
1587 case I2C_SPEED_FAST:
1588 ctrl_config->scl.i2c = 400000;
1589 break;
1590 case I2C_SPEED_FAST_PLUS:
1591 ctrl_config->scl.i2c = 1000000;
1592 break;
1593 case I2C_SPEED_HIGH:
1594 ctrl_config->scl.i2c = 3400000;
1595 break;
1596 case I2C_SPEED_ULTRA:
1597 ctrl_config->scl.i2c = 5000000;
1598 break;
1599 default:
1600 break;
1601 }
1602
1603 cdns_i3c_set_prescalers(dev);
1604
1605 return 0;
1606 }
1607
1608 /**
1609 * @brief Configure I3C hardware.
1610 *
1611 * @param dev Pointer to controller device driver instance.
1612 * @param type Type of configuration parameters being passed
1613 * in @p config.
1614 * @param config Pointer to the configuration parameters.
1615 *
1616 * @retval 0 If successful.
1617 * @retval -EINVAL If invalid configure parameters.
1618 * @retval -EIO General Input/Output errors.
1619 * @retval -ENOSYS If not implemented.
1620 */
1621 static int cdns_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config)
1622 {
1623 struct cdns_i3c_data *data = dev->data;
1624 struct i3c_config_controller *ctrl_cfg = config;
1625
1626 if ((ctrl_cfg->scl.i2c == 0U) || (ctrl_cfg->scl.i3c == 0U)) {
1627 return -EINVAL;
1628 }
1629
1630 data->common.ctrl_config.scl.i3c = ctrl_cfg->scl.i3c;
1631 data->common.ctrl_config.scl.i2c = ctrl_cfg->scl.i2c;
1632 cdns_i3c_set_prescalers(dev);
1633
1634 return 0;
1635 }
1636
1637 /**
1638 * @brief Complete an I3C/I2C transfer
1639 *
1640 * This is to be called from an ISR when the command FIFO is empty.
1641 * It checks each command response, reading the RX FIFO if the message
1642 * was a RnW, and records whether any message had an error.
1643 *
1644 * @param dev Pointer to controller device driver instance.
1645 */
1646 static void cdns_i3c_complete_transfer(const struct device *dev)
1647 {
1648 struct cdns_i3c_data *data = dev->data;
1649 const struct cdns_i3c_config *config = dev->config;
1650 uint32_t cmdr;
1651 uint32_t id = 0;
1652 uint32_t xfer = 0;
1653 int ret = 0;
1654 struct cdns_i3c_cmd *cmd;
1655 bool was_full;
1656
1657 /* Used only to determine the cause in the case of a controller abort */
1658 was_full = cdns_i3c_rx_fifo_full(config);
1659
1660 /* Disable further interrupts */
1661 sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);
1662
1663 /* Ignore if no pending transfer */
1664 if (data->xfer.num_cmds == 0) {
1665 return;
1666 }
1667
1668 /* Process all results in fifo */
1669 for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
1670 !(status0 & MST_STATUS0_CMDR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
1671 cmdr = sys_read32(config->base + CMDR);
1672 id = CMDR_CMDID(cmdr);
1673
1674 if (id == CMDR_CMDID_HJACK_DISEC || id == CMDR_CMDID_HJACK_ENTDAA ||
1675 id >= data->xfer.num_cmds) {
1676 continue;
1677 }
1678
1679 cmd = &data->xfer.cmds[id];
1680
1681 xfer = MIN(CMDR_XFER_BYTES(cmdr), cmd->len);
1682 if (cmd->num_xfer != NULL) {
1683 *cmd->num_xfer = xfer;
1684 }
1685 /* Read any rx data into buffer */
1686 if (cmd->cmd0 & CMD0_FIFO_RNW) {
1687 ret = cdns_i3c_read_rx_fifo(config, cmd->buf, xfer);
1688 }
1689
1690 if ((cmd->hdr == I3C_DATA_RATE_HDR_DDR) &&
1691 (DDR_DATA(cmd->ddr_header) & HDR_CMD_RD)) {
1692 ret = cdns_i3c_read_rx_fifo_ddr_xfer(config, cmd->buf, xfer,
1693 cmd->ddr_header);
1694 }
1695
1696 /* Record error */
1697 cmd->error = CMDR_ERROR(cmdr);
1698 }
1699
1700 for (int i = 0; i < data->xfer.num_cmds; i++) {
1701 switch (data->xfer.cmds[i].error) {
1702 case CMDR_NO_ERROR:
1703 break;
1704
1705 case CMDR_MST_ABORT:
1706 /*
1707 * A controller abort is forced if the RX FIFO fills up.
1708 * There is also the case where the FIFO can be full because
1709 * the length of the packet equals the FIFO depth. Check that
1710 * the requested length is greater than the total transferred
1711 * to confirm that is not the case. Otherwise the abort was
1712 * caused by the buffer length being met without the target
1713 * giving an End of Data (EoD) in the T bit. Do not treat
1714 * that condition as an error, because some targets simply
1715 * auto-increment the read address well beyond the buffer
1716 * and never give an EoD.
1717 */
1718 if ((was_full) && (data->xfer.cmds[i].len > *data->xfer.cmds[i].num_xfer)) {
1719 ret = -ENOSPC;
1720 } else {
1721 LOG_DBG("%s: Controller Abort due to buffer length exceeded with "
1722 "no EoD from target",
1723 dev->name);
1724 }
1725 break;
1726
1727 case CMDR_M0_ERROR: {
1728 uint8_t ccc = data->xfer.cmds[i].cmd1 & 0xFF;
1729 /*
1730 * M0 is an illegally formatted CCC, i.e. the Controller
1731 * receives 1 byte instead of 2 with the GETMWL CCC. This can
1732 * be problematic for CCCs that have a variable length, such
1733 * as GETMXDS and GETCAPS. Verify that the number of bytes received
1734 * matches what the specification expects and ignore the error. The IP
1735 * will still retransmit the same CCC and there is nothing that can be
1736 * done to prevent this. It is still up to the application to read
1737 * `num_xfer` to determine the number of bytes returned.
1738 */
1739 if (ccc == I3C_CCC_GETMXDS) {
1740 /*
1741 * Whether GETMXDS will return format 1 or format 2 can't be
1742 * known ahead of time.
1743 */
1744 if ((*data->xfer.cmds[i].num_xfer !=
1745 sizeof(((union i3c_ccc_getmxds *)0)->fmt1)) &&
1746 (*data->xfer.cmds[i].num_xfer !=
1747 sizeof(((union i3c_ccc_getmxds *)0)->fmt2))) {
1748 ret = -EIO;
1749 }
1750 } else if (ccc == I3C_CCC_GETCAPS) {
1751 /* GETCAPS can only return 1-4 bytes */
1752 if (*data->xfer.cmds[i].num_xfer > sizeof(union i3c_ccc_getcaps)) {
1753 ret = -EIO;
1754 }
1755 } else {
1756 ret = -EIO;
1757 }
1758 break;
1759 }
1760
1761 case CMDR_DDR_PREAMBLE_ERROR:
1762 case CMDR_DDR_PARITY_ERROR:
1763 case CMDR_M1_ERROR:
1764 case CMDR_M2_ERROR:
1765 case CMDR_NACK_RESP:
1766 case CMDR_DDR_DROPPED:
1767 ret = -EIO;
1768 break;
1769
1770 case CMDR_DDR_RX_FIFO_OVF:
1771 case CMDR_DDR_TX_FIFO_UNF:
1772 ret = -ENOSPC;
1773 break;
1774
1775 case CMDR_INVALID_DA:
1776 default:
1777 ret = -EINVAL;
1778 break;
1779 }
1780 }
1781
1782 data->xfer.ret = ret;
1783
1784 /* Indicate no transfer is pending */
1785 data->xfer.num_cmds = 0;
1786
1787 k_sem_give(&data->xfer.complete);
1788 }
1789
1790 /**
1791 * @brief Transfer messages in I2C mode.
1792 *
1793 * @param dev Pointer to device driver instance.
1794 * @param i2c_dev Pointer to I2C device descriptor.
1795 * @param msgs Pointer to I2C messages.
1796 * @param num_msgs Number of messages to transfers.
1797 *
1798 * @retval 0 If successful.
1799 * @retval -EIO General input / output error.
1800 * @retval -ENOMEM If the transfer does not fit within the command or data FIFOs.
1801 */
1802 static int cdns_i3c_i2c_transfer(const struct device *dev, struct i3c_i2c_device_desc *i2c_dev,
1803 struct i2c_msg *msgs, uint8_t num_msgs)
1804 {
1805 const struct cdns_i3c_config *config = dev->config;
1806 struct cdns_i3c_data *data = dev->data;
1807 uint32_t txsize = 0;
1808 uint32_t rxsize = 0;
1809 int ret;
1810
1811 /* make sure we are currently the active controller */
1812 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
1813 return -EACCES;
1814 }
1815
1816 if (num_msgs == 0) {
1817 return 0;
1818 }
1819
1820 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
1821 LOG_ERR("%s: Too many messages", dev->name);
1822 return -ENOMEM;
1823 }
1824
1825 /*
1826 * Ensure data will fit within FIFOs
1827 */
1828 for (unsigned int i = 0; i < num_msgs; i++) {
1829 if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
1830 rxsize += ROUND_UP(msgs[i].len, 4);
1831 } else {
1832 txsize += ROUND_UP(msgs[i].len, 4);
1833 }
1834 }
1835 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
1836 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
1837 return -ENOMEM;
1838 }
1839
1840 k_mutex_lock(&data->bus_lock, K_FOREVER);
1841
1842 /* wait for idle */
1843 ret = cdns_i3c_wait_for_idle(dev);
1844 if (ret != 0) {
1845 goto error;
1846 }
1847
1848 for (unsigned int i = 0; i < num_msgs; i++) {
1849 struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
1850
1851 cmd->len = msgs[i].len;
1852 cmd->buf = msgs[i].buf;
1853 /* not an i3c transfer, but must be set to sdr */
1854 cmd->hdr = I3C_DATA_RATE_SDR;
1855
1856 cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
1857 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(i2c_dev->addr);
1858 cmd->cmd0 |= CMD0_FIFO_PL_LEN(msgs[i].len);
1859
1860 /* Send repeated start on all transfers except the last or those marked STOP. */
1861 if ((i < (num_msgs - 1)) && ((msgs[i].flags & I2C_MSG_STOP) == 0)) {
1862 cmd->cmd0 |= CMD0_FIFO_RSBC;
1863 }
1864
1865 if (msgs[i].flags & I2C_MSG_ADDR_10_BITS) {
1866 cmd->cmd0 |= CMD0_FIFO_IS_10B;
1867 }
1868
1869 if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
1870 cmd->cmd0 |= CMD0_FIFO_RNW;
1871 }
1872
1873 /* num_xfer is a don't-care for i2c transfers */
1874 cmd->num_xfer = NULL;
1875 }
1876
1877 data->xfer.ret = -ETIMEDOUT;
1878 data->xfer.num_cmds = num_msgs;
1879
1880 cdns_i3c_start_transfer(dev);
1881 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
1882 cdns_i3c_cancel_transfer(dev);
1883 }
1884
1885 ret = data->xfer.ret;
1886 error:
1887 k_mutex_unlock(&data->bus_lock);
1888
1889 return ret;
1890 }
1891
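/**
* @brief Find a retaining register slot.
*
* If @p dyn_addr is 0, the first free retaining register slot is returned.
* Otherwise, the active device slots are searched for an I3C device whose
* programmed dynamic address matches @p dyn_addr.
*
* @param dev Pointer to controller device driver instance.
* @param dyn_addr Dynamic address to look up, or 0 to request a free slot.
*
* @retval Non-negative retaining register index if found.
* @retval -ENOSPC If no free slot is available.
* @retval -EINVAL If no active device matches @p dyn_addr.
*/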
1892 static int cdns_i3c_master_get_rr_slot(const struct device *dev, uint8_t dyn_addr)
1893 {
1894 struct cdns_i3c_data *data = dev->data;
1895 const struct cdns_i3c_config *config = dev->config;
1896
1897 if (dyn_addr == 0) {
1898 if (!data->free_rr_slots) {
1899 return -ENOSPC;
1900 }
1901
1902 return find_lsb_set(data->free_rr_slots) - 1;
1903 }
1904
1905 uint32_t activedevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1906
1907 activedevs &= ~BIT(0);
1908
1909 /* loop through each set bit (bit index equals the retaining register index) */
1910 for (uint8_t i = find_lsb_set(activedevs) - 1; i < find_msb_set(activedevs); i++) {
1911 if (activedevs & BIT(i)) {
1912 uint32_t rr = sys_read32(config->base + DEV_ID_RR0(i));
1913
1914 if (!(rr & DEV_ID_RR0_IS_I3C) || DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr) {
1915 continue;
1916 }
1917 return i;
1918 }
1919 }
1920
1921 return -EINVAL;
1922 }
1923
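/**
* @brief Attach an I3C device to a retaining register slot.
*
* Programs the device's address and PID into a free retaining register.
* Devices that already have a static or dynamic address are marked active
* in DEVS_CTRL here; devices without one are activated during DAA.
*
* @param dev Pointer to controller device driver instance.
* @param desc Pointer to the I3C device descriptor.
* @param addr Address to program into the retaining register.
*
* @retval 0 If successful.
* @retval Negative errno if no retaining register slot could be obtained.
*/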
1924 static int cdns_i3c_attach_device(const struct device *dev, struct i3c_device_desc *desc,
1925 uint8_t addr)
1926 {
1927 const struct cdns_i3c_config *config = dev->config;
1928 struct cdns_i3c_data *data = dev->data;
1929 int slot = cdns_i3c_master_get_rr_slot(dev, desc->dynamic_addr);
1930
1931 if (slot < 0) {
1932 LOG_ERR("%s: no space for i3c device: %s", dev->name, desc->dev->name);
1933 return slot;
1934 }
1935
1936 k_mutex_lock(&data->bus_lock, K_FOREVER);
1937
1938 data->cdns_i3c_i2c_priv_data[slot].id = slot;
1939 desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
1940 data->free_rr_slots &= ~BIT(slot);
1941
1942 uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(addr);
1943 uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
1944 uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF);
1945
1946 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
1947 sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(slot));
1948 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
1949
1950 /* Mark the device as active. Devices found during DAA are given the exact DA
1951 * programmed in their RR (if the PID matches) and are marked active during
1952 * ENTDAA; otherwise they are marked active here. If a dynamic address is
1953 * already set, it is assumed to have been assigned by the primary controller.
1954 */
1955 if ((desc->static_addr != 0) || (desc->dynamic_addr != 0)) {
1956 sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
1957 config->base + DEVS_CTRL);
1958 }
1959
1960 k_mutex_unlock(&data->bus_lock);
1961
1962 return 0;
1963 }
1964
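/**
* @brief Update the retaining registers of an already attached I3C device.
*
* Rewrites the device's retaining registers with its current dynamic
* address, PID, BCR and DCR.
*
* @param dev Pointer to controller device driver instance.
* @param desc Pointer to the I3C device descriptor.
* @param old_dyn_addr Previous dynamic address (currently unused).
*
* @retval 0 If successful.
* @retval -EINVAL If the device is not attached.
*/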
1965 static int cdns_i3c_reattach_device(const struct device *dev, struct i3c_device_desc *desc,
1966 uint8_t old_dyn_addr)
1967 {
1968 const struct cdns_i3c_config *config = dev->config;
1969 struct cdns_i3c_data *data = dev->data;
1970 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
1971
1972 if (cdns_i3c_device_data == NULL) {
1973 LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
1974 return -EINVAL;
1975 }
1976
1977 k_mutex_lock(&data->bus_lock, K_FOREVER);
1978
1979 uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(desc->dynamic_addr);
1980 uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
1981 uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF) | DEV_ID_RR2_BCR(desc->bcr) |
1982 DEV_ID_RR2_DCR(desc->dcr);
1983
1984 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(cdns_i3c_device_data->id));
1985 sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(cdns_i3c_device_data->id));
1986 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(cdns_i3c_device_data->id));
1987
1988 k_mutex_unlock(&data->bus_lock);
1989
1990 return 0;
1991 }
1992
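/**
* @brief Detach an I3C device from the controller.
*
* Clears the device's retaining register slot and returns it to the free pool.
*
* @param dev Pointer to controller device driver instance.
* @param desc Pointer to the I3C device descriptor.
*
* @retval 0 If successful.
* @retval -EINVAL If the device is not attached.
*/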
1993 static int cdns_i3c_detach_device(const struct device *dev, struct i3c_device_desc *desc)
1994 {
1995 const struct cdns_i3c_config *config = dev->config;
1996 struct cdns_i3c_data *data = dev->data;
1997 struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;
1998
1999 if (cdns_i3c_device_data == NULL) {
2000 LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
2001 return -EINVAL;
2002 }
2003
2004 k_mutex_lock(&data->bus_lock, K_FOREVER);
2005
2006 sys_write32(sys_read32(config->base + DEVS_CTRL) |
2007 DEVS_CTRL_DEV_CLR(cdns_i3c_device_data->id),
2008 config->base + DEVS_CTRL);
2009 data->free_rr_slots |= BIT(cdns_i3c_device_data->id);
2010 desc->controller_priv = NULL;
2011
2012 k_mutex_unlock(&data->bus_lock);
2013
2014 return 0;
2015 }
2016
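/**
* @brief Attach an I2C device to a retaining register slot.
*
* Programs the device's static address and LVR into a free retaining
* register and marks the slot active.
*
* @param dev Pointer to controller device driver instance.
* @param desc Pointer to the I2C device descriptor.
*
* @retval 0 If successful.
* @retval Negative errno if no retaining register slot is free.
*/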
2017 static int cdns_i3c_i2c_attach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2018 {
2019 const struct cdns_i3c_config *config = dev->config;
2020 struct cdns_i3c_data *data = dev->data;
2021
2022 int slot = cdns_i3c_master_get_rr_slot(dev, 0);
2023
2024 if (slot < 0) {
2025 LOG_ERR("%s: no space for i2c device: addr 0x%02x", dev->name, desc->addr);
2026 return slot;
2027 }
2028
2029 k_mutex_lock(&data->bus_lock, K_FOREVER);
2030
2031 uint32_t dev_id_rr0 = prepare_rr0_dev_address(desc->addr);
2032 uint32_t dev_id_rr2 = DEV_ID_RR2_LVR(desc->lvr);
2033
2034 sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
2035 sys_write32(0, config->base + DEV_ID_RR1(slot));
2036 sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));
2037
2038 data->cdns_i3c_i2c_priv_data[slot].id = slot;
2039 desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
2040 data->free_rr_slots &= ~BIT(slot);
2041
2042 sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
2043 config->base + DEVS_CTRL);
2044
2045 k_mutex_unlock(&data->bus_lock);
2046
2047 return 0;
2048 }
2049
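/**
* @brief Detach an I2C device from the controller.
*
* Clears the device's retaining register slot and returns it to the free pool.
*
* @param dev Pointer to controller device driver instance.
* @param desc Pointer to the I2C device descriptor.
*
* @retval 0 If successful.
* @retval -EINVAL If the device is not attached.
*/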
2050 static int cdns_i3c_i2c_detach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
2051 {
2052 const struct cdns_i3c_config *config = dev->config;
2053 struct cdns_i3c_data *data = dev->data;
2054 struct cdns_i3c_i2c_dev_data *cdns_i2c_device_data = desc->controller_priv;
2055
2056 if (cdns_i2c_device_data == NULL) {
2057 LOG_ERR("%s: device not attached", dev->name);
2058 return -EINVAL;
2059 }
2060
2061 k_mutex_lock(&data->bus_lock, K_FOREVER);
2062
2063 sys_write32(sys_read32(config->base + DEVS_CTRL) |
2064 DEVS_CTRL_DEV_CLR(cdns_i2c_device_data->id),
2065 config->base + DEVS_CTRL);
2066 data->free_rr_slots |= BIT(cdns_i2c_device_data->id);
2067 desc->controller_priv = NULL;
2068
2069 k_mutex_unlock(&data->bus_lock);
2070
2071 return 0;
2072 }
2073
2074 /**
2075 * @brief Transfer messages in I3C mode.
2076 *
2077 * @see i3c_transfer
2078 *
2079 * @param dev Pointer to device driver instance.
2080 * @param target Pointer to target device descriptor.
2081 * @param msgs Pointer to I3C messages.
2082 * @param num_msgs Number of messages to transfers.
2083 *
2084 * @return @see i3c_transfer
2085 */
2086 static int cdns_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
2087 struct i3c_msg *msgs, uint8_t num_msgs)
2088 {
2089 const struct cdns_i3c_config *config = dev->config;
2090 struct cdns_i3c_data *data = dev->data;
2091 int txsize = 0;
2092 int rxsize = 0;
2093 int ret;
2094
2095 /* make sure we are currently the active controller */
2096 if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
2097 return -EACCES;
2098 }
2099
2100 if (num_msgs == 0) {
2101 return 0;
2102 }
2103
2104 if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
2105 LOG_ERR("%s: Too many messages", dev->name);
2106 return -ENOMEM;
2107 }
2108
2109 /*
2110 * Ensure data will fit within FIFOs.
2111 *
2112 * TODO: This limitation prevents burst transfers greater than the
2113 * FIFO sizes and should be replaced with an implementation that
2114 * utilizes the RX/TX data interrupts.
2115 */
2116 for (int i = 0; i < num_msgs; i++) {
2117 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2118 rxsize += ROUND_UP(msgs[i].len, 4);
2119 } else {
2120 txsize += ROUND_UP(msgs[i].len, 4);
2121 }
2122 }
2123 if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
2124 LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
2125 return -ENOMEM;
2126 }
2127
2128 k_mutex_lock(&data->bus_lock, K_FOREVER);
2129
2130 /* wait for idle */
2131 ret = cdns_i3c_wait_for_idle(dev);
2132 if (ret != 0) {
2133 goto error;
2134 }
2135
2136 /*
2137 * Prepare transfer commands. Currently there is only a single transfer
2138 * in-flight but it would be possible to keep a queue of transfers. If so,
2139 * this preparation could be completed outside of the bus lock allowing
2140 * greater parallelism.
2141 */
2142 bool send_broadcast = true;
2143
2144 for (int i = 0; i < num_msgs; i++) {
2145 struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
2146 uint32_t pl = msgs[i].len;
2147 /* check hdr mode */
2148 if ((!(msgs[i].flags & I3C_MSG_HDR)) ||
2149 ((msgs[i].flags & I3C_MSG_HDR) && (msgs[i].hdr_mode == 0))) {
2150 /* HDR message flag is not set, or the HDR flag is set but no HDR mode is set
2151 */
2152 cmd->len = pl;
2153 cmd->buf = msgs[i].buf;
2154
2155 cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
2156 cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(target->dynamic_addr);
2157 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2158 cmd->cmd0 |= CMD0_FIFO_RNW;
2159 /*
2160 * For I3C_XMIT_MODE_NO_ADDR reads in SDR mode,
2161 * CMD0_FIFO_PL_LEN specifies the abort limit, not the number of bytes to read
2162 */
2163 cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl + 1);
2164 } else {
2165 cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl);
2166 }
2167
2168 /* Send broadcast header on first transfer or after a STOP. */
2169 if (!(msgs[i].flags & I3C_MSG_NBCH) && (send_broadcast)) {
2170 cmd->cmd0 |= CMD0_FIFO_BCH;
2171 send_broadcast = false;
2172 }
2173
2174 /*
2175 * Send repeated start on all transfers except the last or those marked
2176 * STOP.
2177 */
2178 if ((i < (num_msgs - 1)) && ((msgs[i].flags & I3C_MSG_STOP) == 0)) {
2179 cmd->cmd0 |= CMD0_FIFO_RSBC;
2180 } else {
2181 send_broadcast = true;
2182 }
2183
2184 /*
2185 * write the address of num_xfer which is to be updated upon message
2186 * completion
2187 */
2188 cmd->num_xfer = &(msgs[i].num_xfer);
2189 cmd->hdr = I3C_DATA_RATE_SDR;
2190 } else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) &&
2191 (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && (msgs[i].flags & I3C_MSG_HDR)) {
2192 uint16_t ddr_header_payload;
2193
2194 /* DDR sends data out in 16b, so len must be a multiple of 2 */
2195 if (!((pl % 2) == 0)) {
2196 ret = -EINVAL;
2197 goto error;
2198 }
2199 /* HDR message flag is set and hdr mode is DDR */
2200 cmd->buf = msgs[i].buf;
2201 if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
2202 /* HDR-DDR Read */
2203 ddr_header_payload = HDR_CMD_RD |
2204 HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2205 (target->dynamic_addr << 1);
2206 /* Parity Adjustment Bit for Reads */
2207 ddr_header_payload =
2208 prepare_ddr_cmd_parity_adjustment_bit(ddr_header_payload);
2209 /* HDR-DDR Command Word */
2210 cmd->ddr_header =
2211 DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2212 } else {
2213 uint8_t crc5 = 0x1F;
2214 /* HDR-DDR Write */
2215 ddr_header_payload = HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
2216 (target->dynamic_addr << 1);
2217 /* HDR-DDR Command Word */
2218 cmd->ddr_header =
2219 DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
2220 /* calculate crc5 */
2221 crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2222 for (int j = 0; j < pl; j += 2) {
2223 crc5 = i3c_cdns_crc5(
2224 crc5,
2225 sys_get_be16((void *)((uintptr_t)cmd->buf + j)));
2226 }
2227 cmd->ddr_crc = DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | (crc5 << 9);
2228 }
2229 /* Length of DDR Transfer is length of payload (in 16b) + header and CRC
2230 * blocks
2231 */
2232 cmd->len = ((pl / 2) + 2);
2233
2234 /* prep command FIFO for ENTHDR0 */
2235 cmd->cmd0 = CMD0_FIFO_IS_CCC;
2236 cmd->cmd1 = I3C_CCC_ENTHDR0;
2237 /* write the address of num_xfer which is to be updated upon message
2238 * completion
2239 */
2240 cmd->num_xfer = &(msgs[i].num_xfer);
2241 cmd->hdr = I3C_DATA_RATE_HDR_DDR;
2242 } else {
2243 LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, msgs[i].hdr_mode);
2244 ret = -ENOTSUP;
2245 goto error;
2246 }
2247 }
2248
2249 data->xfer.ret = -ETIMEDOUT;
2250 data->xfer.num_cmds = num_msgs;
2251
2252 cdns_i3c_start_transfer(dev);
2253 if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
2254 LOG_ERR("%s: transfer timed out", dev->name);
2255 cdns_i3c_cancel_transfer(dev);
2256 }
2257
2258 ret = data->xfer.ret;
2259 error:
2260 k_mutex_unlock(&data->bus_lock);
2261
2262 return ret;
2263 }
2264
2265 #ifdef CONFIG_I3C_USE_IBI
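/**
* @brief Read IBI payload bytes from the IBI data FIFO.
*
* Reads @p len bytes from the IBI data FIFO into @p buf, one 32-bit
* word at a time.
*
* @param config Pointer to controller configuration.
* @param buf Pointer to the destination buffer.
* @param len Number of bytes to read.
*
* @retval 0 If successful.
* @retval -EIO If the IBI FIFO runs empty before @p len bytes are read.
*/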
2266 static int cdns_i3c_read_ibi_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
2267 {
2268 uint32_t *ptr = buf;
2269 uint32_t remain, val;
2270
2271 for (remain = len; remain >= 4; remain -= 4) {
2272 if (cdns_i3c_ibi_fifo_empty(config)) {
2273 return -EIO;
2274 }
2275 val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2276 *ptr++ = val;
2277 }
2278
2279 if (remain > 0) {
2280 if (cdns_i3c_ibi_fifo_empty(config)) {
2281 return -EIO;
2282 }
2283 val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
2284 memcpy(ptr, &val, remain);
2285 }
2286
2287 return 0;
2288 }
2289
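/**
* @brief Handle a target-initiated IBI.
*
* Looks up the target descriptor from the IBI response, drains any IBI
* payload bytes, and enqueues the IBI for the IBI workqueue.
*
* @param dev Pointer to controller device driver instance.
* @param ibir IBI response word read from the IBIR FIFO.
*/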
2290 static void cdns_i3c_handle_ibi(const struct device *dev, uint32_t ibir)
2291 {
2292 const struct cdns_i3c_config *config = dev->config;
2293 struct cdns_i3c_data *data = dev->data;
2294
2295 uint8_t ibi_data[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];
2296
2297 /* The slave ID returned here is the device ID in the SIR map NOT the device ID
2298 * in the RR map.
2299 */
2300 uint8_t slave_id = IBIR_SLVID(ibir);
2301
2302 if (slave_id == IBIR_SLVID_INV) {
2303 /* DA does not match any value among SIR map */
2304 return;
2305 }
2306
2307 uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(slave_id + 1));
2308 uint8_t dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(dev_id_rr0);
2309 struct i3c_device_desc *desc =
2310 i3c_dev_list_i3c_addr_find(&data->common.attached_dev, dyn_addr);
2311
2312 /*
2313 * Check for NAK or error conditions.
2314 *
2315 * Note: The logging is for debugging only, so it will be compiled out in most cases.
2316 * However, if the log level for this module is DEBUG and the log mode is IMMEDIATE or
2317 * MINIMAL, this may cause problems because this code runs inside an ISR.
2318 */
2319 if (!(IBIR_ACKED & ibir)) {
2320 LOG_DBG("%s: NAK for slave ID %u", dev->name, (unsigned int)slave_id);
2321 return;
2322 }
2323 if (ibir & IBIR_ERROR) {
2324 LOG_ERR("%s: Data overflow", dev->name);
2325 return;
2326 }
2327
2328 /* Read out any payload bytes */
2329 uint8_t ibi_len = IBIR_XFER_BYTES(ibir);
2330
2331 if (ibi_len > 0) {
2332 if (cdns_i3c_read_ibi_fifo(config, ibi_data, ibi_len) < 0) {
2333 LOG_ERR("%s: Failed to get payload", dev->name);
2334 }
2335 }
2336
2337 if (i3c_ibi_work_enqueue_target_irq(desc, ibi_data, ibi_len) != 0) {
2338 LOG_ERR("%s: Error enqueue IBI IRQ work", dev->name);
2339 }
2340 }
2341
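/**
* @brief Handle a Hot-Join request.
*
* Enqueues Hot-Join processing on the IBI workqueue if the request was
* ACKed by the controller.
*
* @param dev Pointer to controller device driver instance.
* @param ibir IBI response word read from the IBIR FIFO.
*/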
2342 static void cdns_i3c_handle_hj(const struct device *dev, uint32_t ibir)
2343 {
2344 if (!(IBIR_ACKED & ibir)) {
2345 LOG_DBG("%s: NAK for HJ", dev->name);
2346 return;
2347 }
2348
2349 if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
2350 LOG_ERR("%s: Error enqueue IBI HJ work", dev->name);
2351 }
2352 }
2353
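/**
* @brief Demultiplex pending IBI responses.
*
* Drains the IBIR FIFO and dispatches each entry to the IBI or Hot-Join
* handler. Controllership requests are not implemented.
*
* @param dev Pointer to controller device driver instance.
*/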
2354 static void cdns_i3c_master_demux_ibis(const struct device *dev)
2355 {
2356 const struct cdns_i3c_config *config = dev->config;
2357
2358 for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
2359 !(status0 & MST_STATUS0_IBIR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) {
2360 uint32_t ibir = sys_read32(config->base + IBIR);
2361
2362 switch (IBIR_TYPE(ibir)) {
2363 case IBIR_TYPE_IBI:
2364 cdns_i3c_handle_ibi(dev, ibir);
2365 break;
2366 case IBIR_TYPE_HJ:
2367 cdns_i3c_handle_hj(dev, ibir);
2368 break;
2369 case IBIR_TYPE_MR:
2370 /* not implemented */
2371 break;
2372 default:
2373 break;
2374 }
2375 }
2376 }
2377
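/**
* @brief Signal that a target-initiated Hot-Join has completed.
*
* Gives the Hot-Join completion semaphore; called once a dynamic address
* has been assigned to this target.
*
* @param dev Pointer to controller device driver instance.
*/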
2378 static void cdns_i3c_target_ibi_hj_complete(const struct device *dev)
2379 {
2380 struct cdns_i3c_data *data = dev->data;
2381
2382 k_sem_give(&data->ibi_hj_complete);
2383 }
2384 #endif
2385
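/**
* @brief Interrupt service routine for the Cadence I3C controller.
*
* Dispatches controller-mode interrupts (transfer completion, IBIs, FIFO
* errors) when acting as the active controller, and target-mode interrupts
* (SDR/DDR FIFO thresholds, transfer completion, DA updates) otherwise.
*
* @param dev Pointer to controller device driver instance.
*/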
2386 static void cdns_i3c_irq_handler(const struct device *dev)
2387 {
2388 const struct cdns_i3c_config *config = dev->config;
2389
2390 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
2391 uint32_t int_st = sys_read32(config->base + MST_ISR);
2392
2393 /* Controller halted */
2394 if (int_st & MST_INT_HALTED) {
2395 LOG_WRN("Core Halted, 2 read aborts");
2396 sys_write32(MST_INT_HALTED, config->base + MST_ICR);
2397 }
2398
2399 /* Command queue empty */
2400 if (int_st & MST_INT_CMDD_EMP) {
2401 cdns_i3c_complete_transfer(dev);
2402 sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR);
2403 }
2404
2405 /* Command queue threshold */
2406 if (int_st & MST_INT_CMDD_THR) {
2407 sys_write32(MST_INT_CMDD_THR, config->base + MST_ICR);
2408 }
2409
2410 /* Command response threshold hit */
2411 if (int_st & MST_INT_CMDR_THR) {
2412 sys_write32(MST_INT_CMDR_THR, config->base + MST_ICR);
2413 }
2414
2415 /* RX data ready */
2416 if (int_st & MST_INT_RX_THR) {
2417 sys_write32(MST_INT_RX_THR, config->base + MST_ICR);
2418 }
2419
2420 /* In-band interrupt */
2421 if (int_st & MST_INT_IBIR_THR) {
2422 sys_write32(MST_INT_IBIR_THR, config->base + MST_ICR);
2423 #ifdef CONFIG_I3C_USE_IBI
2424 cdns_i3c_master_demux_ibis(dev);
2425 #else
2426 LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled",
2427 dev->name);
2428 #endif
2429 }
2430 /* TX FIFO overflow */
2431 if (int_st & MST_INT_TX_OVF) {
2432 sys_write32(MST_INT_TX_OVF, config->base + MST_ICR);
2433 LOG_ERR("%s: controller tx buffer overflow", dev->name);
2434 }
2435
2436 /* RX FIFO underflow */
2437 if (int_st & MST_INT_RX_UNF) {
2438 sys_write32(MST_INT_RX_UNF, config->base + MST_ICR);
2439 LOG_ERR("%s: controller rx buffer underflow", dev->name);
2440 }
2441
2442 /* In-band interrupt data */
2443 if (int_st & MST_INT_IBID_THR) {
2444 sys_write32(MST_INT_IBID_THR, config->base + MST_ICR);
2445 }
2446 } else {
2447 uint32_t int_sl = sys_read32(config->base + SLV_ISR);
2448 struct cdns_i3c_data *data = dev->data;
2449 const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
2450 /* Clear interrupts */
2451 sys_write32(int_sl, config->base + SLV_ICR);
2452
2453 /* SLV SDR rx fifo threshold */
2454 if (int_sl & SLV_INT_SDR_RX_THR) {
2455 /* while rx fifo is not empty */
2456 while (!(sys_read32(config->base + SLV_STATUS1) &
2457 SLV_STATUS1_SDR_RX_EMPTY)) {
2458 if (target_cb != NULL && target_cb->write_received_cb != NULL) {
2459 cdns_i3c_target_read_rx_fifo(dev);
2460 }
2461 }
2462 }
2463
2464 /* SLV SDR tx fifo threshold */
2465 if (int_sl & SLV_INT_SDR_TX_THR) {
2466 int status = 0;
2467
2468 if (target_cb != NULL && target_cb->read_processed_cb) {
2469 /* while tx fifo is not full and there is still data available */
2470 while ((!(sys_read32(config->base + SLV_STATUS1) &
2471 SLV_STATUS1_SDR_TX_FULL)) &&
2472 (status == 0)) {
2473 /* call function pointer for read */
2474 uint8_t byte;
2475 /* will return negative if no data left to transmit and 0 if
2476 * data available
2477 */
2478 status = target_cb->read_processed_cb(data->target_config,
2479 &byte);
2480 if (status == 0) {
2481 cdns_i3c_write_tx_fifo(config, &byte, sizeof(byte));
2482 }
2483 }
2484 }
2485 }
2486
2487 /* SLV SDR rx complete */
2488 if (int_sl & SLV_INT_SDR_RD_COMP) {
2489 /* SLV_STATUS0 needs to be read here, else a NACK will happen */
2490 (void)sys_read32(config->base + SLV_STATUS0);
2491 /* call stop function pointer */
2492 if (target_cb != NULL && target_cb->stop_cb) {
2493 target_cb->stop_cb(data->target_config);
2494 }
2495 }
2496
2497 /* SLV SDR tx complete */
2498 if (int_sl & SLV_INT_SDR_WR_COMP) {
2499 /* SLV_STATUS0 needs to be read here, else a NACK will happen */
2500 (void)sys_read32(config->base + SLV_STATUS0);
2501 /* clear bytes read parameter */
2502 data->fifo_bytes_read = 0;
2503 /* call stop function pointer */
2504 if (target_cb != NULL && target_cb->stop_cb) {
2505 target_cb->stop_cb(data->target_config);
2506 }
2507 }
2508
2509 /* DA has been updated */
2510 if (int_sl & SLV_INT_DA_UPD) {
2511 LOG_INF("%s: DA updated to 0x%02lx", dev->name,
2512 SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1)));
2513 /* The HJ response could be a DISEC, which would trigger the SLV_INT_EVENT_UP bit,
2514 * but the active controller is still expected to eventually perform DAA
2515 */
2516 #ifdef CONFIG_I3C_USE_IBI
2517 cdns_i3c_target_ibi_hj_complete(dev);
2518 #endif
2519 }
2520
2521 /* HJ complete and DA has been assigned */
2522 if (int_sl & SLV_INT_HJ_DONE) {
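/* Nothing to do here; the assigned DA is handled by the SLV_INT_DA_UPD case above */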
2523 }
2524
2525 /* Controllership has been given */
2526 if (int_sl & SLV_INT_MR_DONE) {
2527 /* TODO: implement support for controllership handoff */
2528 }
2529
2530 /* ENEC or DISEC has been received */
2531 if (int_sl & SLV_INT_EVENT_UP) {
2532 }
2533
2534 /* sdr transfer aborted by controller */
2535 if (int_sl & SLV_INT_M_RD_ABORT) {
2536 /* TODO: consider flushing tx buffer? */
2537 }
2538
2539 /* SLV SDR rx fifo underflow */
2540 if (int_sl & SLV_INT_SDR_RX_UNF) {
2541 LOG_ERR("%s: slave sdr rx buffer underflow", dev->name);
2542 }
2543
2544 /* SLV SDR tx fifo overflow */
2545 if (int_sl & SLV_INT_SDR_TX_OVF) {
2546 LOG_ERR("%s: slave sdr tx buffer overflow", dev->name);
2547 }
2548
2549 if (int_sl & SLV_INT_DDR_RX_THR) {
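/* DDR RX data is drained in the SLV_INT_DDR_WR_COMP handler below; nothing to do here yet */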
2550 }
2551
2552 /* SLV DDR WR COMPLETE */
2553 if (int_sl & SLV_INT_DDR_WR_COMP) {
2554 /* initial value of CRC5 for HDR-DDR is 0x1F */
2555 uint8_t crc5 = 0x1F;
2556
2557 while (!(sys_read32(config->base + SLV_STATUS1) &
2558 SLV_STATUS1_DDR_RX_EMPTY)) {
2559 uint32_t ddr_rx_data = sys_read32(config->base + SLV_DDR_RX_FIFO);
2560 uint32_t preamble = (ddr_rx_data & DDR_PREAMBLE_MASK);
2561
2562 if (preamble == DDR_PREAMBLE_DATA_ABORT ||
2563 preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
2564 uint16_t ddr_payload = DDR_DATA(ddr_rx_data);
2565
2566 if (cdns_i3c_ddr_parity(ddr_payload) !=
2567 (ddr_rx_data & (DDR_ODD_PARITY | DDR_EVEN_PARITY))) {
2568 LOG_ERR("%s: Received incorrect DDR Parity",
2569 dev->name);
2570 }
2571 /* calculate a running crc */
2572 crc5 = i3c_cdns_crc5(crc5, ddr_payload);
2573
2574 if (target_cb != NULL &&
2575 target_cb->write_received_cb != NULL) {
2576 /* DDR receives 2B for each payload */
2577 target_cb->write_received_cb(
2578 data->target_config,
2579 (uint8_t)((ddr_payload >> 8) & 0xFF));
2580 target_cb->write_received_cb(
2581 data->target_config,
2582 (uint8_t)(ddr_payload));
2583 }
2584
2585 } else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
2586 ((ddr_rx_data & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
2587 /* should come through here last */
2588 if (crc5 != DDR_CRC(ddr_rx_data)) {
2589 LOG_ERR("%s: Received incorrect DDR CRC5",
2590 dev->name);
2591 }
2592 } else if (preamble == DDR_PREAMBLE_CMD_CRC) {
2593 /* should come through here first */
2594 uint16_t ddr_header_payload = DDR_DATA(ddr_rx_data);
2595
2596 crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
2597 }
2598 }
2599
2600 if (target_cb != NULL && target_cb->stop_cb != NULL) {
2601 target_cb->stop_cb(data->target_config);
2602 }
2603 }
2604
2605 /* SLV SDR rx complete */
2606 if (int_sl & SLV_INT_DDR_RD_COMP) {
2607 /* SLV_STATUS0 needs to be read here, else a NACK will happen */
2608 (void)sys_read32(config->base + SLV_STATUS0);
2609 /* call stop function pointer */
2610 if (target_cb != NULL && target_cb->stop_cb) {
2611 target_cb->stop_cb(data->target_config);
2612 }
2613 }
2614
2615 /* SLV DDR TX THR */
2616 if (int_sl & SLV_INT_DDR_TX_THR) {
2617 int status = 0;
2618
2619 if (target_cb != NULL && target_cb->read_processed_cb) {
2620
2621 while ((!(sys_read32(config->base + SLV_STATUS1) &
2622 SLV_STATUS1_DDR_TX_FULL)) &&
2623 (status == 0)) {
2624 /* call function pointer for read */
2625 uint8_t byte;
2626 /* will return negative if no data left to transmit
2627 * and 0 if data available
2628 */
2629 status = target_cb->read_processed_cb(data->target_config,
2630 &byte);
2631 if (status == 0) {
2632 cdns_i3c_write_ddr_tx_fifo(config, &byte,
2633 sizeof(byte));
2634 }
2635 }
2636 }
2637 }
2638 }
2639 }
2640
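/**
* @brief Read and cache the hardware configuration.
*
* Reads the REV_ID and CONF_STATUS registers and stores the revision and
* FIFO depths (converted from 32-bit words to bytes) in the driver data.
*
* @param dev Pointer to controller device driver instance.
*/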
2641 static void cdns_i3c_read_hw_cfg(const struct device *dev)
2642 {
2643 const struct cdns_i3c_config *config = dev->config;
2644 struct cdns_i3c_data *data = dev->data;
2645
2646 uint32_t devid = sys_read32(config->base + DEV_ID);
2647 uint32_t revid = sys_read32(config->base + REV_ID);
2648
2649 LOG_DBG("%s: Device info:\r\n"
2650 " vid: 0x%03lX, pid: 0x%03lX\r\n"
2651 " revision: major = %lu, minor = %lu\r\n"
2652 " device ID: 0x%04X",
2653 dev->name, REV_ID_VID(revid), REV_ID_PID(revid), REV_ID_REV_MAJOR(revid),
2654 REV_ID_REV_MINOR(revid), devid);
2655
2656 /*
2657 * Depths are specified as number of words (32bit), convert to bytes
2658 */
2659 uint32_t cfg0 = sys_read32(config->base + CONF_STATUS0);
2660 uint32_t cfg1 = sys_read32(config->base + CONF_STATUS1);
2661
2662 data->hw_cfg.rev_id = revid;
2663 data->hw_cfg.cmdr_mem_depth = CONF_STATUS0_CMDR_DEPTH(cfg0) * 4;
2664 data->hw_cfg.cmd_mem_depth = CONF_STATUS1_CMD_DEPTH(cfg1) * 4;
2665 data->hw_cfg.rx_mem_depth = CONF_STATUS1_RX_DEPTH(cfg1) * 4;
2666 data->hw_cfg.tx_mem_depth = CONF_STATUS1_TX_DEPTH(cfg1) * 4;
2667 data->hw_cfg.ddr_rx_mem_depth = CONF_STATUS1_SLV_DDR_RX_DEPTH(cfg1) * 4;
2668 data->hw_cfg.ddr_tx_mem_depth = CONF_STATUS1_SLV_DDR_TX_DEPTH(cfg1) * 4;
2669 data->hw_cfg.ibir_mem_depth = CONF_STATUS0_IBIR_DEPTH(cfg0) * 4;
2670 data->hw_cfg.ibi_mem_depth = CONF_STATUS1_IBI_DEPTH(cfg1) * 4;
2671
2672 LOG_DBG("%s: FIFO info:\r\n"
2673 " cmd_mem_depth = %u\r\n"
2674 " cmdr_mem_depth = %u\r\n"
2675 " rx_mem_depth = %u\r\n"
2676 " tx_mem_depth = %u\r\n"
2677 " ddr_rx_mem_depth = %u\r\n"
2678 " ddr_tx_mem_depth = %u\r\n"
2679 " ibi_mem_depth = %u\r\n"
2680 " ibir_mem_depth = %u",
2681 dev->name, data->hw_cfg.cmd_mem_depth, data->hw_cfg.cmdr_mem_depth,
2682 data->hw_cfg.rx_mem_depth, data->hw_cfg.tx_mem_depth, data->hw_cfg.ddr_rx_mem_depth,
2683 data->hw_cfg.ddr_tx_mem_depth, data->hw_cfg.ibi_mem_depth,
2684 data->hw_cfg.ibir_mem_depth);
2685
2686 /* Regardless of the cmd depth size we are limited by our cmd array length. */
2687 data->hw_cfg.cmd_mem_depth = MIN(data->hw_cfg.cmd_mem_depth, ARRAY_SIZE(data->xfer.cmds));
2688 }
2689
2690 /**
2691 * @brief Get configuration of the I3C hardware.
2692 *
2693 * This provides a way to get the current configuration of the I3C hardware.
2694 *
2695 * This can return cached config or probed hardware parameters, but it has to
2696 * be up to date with current configuration.
2697 *
2698 * @param[in] dev Pointer to controller device driver instance.
2699 * @param[in] type Type of configuration parameters being passed
2700 * in @p config.
2701 * @param[in,out] config Pointer to the configuration parameters.
2702 *
2703 * Note that if @p type is @c I3C_CONFIG_CUSTOM, @p config must contain
2704 * the ID of the parameter to be retrieved.
2705 *
2706 * @retval 0 If successful.
2707 * @retval -EIO General Input/Output errors.
2708 * @retval -ENOSYS If not implemented.
2709 */
2710 static int cdns_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config)
2711 {
2712 struct cdns_i3c_data *data = dev->data;
2713 int ret = 0;
2714
2715 if (config == NULL) {
2716 ret = -EINVAL;
2717 goto out_configure;
2718 }
2719
2720 (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config));
2721
2722 out_configure:
2723 return ret;
2724 }
2725
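/**
* @brief Write an HDR-DDR read response into the target DDR TX FIFO.
*
* Builds the DDR command word, data words and CRC word for a target read
* and writes as much as fits into the DDR TX FIFO.
*
* @param dev Pointer to the device structure for an I3C controller
* driver configured in target mode.
* @param buf Pointer to the buffer.
* @param len Length of the buffer; must be a multiple of 2.
*
* @retval Total number of payload bytes written.
* @retval -ENOSPC No space in the DDR TX FIFO.
* @retval -EINVAL If @p len is not a multiple of 2.
*/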
2726 static int cdns_i3c_target_tx_ddr_write(const struct device *dev, uint8_t *buf, uint16_t len)
2727 {
2728 const struct cdns_i3c_config *config = dev->config;
2729 struct cdns_i3c_data *data = dev->data;
2730 uint32_t i, preamble;
2731 uint32_t data_word;
2732 uint8_t crc5 = 0x1F;
2733
2734 /* check if there is space available in the tx fifo */
2735 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL) {
2736 return -ENOSPC;
2737 }
2738
2739 /* DDR sends data out in 16b, so len must be a multiple of 2 */
2740 if (!((len % 2) == 0)) {
2741 return -EINVAL;
2742 }
2743
2744 /* The header must be known in advance to calculate the crc5 */
2745 uint8_t slave_da = SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1));
2746 uint16_t ddr_payload_header = HDR_CMD_RD | (slave_da << 1);
2747
2748 ddr_payload_header = prepare_ddr_cmd_parity_adjustment_bit(ddr_payload_header);
2749 crc5 = i3c_cdns_crc5(crc5, ddr_payload_header);
2750
2751 /* write as much as you can to the fifo */
2752 for (i = 0;
2753 i < len && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL));
2754 i += 2) {
2755 /* Use the ALT preamble for all but the first packet */
2756 preamble = (i > 0) ? DDR_PREAMBLE_DATA_ABORT_ALT : DDR_PREAMBLE_DATA_ABORT;
2757 data_word = (preamble | prepare_ddr_word(sys_get_be16(&buf[i])));
2758 crc5 = i3c_cdns_crc5(crc5, sys_get_be16(&buf[i]));
2759 sys_write32(data_word, config->base + SLV_DDR_TX_FIFO);
2760 }
2761 /* end of data buffer, write crc packet (if we are still not full) */
2762 if ((i == len) && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL))) {
2763 sys_write32(DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | crc5 << 9,
2764 config->base + SLV_DDR_TX_FIFO);
2765 }
2766
2767 /* setup THR interrupt */
2768 uint32_t thr_ctrl = sys_read32(config->base + SLV_DDR_TX_RX_THR_CTRL);
2769
2770 /*
2771 * Interrupt at half of the data or FIFO depth to give it enough time to be
2772 * processed. The ISR will then call back to the function pointer
2773 * `read_processed_cb` to collect more data to transmit
2774 */
2775 thr_ctrl &= ~TX_THR_MASK;
2776 thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
2777
2778 sys_write32(thr_ctrl, config->base + SLV_DDR_TX_RX_THR_CTRL);
2779 /* return total bytes written */
2780 return i;
2781 }
2782
2783 /**
2784 * @brief Writes to the Target's TX FIFO
2785 *
2786 * The Cadence I3C will then ACK read requests to its TX FIFO from a
2787 * Controller
2788 *
2789 * @param dev Pointer to the device structure for an I3C controller
2790 * driver configured in target mode.
2791 * @param buf Pointer to the buffer
2792 * @param len Length of the buffer
2793 *
2794 * @retval Total number of bytes written
2795 * @retval -EACCES Not in Target Mode
2796 * @retval -ENOSPC No space in Tx FIFO
2797 */
2798 static int cdns_i3c_target_tx_write(const struct device *dev, uint8_t *buf, uint16_t len,
2799 uint8_t hdr_mode)
2800 {
2801 const struct cdns_i3c_config *config = dev->config;
2802 struct cdns_i3c_data *data = dev->data;
2803 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
2804 const uint32_t *buf_32 = (uint32_t *)buf;
2805 uint32_t i = 0;
2806 uint32_t val = 0;
2807 uint16_t remain = len;
2808
2809 /* check if we are currently a target */
2810 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
2811 return -EACCES;
2812 }
2813
2814 /* check if there is space available in the tx fifo */
2815 if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL) {
2816 return -ENOSPC;
2817 }
2818
2819 k_mutex_lock(&data->bus_lock, K_FOREVER);
2820
2821 /* rev 1p7 requires the length be written to the SLV_CTRL reg */
2822 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2823 sys_write32(len, config->base + SLV_CTRL);
2824 }
2825 if (hdr_mode == I3C_MSG_HDR_DDR) {
2826 if (ctrl_config->supported_hdr & I3C_MSG_HDR_DDR) {
2827 i = cdns_i3c_target_tx_ddr_write(dev, buf, len);
2828 /* TODO: DDR THR interrupt support not implemented yet*/
2829 } else {
2830 LOG_ERR("%s: HDR-DDR not supported", dev->name);
2831 i = -ENOTSUP;
2832 }
2833 } else if (hdr_mode == 0) {
2834 /* write as much as you can to the fifo */
2835 while (i < len &&
2836 (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL))) {
2837 /* with rev 1p7 and later, while acting as a target, the fifos use the full word;
2838 * otherwise only the first byte of each word is used
2839 */
2840 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
2841 remain = len - i;
2842 if (remain >= 4) {
2843 val = *buf_32++;
2844 } else if (remain > 0) {
2845 val = 0;
2846 memcpy(&val, buf_32, remain);
2847 }
2848 sys_write32(val, config->base + TX_FIFO);
2849 i += 4;
2850 } else {
2851 sys_write32((uint32_t)buf[i], config->base + TX_FIFO);
2852 i++;
2853 }
2854 }
2855
2856 /* setup THR interrupt */
2857 uint32_t thr_ctrl = sys_read32(config->base + TX_RX_THR_CTRL);
2858
2859 /*
2860 * Interrupt at half of the data or FIFO depth to give it enough time to be
2861 * processed. The ISR will then call back to the function pointer
2862 * `read_processed_cb` to collect more data to transmit
2863 */
2864 thr_ctrl &= ~TX_THR_MASK;
2865 thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2));
2866 sys_write32(thr_ctrl, config->base + TX_RX_THR_CTRL);
2867 } else {
2868 LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, hdr_mode);
2869 i = -ENOTSUP;
2870 }
2871
2872 k_mutex_unlock(&data->bus_lock);
2873
2874 /* return total bytes written */
2875 return i;
2876 }
2877
2878 /**
2879 * @brief Instructs the I3C Target device to register itself to the I3C Controller
2880 *
2881 * This routine instructs the I3C Target device to register itself to the I3C
2882 * Controller via its parent controller's i3c_target_register() API.
2883 *
2884 * @param dev Pointer to target device driver instance.
2885 * @param cfg Config struct with functions and parameters used by the I3C driver
2886 * to send bus events
2887 *
2888 * @retval 0 Always.
2889 */
2890 static int cdns_i3c_target_register(const struct device *dev, struct i3c_target_config *cfg)
2891 {
2892 struct cdns_i3c_data *data = dev->data;
2893
2894 data->target_config = cfg;
2895 return 0;
2896 }
2897
2898 /**
2899 * @brief Unregisters the provided config as Target device
2900 *
2901 * This routine disables I3C target mode for the 'dev' I3C bus driver using
2902 * the provided 'config' struct containing the functions and parameters
2903 * to send bus events.
2904 *
2905 * @param dev Pointer to target device driver instance.
2906 * @param cfg Config struct with functions and parameters used by the I3C driver
2907 * to send bus events
2908 *
2909 * @retval 0 Always.
2910 */
2911 static int cdns_i3c_target_unregister(const struct device *dev, struct i3c_target_config *cfg)
2912 {
2913 /* no way to disable? maybe write DA to 0? */
2914 return 0;
2915 }
2916
2917 /**
2918 * @brief Find a registered I3C target device.
2919 *
2920 * This returns the I3C device descriptor of the I3C device
2921 * matching the incoming @p id.
2922 *
2923 * @param dev Pointer to controller device driver instance.
2924 * @param id Pointer to I3C device ID.
2925 *
2926 * @return @see i3c_device_find.
2927 */
2928 static struct i3c_device_desc *cdns_i3c_device_find(const struct device *dev,
2929 const struct i3c_device_id *id)
2930 {
2931 const struct cdns_i3c_config *config = dev->config;
2932
2933 return i3c_dev_list_find(&config->common.dev_list, id);
2934 }
2935
2936 /**
2937 * Find a registered I2C target device.
2938 *
2939 * Controller only API.
2940 *
2941 * This returns the I2C device descriptor of the I2C device
2942 * matching the device address @p addr.
2943 *
2944 * @param dev Pointer to controller device driver instance.
2945 * @param addr I2C target device address.
2946 *
2947 * @return @see i3c_i2c_device_find.
2948 */
2949 static struct i3c_i2c_device_desc *cdns_i3c_i2c_device_find(const struct device *dev, uint16_t addr)
2950 {
2951 struct cdns_i3c_data *data = dev->data;
2952
2953 return i3c_dev_list_i2c_addr_find(&data->common.attached_dev, addr);
2954 }
2955
2956 /**
2957 * @brief Transfer messages in I2C mode.
2958 *
2959 * @see i2c_transfer
2960 *
2961 * @param dev Pointer to device driver instance.
2962 * @param target Pointer to target device descriptor.
2963 * @param msgs Pointer to I2C messages.
2964 * @param num_msgs Number of messages to transfers.
2965 *
2966 * @return @see i2c_transfer
2967 */
2968 static int cdns_i3c_i2c_api_transfer(const struct device *dev, struct i2c_msg *msgs,
2969 uint8_t num_msgs, uint16_t addr)
2970 {
2971 struct i3c_i2c_device_desc *i2c_dev = cdns_i3c_i2c_device_find(dev, addr);
2972 int ret;
2973
2974 if (i2c_dev == NULL) {
2975 ret = -ENODEV;
2976 } else {
2977 ret = cdns_i3c_i2c_transfer(dev, i2c_dev, msgs, num_msgs);
2978 }
2979
2980 return ret;
2981 }
2982
2983 /**
2984 * Determine I3C bus mode from the i2c devices on the bus
2985 *
2986 * Reads the LVR of all I2C devices and returns the resulting
2987 * I3C bus mode.
2988 *
2989 * @param dev_list Pointer to device list
2990 *
2991 * @return @see enum i3c_bus_mode.
2992 */
2993 static enum i3c_bus_mode i3c_bus_mode(const struct i3c_dev_list *dev_list)
2994 {
2995 enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
2996
2997 for (int i = 0; i < dev_list->num_i2c; i++) {
2998 switch (I3C_LVR_I2C_DEV_IDX(dev_list->i2c[i].lvr)) {
2999 case I3C_LVR_I2C_DEV_IDX_0:
3000 if (mode < I3C_BUS_MODE_MIXED_FAST) {
3001 mode = I3C_BUS_MODE_MIXED_FAST;
3002 }
3003 break;
3004 case I3C_LVR_I2C_DEV_IDX_1:
3005 if (mode < I3C_BUS_MODE_MIXED_LIMITED) {
3006 mode = I3C_BUS_MODE_MIXED_LIMITED;
3007 }
3008 break;
3009 case I3C_LVR_I2C_DEV_IDX_2:
3010 if (mode < I3C_BUS_MODE_MIXED_SLOW) {
3011 mode = I3C_BUS_MODE_MIXED_SLOW;
3012 }
3013 break;
3014 default:
3015 mode = I3C_BUS_MODE_INVALID;
3016 break;
3017 }
3018 }
3019 return mode;
3020 }
3021
3022 /**
3023 * Determine the THD_DELAY value for the CTRL register
3024 *
3025 * @param dev Pointer to device driver instance.
3026 *
3027 * @return Value to be written to THD_DEL
3028 */
3029 static uint8_t cdns_i3c_clk_to_data_turnaround(const struct device *dev)
3030 {
3031 const struct cdns_i3c_config *config = dev->config;
3032 uint32_t input_clock_frequency = config->input_frequency;
3033 uint8_t thd_delay =
3034 DIV_ROUND_UP(I3C_TSCO_DEFAULT_NS, (NSEC_PER_SEC / input_clock_frequency));
3035
3036 if (thd_delay > THD_DELAY_MAX) {
3037 thd_delay = THD_DELAY_MAX;
3038 }
3039
3040 return (THD_DELAY_MAX - thd_delay);
3041 }
3042
3043 /**
3044 * @brief Initialize the hardware.
3045 *
3046 * @param dev Pointer to controller device driver instance.
3047 */
3048 static int cdns_i3c_bus_init(const struct device *dev)
3049 {
3050 struct cdns_i3c_data *data = dev->data;
3051 const struct cdns_i3c_config *config = dev->config;
3052 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
3053
3054 cdns_i3c_read_hw_cfg(dev);
3055
3056 /* Clear all retaining regs */
3057 sys_write32(DEVS_CTRL_DEV_CLR_ALL, config->base + DEVS_CTRL);
3058
3059 uint32_t conf0 = sys_read32(config->base + CONF_STATUS0);
3060 uint32_t conf1 = sys_read32(config->base + CONF_STATUS1);
3061 data->max_devs = CONF_STATUS0_DEVS_NUM(conf0);
3062 data->free_rr_slots = GENMASK(data->max_devs, 1);
3063
3064 /* DDR supported bit moved in 1p7 revision along with dev role added */
3065 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
3066 ctrl_config->supported_hdr =
3067 (conf1 & CONF_STATUS1_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
3068 ctrl_config->is_secondary =
3069 (CONF_STATUS0_DEV_ROLE(conf0) == CONF_STATUS0_DEV_ROLE_SEC_MASTER) ? true
3070 : false;
3071 } else {
3072 ctrl_config->supported_hdr =
3073 (conf0 & CONF_STATUS0_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0;
3074 ctrl_config->is_secondary = (conf0 & CONF_STATUS0_SEC_MASTER) ? true : false;
3075 }
3076 k_mutex_init(&data->bus_lock);
3077 k_sem_init(&data->xfer.complete, 0, 1);
3078 k_sem_init(&data->ibi_hj_complete, 0, 1);
3079
3080 cdns_i3c_interrupts_disable(config);
3081 cdns_i3c_interrupts_clear(config);
3082
3083 config->irq_config_func(dev);
3084
3085 /* Ensure the bus is disabled. */
3086 sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);
3087
3088 /* determine prescaler timings for i3c and i2c scl */
3089 cdns_i3c_set_prescalers(dev);
3090
3091 enum i3c_bus_mode mode = i3c_bus_mode(&config->common.dev_list);
3092
3093 LOG_DBG("%s: i3c bus mode %d", dev->name, mode);
3094 int cdns_mode;
3095
3096 switch (mode) {
3097 case I3C_BUS_MODE_PURE:
3098 cdns_mode = CTRL_PURE_BUS_MODE;
3099 break;
3100 case I3C_BUS_MODE_MIXED_FAST:
3101 cdns_mode = CTRL_MIXED_FAST_BUS_MODE;
3102 break;
3103 case I3C_BUS_MODE_MIXED_LIMITED:
3104 case I3C_BUS_MODE_MIXED_SLOW:
3105 cdns_mode = CTRL_MIXED_SLOW_BUS_MODE;
3106 break;
3107 default:
3108 return -EINVAL;
3109 }
3110
3111 /*
3112 * When a Hot-Join request happens, disable all events coming from this device.
3113 * ENTDAA will be issued afterwards from the IBI workqueue.
3114 * HJ ACK is enabled later, after bus init, so that targets cannot indirectly force DAA.
3115 *
3116 * Also set the I3C bus mode based on the LVR of the I2C devices.
3117 */
3118 uint32_t ctrl = CTRL_HJ_DISEC | CTRL_MCS_EN | (CTRL_BUS_MODE_MASK & cdns_mode) |
3119 CTRL_THD_DELAY(cdns_i3c_clk_to_data_turnaround(dev));
3120 /* Disable Controllership requests as it is not supported yet by the driver */
3121 ctrl &= ~CTRL_MST_ACK;
3122
3123 /*
3124 * Cadence I3C release r105v1p0 and above support I3C v1.1 timing change
3125 * for tCASr_min = tCAS_min / 2, otherwise tCASr_min = tCAS_min (as
3126 * per MIPI spec v1.0)
3127 */
3128 if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 5)) {
3129 ctrl |= CTRL_I3C_11_SUPP;
3130 }
3131
3132 /* write ctrl register value */
3133 sys_write32(ctrl, config->base + CTRL);
3134
3135 /* enable Core */
3136 sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
3137
3138 /* Set fifo thresholds. */
3139 sys_write32(CMD_THR(I3C_CMDD_THR) | IBI_THR(I3C_IBID_THR) | CMDR_THR(I3C_CMDR_THR) |
3140 IBIR_THR(I3C_IBIR_THR),
3141 config->base + CMD_IBI_THR_CTRL);
3142
3143 /* Set TX/RX interrupt thresholds. */
3144 if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
3145 sys_write32(TX_THR(I3C_TX_THR) | RX_THR(data->hw_cfg.rx_mem_depth),
3146 config->base + TX_RX_THR_CTRL);
3147 } else {
3148 sys_write32(TX_THR(1) | RX_THR(1), config->base + TX_RX_THR_CTRL);
3149 sys_write32(SLV_DDR_TX_THR(0) | SLV_DDR_RX_THR(1),
3150 config->base + SLV_DDR_TX_RX_THR_CTRL);
3151 }
3152 /* enable target interrupts */
3153 sys_write32(SLV_INT_DA_UPD | SLV_INT_SDR_RD_COMP | SLV_INT_SDR_WR_COMP |
3154 SLV_INT_SDR_RX_THR | SLV_INT_SDR_TX_THR | SLV_INT_SDR_RX_UNF |
3155 SLV_INT_SDR_TX_OVF | SLV_INT_HJ_DONE | SLV_INT_DDR_WR_COMP |
3156 SLV_INT_DDR_RD_COMP | SLV_INT_DDR_RX_THR | SLV_INT_DDR_TX_THR,
3157 config->base + SLV_IER);
3158
3159 /* Enable IBI interrupts. */
3160 sys_write32(MST_INT_IBIR_THR | MST_INT_RX_UNF | MST_INT_HALTED | MST_INT_TX_OVF,
3161 config->base + MST_IER);
3162
3163 int ret = i3c_addr_slots_init(dev);
3164
3165 if (ret != 0) {
3166 return ret;
3167 }
3168
3169 /* Program retaining regs. */
3170 cdns_i3c_program_controller_retaining_reg(dev);
3171
3172 /* only primary controllers are responsible for initializing the bus */
3173 if (!ctrl_config->is_secondary) {
3174 /* Busy-wait for the bus to go idle. */
3175 k_busy_wait(201);
3176 /* Perform bus initialization */
3177 ret = i3c_bus_init(dev, &config->common.dev_list);
3178 #ifdef CONFIG_I3C_USE_IBI
3179 /* Bus Initialization Complete, allow HJ ACKs */
3180 sys_write32(CTRL_HJ_ACK | sys_read32(config->base + CTRL), config->base + CTRL);
3181 #endif
3182 }
3183
3184 return 0;
3185 }
3186
3187 static struct i3c_driver_api api = {
3188 .i2c_api.configure = cdns_i3c_i2c_api_configure,
3189 .i2c_api.transfer = cdns_i3c_i2c_api_transfer,
3190
3191 .configure = cdns_i3c_configure,
3192 .config_get = cdns_i3c_config_get,
3193
3194 .attach_i3c_device = cdns_i3c_attach_device,
3195 .reattach_i3c_device = cdns_i3c_reattach_device,
3196 .detach_i3c_device = cdns_i3c_detach_device,
3197 .attach_i2c_device = cdns_i3c_i2c_attach_device,
3198 .detach_i2c_device = cdns_i3c_i2c_detach_device,
3199
3200 .do_daa = cdns_i3c_do_daa,
3201 .do_ccc = cdns_i3c_do_ccc,
3202
3203 .i3c_device_find = cdns_i3c_device_find,
3204
3205 .i3c_xfers = cdns_i3c_transfer,
3206
3207 .target_tx_write = cdns_i3c_target_tx_write,
3208 .target_register = cdns_i3c_target_register,
3209 .target_unregister = cdns_i3c_target_unregister,
3210
3211 #ifdef CONFIG_I3C_USE_IBI
3212 .ibi_enable = cdns_i3c_controller_ibi_enable,
3213 .ibi_disable = cdns_i3c_controller_ibi_disable,
3214 .ibi_raise = cdns_i3c_target_ibi_raise,
3215 #endif
3216 };
3217
3218 #define CADENCE_I3C_INSTANTIATE(n) \
3219 static void cdns_i3c_config_func_##n(const struct device *dev); \
3220 static struct i3c_device_desc cdns_i3c_device_array_##n[] = I3C_DEVICE_ARRAY_DT_INST(n); \
3221 static struct i3c_i2c_device_desc cdns_i3c_i2c_device_array_##n[] = \
3222 I3C_I2C_DEVICE_ARRAY_DT_INST(n); \
3223 static const struct cdns_i3c_config i3c_config_##n = { \
3224 .base = DT_INST_REG_ADDR(n), \
3225 .input_frequency = DT_INST_PROP(n, input_clock_frequency), \
3226 .irq_config_func = cdns_i3c_config_func_##n, \
3227 .common.dev_list.i3c = cdns_i3c_device_array_##n, \
3228 .common.dev_list.num_i3c = ARRAY_SIZE(cdns_i3c_device_array_##n), \
3229 .common.dev_list.i2c = cdns_i3c_i2c_device_array_##n, \
3230 .common.dev_list.num_i2c = ARRAY_SIZE(cdns_i3c_i2c_device_array_##n), \
3231 }; \
3232 static struct cdns_i3c_data i3c_data_##n = { \
3233 .common.ctrl_config.scl.i3c = DT_INST_PROP_OR(n, i3c_scl_hz, 0), \
3234 .common.ctrl_config.scl.i2c = DT_INST_PROP_OR(n, i2c_scl_hz, 0), \
3235 }; \
3236 DEVICE_DT_INST_DEFINE(n, cdns_i3c_bus_init, NULL, &i3c_data_##n, &i3c_config_##n, \
3237 POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, &api); \
3238 static void cdns_i3c_config_func_##n(const struct device *dev) \
3239 { \
3240 IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), cdns_i3c_irq_handler, \
3241 DEVICE_DT_INST_GET(n), 0); \
3242 irq_enable(DT_INST_IRQN(n)); \
3243 };
3244
3245 #define DT_DRV_COMPAT cdns_i3c
3246 DT_INST_FOREACH_STATUS_OKAY(CADENCE_I3C_INSTANTIATE)
3247