1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2018 Cadence Design Systems Inc.
4 *
5 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
6 */
7
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/i3c/master.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/platform_device.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/workqueue.h>
25
26 #define DEV_ID 0x0
27 #define DEV_ID_I3C_MASTER 0x5034
28
29 #define CONF_STATUS0 0x4
30 #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
31 #define CONF_STATUS0_ECC_CHK BIT(28)
32 #define CONF_STATUS0_INTEG_CHK BIT(27)
33 #define CONF_STATUS0_CSR_DAP_CHK BIT(26)
34 #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
35 #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
36 #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
37 #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
38 #define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 6))
39 #define CONF_STATUS0_SUPPORTS_DDR BIT(5)
40 #define CONF_STATUS0_SEC_MASTER BIT(4)
41 #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
42
43 #define CONF_STATUS1 0x8
44 #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
45 #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
46 #define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
47 #define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
48 #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
49 #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
50 #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
51
52 #define REV_ID 0xc
53 #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
54 #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
55 #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
56 #define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
57
58 #define CTRL 0x10
59 #define CTRL_DEV_EN BIT(31)
60 #define CTRL_HALT_EN BIT(30)
61 #define CTRL_MCS BIT(29)
62 #define CTRL_MCS_EN BIT(28)
63 #define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
64 #define CTRL_HJ_DISEC BIT(8)
65 #define CTRL_MST_ACK BIT(7)
66 #define CTRL_HJ_ACK BIT(6)
67 #define CTRL_HJ_INIT BIT(5)
68 #define CTRL_MST_INIT BIT(4)
69 #define CTRL_AHDR_OPT BIT(3)
70 #define CTRL_PURE_BUS_MODE 0
71 #define CTRL_MIXED_FAST_BUS_MODE 2
72 #define CTRL_MIXED_SLOW_BUS_MODE 3
73 #define CTRL_BUS_MODE_MASK GENMASK(1, 0)
74 #define THD_DELAY_MAX 3
75
76 #define PRESCL_CTRL0 0x14
77 #define PRESCL_CTRL0_I2C(x) ((x) << 16)
78 #define PRESCL_CTRL0_I3C(x) (x)
79 #define PRESCL_CTRL0_MAX GENMASK(9, 0)
80
81 #define PRESCL_CTRL1 0x18
82 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
83 #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
84 #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
85 #define PRESCL_CTRL1_OD_LOW(x) (x)
86
87 #define MST_IER 0x20
88 #define MST_IDR 0x24
89 #define MST_IMR 0x28
90 #define MST_ICR 0x2c
91 #define MST_ISR 0x30
92 #define MST_INT_HALTED BIT(18)
93 #define MST_INT_MR_DONE BIT(17)
94 #define MST_INT_IMM_COMP BIT(16)
95 #define MST_INT_TX_THR BIT(15)
96 #define MST_INT_TX_OVF BIT(14)
97 #define MST_INT_IBID_THR BIT(12)
98 #define MST_INT_IBID_UNF BIT(11)
99 #define MST_INT_IBIR_THR BIT(10)
100 #define MST_INT_IBIR_UNF BIT(9)
101 #define MST_INT_IBIR_OVF BIT(8)
102 #define MST_INT_RX_THR BIT(7)
103 #define MST_INT_RX_UNF BIT(6)
104 #define MST_INT_CMDD_EMP BIT(5)
105 #define MST_INT_CMDD_THR BIT(4)
106 #define MST_INT_CMDD_OVF BIT(3)
107 #define MST_INT_CMDR_THR BIT(2)
108 #define MST_INT_CMDR_UNF BIT(1)
109 #define MST_INT_CMDR_OVF BIT(0)
110
111 #define MST_STATUS0 0x34
112 #define MST_STATUS0_IDLE BIT(18)
113 #define MST_STATUS0_HALTED BIT(17)
114 #define MST_STATUS0_MASTER_MODE BIT(16)
115 #define MST_STATUS0_TX_FULL BIT(13)
116 #define MST_STATUS0_IBID_FULL BIT(12)
117 #define MST_STATUS0_IBIR_FULL BIT(11)
118 #define MST_STATUS0_RX_FULL BIT(10)
119 #define MST_STATUS0_CMDD_FULL BIT(9)
120 #define MST_STATUS0_CMDR_FULL BIT(8)
121 #define MST_STATUS0_TX_EMP BIT(5)
122 #define MST_STATUS0_IBID_EMP BIT(4)
123 #define MST_STATUS0_IBIR_EMP BIT(3)
124 #define MST_STATUS0_RX_EMP BIT(2)
125 #define MST_STATUS0_CMDD_EMP BIT(1)
126 #define MST_STATUS0_CMDR_EMP BIT(0)
127
128 #define CMDR 0x38
129 #define CMDR_NO_ERROR 0
130 #define CMDR_DDR_PREAMBLE_ERROR 1
131 #define CMDR_DDR_PARITY_ERROR 2
132 #define CMDR_DDR_RX_FIFO_OVF 3
133 #define CMDR_DDR_TX_FIFO_UNF 4
134 #define CMDR_M0_ERROR 5
135 #define CMDR_M1_ERROR 6
136 #define CMDR_M2_ERROR 7
137 #define CMDR_MST_ABORT 8
138 #define CMDR_NACK_RESP 9
139 #define CMDR_INVALID_DA 10
140 #define CMDR_DDR_DROPPED 11
141 #define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
142 #define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
143 #define CMDR_CMDID_HJACK_DISEC 0xfe
144 #define CMDR_CMDID_HJACK_ENTDAA 0xff
145 #define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
146
147 #define IBIR 0x3c
148 #define IBIR_ACKED BIT(12)
149 #define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
150 #define IBIR_ERROR BIT(7)
151 #define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
152 #define IBIR_TYPE_IBI 0
153 #define IBIR_TYPE_HJ 1
154 #define IBIR_TYPE_MR 2
155 #define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
156
157 #define SLV_IER 0x40
158 #define SLV_IDR 0x44
159 #define SLV_IMR 0x48
160 #define SLV_ICR 0x4c
161 #define SLV_ISR 0x50
162 #define SLV_INT_TM BIT(20)
163 #define SLV_INT_ERROR BIT(19)
164 #define SLV_INT_EVENT_UP BIT(18)
165 #define SLV_INT_HJ_DONE BIT(17)
166 #define SLV_INT_MR_DONE BIT(16)
167 #define SLV_INT_DA_UPD BIT(15)
168 #define SLV_INT_SDR_FAIL BIT(14)
169 #define SLV_INT_DDR_FAIL BIT(13)
170 #define SLV_INT_M_RD_ABORT BIT(12)
171 #define SLV_INT_DDR_RX_THR BIT(11)
172 #define SLV_INT_DDR_TX_THR BIT(10)
173 #define SLV_INT_SDR_RX_THR BIT(9)
174 #define SLV_INT_SDR_TX_THR BIT(8)
175 #define SLV_INT_DDR_RX_UNF BIT(7)
176 #define SLV_INT_DDR_TX_OVF BIT(6)
177 #define SLV_INT_SDR_RX_UNF BIT(5)
178 #define SLV_INT_SDR_TX_OVF BIT(4)
179 #define SLV_INT_DDR_RD_COMP BIT(3)
180 #define SLV_INT_DDR_WR_COMP BIT(2)
181 #define SLV_INT_SDR_RD_COMP BIT(1)
182 #define SLV_INT_SDR_WR_COMP BIT(0)
183
184 #define SLV_STATUS0 0x54
185 #define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
186 #define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
187
188 #define SLV_STATUS1 0x58
189 #define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
190 #define SLV_STATUS1_VEN_TM BIT(19)
191 #define SLV_STATUS1_HJ_DIS BIT(18)
192 #define SLV_STATUS1_MR_DIS BIT(17)
193 #define SLV_STATUS1_PROT_ERR BIT(16)
194 #define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
195 #define SLV_STATUS1_HAS_DA BIT(8)
196 #define SLV_STATUS1_DDR_RX_FULL BIT(7)
197 #define SLV_STATUS1_DDR_TX_FULL BIT(6)
198 #define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
199 #define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
200 #define SLV_STATUS1_SDR_RX_FULL BIT(3)
201 #define SLV_STATUS1_SDR_TX_FULL BIT(2)
202 #define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
203 #define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
204
205 #define CMD0_FIFO 0x60
206 #define CMD0_FIFO_IS_DDR BIT(31)
207 #define CMD0_FIFO_IS_CCC BIT(30)
208 #define CMD0_FIFO_BCH BIT(29)
209 #define XMIT_BURST_STATIC_SUBADDR 0
210 #define XMIT_SINGLE_INC_SUBADDR 1
211 #define XMIT_SINGLE_STATIC_SUBADDR 2
212 #define XMIT_BURST_WITHOUT_SUBADDR 3
213 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
214 #define CMD0_FIFO_SBCA BIT(26)
215 #define CMD0_FIFO_RSBC BIT(25)
216 #define CMD0_FIFO_IS_10B BIT(24)
217 #define CMD0_FIFO_PL_LEN(l) ((l) << 12)
218 #define CMD0_FIFO_PL_LEN_MAX 4095
219 #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
220 #define CMD0_FIFO_RNW BIT(0)
221
222 #define CMD1_FIFO 0x64
223 #define CMD1_FIFO_CMDID(id) ((id) << 24)
224 #define CMD1_FIFO_CSRADDR(a) (a)
225 #define CMD1_FIFO_CCC(id) (id)
226
227 #define TX_FIFO 0x68
228
229 #define IMD_CMD0 0x70
230 #define IMD_CMD0_PL_LEN(l) ((l) << 12)
231 #define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
232 #define IMD_CMD0_RNW BIT(0)
233
234 #define IMD_CMD1 0x74
235 #define IMD_CMD1_CCC(id) (id)
236
237 #define IMD_DATA 0x78
238 #define RX_FIFO 0x80
239 #define IBI_DATA_FIFO 0x84
240 #define SLV_DDR_TX_FIFO 0x88
241 #define SLV_DDR_RX_FIFO 0x8c
242
243 #define CMD_IBI_THR_CTRL 0x90
244 #define IBIR_THR(t) ((t) << 24)
245 #define CMDR_THR(t) ((t) << 16)
246 #define IBI_THR(t) ((t) << 8)
247 #define CMD_THR(t) (t)
248
249 #define TX_RX_THR_CTRL 0x94
250 #define RX_THR(t) ((t) << 16)
251 #define TX_THR(t) (t)
252
253 #define SLV_DDR_TX_RX_THR_CTRL 0x98
254 #define SLV_DDR_RX_THR(t) ((t) << 16)
255 #define SLV_DDR_TX_THR(t) (t)
256
257 #define FLUSH_CTRL 0x9c
258 #define FLUSH_IBI_RESP BIT(23)
259 #define FLUSH_CMD_RESP BIT(22)
260 #define FLUSH_SLV_DDR_RX_FIFO BIT(22)
261 #define FLUSH_SLV_DDR_TX_FIFO BIT(21)
262 #define FLUSH_IMM_FIFO BIT(20)
263 #define FLUSH_IBI_FIFO BIT(19)
264 #define FLUSH_RX_FIFO BIT(18)
265 #define FLUSH_TX_FIFO BIT(17)
266 #define FLUSH_CMD_FIFO BIT(16)
267
268 #define TTO_PRESCL_CTRL0 0xb0
269 #define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
270 #define TTO_PRESCL_CTRL0_DIVA(x) (x)
271
272 #define TTO_PRESCL_CTRL1 0xb4
273 #define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
274 #define TTO_PRESCL_CTRL1_DIVA(x) (x)
275
276 #define DEVS_CTRL 0xb8
277 #define DEVS_CTRL_DEV_CLR_SHIFT 16
278 #define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
279 #define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
280 #define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
281 #define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
282 #define MAX_DEVS 16
283
284 #define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
285 #define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
286 #define DEV_ID_RR0_HDR_CAP BIT(10)
287 #define DEV_ID_RR0_IS_I3C BIT(9)
288 #define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
289 #define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
290 (((a) & GENMASK(9, 7)) << 6))
291 #define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
292 (((x) >> 6) & GENMASK(9, 7)))
293
294 #define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
295 #define DEV_ID_RR1_PID_MSB(pid) (pid)
296
297 #define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
298 #define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
299 #define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
300 #define DEV_ID_RR2_DCR(dcr) (dcr)
301 #define DEV_ID_RR2_LVR(lvr) (lvr)
302
303 #define SIR_MAP(x) (0x180 + ((x) * 4))
304 #define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
305 #define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
306 #define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
307 #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
308 #define DEV_ROLE_SLAVE 0
309 #define DEV_ROLE_MASTER 1
310 #define SIR_MAP_DEV_ROLE(role) ((role) << 14)
311 #define SIR_MAP_DEV_SLOW BIT(13)
312 #define SIR_MAP_DEV_PL(l) ((l) << 8)
313 #define SIR_MAP_PL_MAX GENMASK(4, 0)
314 #define SIR_MAP_DEV_DA(a) ((a) << 1)
315 #define SIR_MAP_DEV_ACK BIT(0)
316
317 #define GPIR_WORD(x) (0x200 + ((x) * 4))
318 #define GPI_REG(val, id) \
319 (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
320
321 #define GPOR_WORD(x) (0x220 + ((x) * 4))
322 #define GPO_REG(val, id) \
323 (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
324
325 #define ASF_INT_STATUS 0x300
326 #define ASF_INT_RAW_STATUS 0x304
327 #define ASF_INT_MASK 0x308
328 #define ASF_INT_TEST 0x30c
329 #define ASF_INT_FATAL_SELECT 0x310
330 #define ASF_INTEGRITY_ERR BIT(6)
331 #define ASF_PROTOCOL_ERR BIT(5)
332 #define ASF_TRANS_TIMEOUT_ERR BIT(4)
333 #define ASF_CSR_ERR BIT(3)
334 #define ASF_DAP_ERR BIT(2)
335 #define ASF_SRAM_UNCORR_ERR BIT(1)
336 #define ASF_SRAM_CORR_ERR BIT(0)
337
338 #define ASF_SRAM_CORR_FAULT_STATUS 0x320
339 #define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
340 #define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
341 #define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
342
343 #define ASF_SRAM_FAULT_STATS 0x328
344 #define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
345 #define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
346
347 #define ASF_TRANS_TOUT_CTRL 0x330
348 #define ASF_TRANS_TOUT_EN BIT(31)
349 #define ASF_TRANS_TOUT_VAL(x) (x)
350
351 #define ASF_TRANS_TOUT_FAULT_MASK 0x334
352 #define ASF_TRANS_TOUT_FAULT_STATUS 0x338
353 #define ASF_TRANS_TOUT_FAULT_APB BIT(3)
354 #define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
355 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
356 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
357
358 #define ASF_PROTO_FAULT_MASK 0x340
359 #define ASF_PROTO_FAULT_STATUS 0x344
360 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
361 #define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
362 #define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
363 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
364 #define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
365 #define ASF_PROTO_FAULT_M(x) BIT(x)
366
367 struct cdns_i3c_master_caps {
368 u32 cmdfifodepth;
369 u32 cmdrfifodepth;
370 u32 txfifodepth;
371 u32 rxfifodepth;
372 u32 ibirfifodepth;
373 };
374
375 struct cdns_i3c_cmd {
376 u32 cmd0;
377 u32 cmd1;
378 u32 tx_len;
379 const void *tx_buf;
380 u32 rx_len;
381 void *rx_buf;
382 u32 error;
383 };
384
385 struct cdns_i3c_xfer {
386 struct list_head node;
387 struct completion comp;
388 int ret;
389 unsigned int ncmds;
390 struct cdns_i3c_cmd cmds[];
391 };
392
393 struct cdns_i3c_data {
394 u8 thd_delay_ns;
395 };
396
397 struct cdns_i3c_master {
398 struct work_struct hj_work;
399 struct i3c_master_controller base;
400 u32 free_rr_slots;
401 unsigned int maxdevs;
402 struct {
403 unsigned int num_slots;
404 struct i3c_dev_desc **slots;
405 spinlock_t lock;
406 } ibi;
407 struct {
408 struct list_head list;
409 struct cdns_i3c_xfer *cur;
410 spinlock_t lock;
411 } xferqueue;
412 void __iomem *regs;
413 struct clk *sysclk;
414 struct clk *pclk;
415 struct cdns_i3c_master_caps caps;
416 unsigned long i3c_scl_lim;
417 const struct cdns_i3c_data *devdata;
418 };
419
420 static inline struct cdns_i3c_master *
421 to_cdns_i3c_master(struct i3c_master_controller *master)
422 {
423 return container_of(master, struct cdns_i3c_master, base);
424 }
425
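/*
 * Push @nbytes of payload into the TX FIFO, one 32-bit word at a time. A
 * trailing partial word (1-3 bytes) is zero-padded into a temporary u32 so
 * the FIFO only ever sees full-word writes.
 */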
426 static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
427 const u8 *bytes, int nbytes)
428 {
429 writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
430 if (nbytes & 3) {
431 u32 tmp = 0;
432
433 memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
434 writesl(master->regs + TX_FIFO, &tmp, 1);
435 }
436 }
437
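/*
 * Counterpart of the TX helper above: drain @nbytes from the RX FIFO, with
 * any trailing 1-3 bytes extracted from one extra FIFO word.
 */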
438 static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
439 u8 *bytes, int nbytes)
440 {
441 readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
442 if (nbytes & 3) {
443 u32 tmp;
444
445 readsl(master->regs + RX_FIFO, &tmp, 1);
446 memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
447 }
448 }
449
450 static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
451 const struct i3c_ccc_cmd *cmd)
452 {
453 if (cmd->ndests > 1)
454 return false;
455
456 switch (cmd->id) {
457 case I3C_CCC_ENEC(true):
458 case I3C_CCC_ENEC(false):
459 case I3C_CCC_DISEC(true):
460 case I3C_CCC_DISEC(false):
461 case I3C_CCC_ENTAS(0, true):
462 case I3C_CCC_ENTAS(0, false):
463 case I3C_CCC_RSTDAA(true):
464 case I3C_CCC_RSTDAA(false):
465 case I3C_CCC_ENTDAA:
466 case I3C_CCC_SETMWL(true):
467 case I3C_CCC_SETMWL(false):
468 case I3C_CCC_SETMRL(true):
469 case I3C_CCC_SETMRL(false):
470 case I3C_CCC_DEFSLVS:
471 case I3C_CCC_ENTHDR(0):
472 case I3C_CCC_SETDASA:
473 case I3C_CCC_SETNEWDA:
474 case I3C_CCC_GETMWL:
475 case I3C_CCC_GETMRL:
476 case I3C_CCC_GETPID:
477 case I3C_CCC_GETBCR:
478 case I3C_CCC_GETDCR:
479 case I3C_CCC_GETSTATUS:
480 case I3C_CCC_GETACCMST:
481 case I3C_CCC_GETMXDS:
482 case I3C_CCC_GETHDRCAP:
483 return true;
484 default:
485 break;
486 }
487
488 return false;
489 }
490
491 static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
492 {
493 u32 status;
494
495 writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
496
497 return readl_poll_timeout(master->regs + MST_STATUS0, status,
498 status & MST_STATUS0_IDLE, 10, 1000000);
499 }
500
501 static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
502 {
503 writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
504 }
505
506 static struct cdns_i3c_xfer *
507 cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
508 {
509 struct cdns_i3c_xfer *xfer;
510
511 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
512 if (!xfer)
513 return NULL;
514
515 INIT_LIST_HEAD(&xfer->node);
516 xfer->ncmds = ncmds;
517 xfer->ret = -ETIMEDOUT;
518
519 return xfer;
520 }
521
522 static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
523 {
524 kfree(xfer);
525 }
526
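/*
 * Start the transfer pointed to by xferqueue.cur. Must be called with
 * xferqueue.lock held: the payloads are pushed to the TX FIFO first, then a
 * CMD1/CMD0 descriptor pair is queued per command, and completion is
 * signalled through the MST_INT_CMDD_EMP interrupt unmasked at the end.
 */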
527 static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
528 {
529 struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
530 unsigned int i;
531
532 if (!xfer)
533 return;
534
535 writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
536 for (i = 0; i < xfer->ncmds; i++) {
537 struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
538
539 cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
540 cmd->tx_len);
541 }
542
543 for (i = 0; i < xfer->ncmds; i++) {
544 struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
545
546 writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
547 master->regs + CMD1_FIFO);
548 writel(cmd->cmd0, master->regs + CMD0_FIFO);
549 }
550
551 writel(readl(master->regs + CTRL) | CTRL_MCS,
552 master->regs + CTRL);
553 writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
554 }
555
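/*
 * Completion path, called from the interrupt handler with xferqueue.lock
 * held: pop all pending command responses from the CMDR FIFO to collect
 * per-command error codes and RX data, wake up the waiter and start the
 * next transfer in the queue, if any.
 */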
556 static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
557 u32 isr)
558 {
559 struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
560 int i, ret = 0;
561 u32 status0;
562
563 if (!xfer)
564 return;
565
566 if (!(isr & MST_INT_CMDD_EMP))
567 return;
568
569 writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
570
571 for (status0 = readl(master->regs + MST_STATUS0);
572 !(status0 & MST_STATUS0_CMDR_EMP);
573 status0 = readl(master->regs + MST_STATUS0)) {
574 struct cdns_i3c_cmd *cmd;
575 u32 cmdr, rx_len, id;
576
577 cmdr = readl(master->regs + CMDR);
578 id = CMDR_CMDID(cmdr);
579 if (id == CMDR_CMDID_HJACK_DISEC ||
580 id == CMDR_CMDID_HJACK_ENTDAA ||
581 WARN_ON(id >= xfer->ncmds))
582 continue;
583
584 cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
585 rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
586 cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
587 cmd->error = CMDR_ERROR(cmdr);
588 }
589
590 for (i = 0; i < xfer->ncmds; i++) {
591 switch (xfer->cmds[i].error) {
592 case CMDR_NO_ERROR:
593 break;
594
595 case CMDR_DDR_PREAMBLE_ERROR:
596 case CMDR_DDR_PARITY_ERROR:
597 case CMDR_M0_ERROR:
598 case CMDR_M1_ERROR:
599 case CMDR_M2_ERROR:
600 case CMDR_MST_ABORT:
601 case CMDR_NACK_RESP:
602 case CMDR_DDR_DROPPED:
603 ret = -EIO;
604 break;
605
606 case CMDR_DDR_RX_FIFO_OVF:
607 case CMDR_DDR_TX_FIFO_UNF:
608 ret = -ENOSPC;
609 break;
610
611 case CMDR_INVALID_DA:
612 default:
613 ret = -EINVAL;
614 break;
615 }
616 }
617
618 xfer->ret = ret;
619 complete(&xfer->comp);
620
621 xfer = list_first_entry_or_null(&master->xferqueue.list,
622 struct cdns_i3c_xfer, node);
623 if (xfer)
624 list_del_init(&xfer->node);
625
626 master->xferqueue.cur = xfer;
627 cdns_i3c_master_start_xfer_locked(master);
628 }
629
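/*
 * Queue a transfer: start it immediately if the controller is idle,
 * otherwise add it to the list processed from the completion path.
 */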
630 static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
631 struct cdns_i3c_xfer *xfer)
632 {
633 unsigned long flags;
634
635 init_completion(&xfer->comp);
636 spin_lock_irqsave(&master->xferqueue.lock, flags);
637 if (master->xferqueue.cur) {
638 list_add_tail(&xfer->node, &master->xferqueue.list);
639 } else {
640 master->xferqueue.cur = xfer;
641 cdns_i3c_master_start_xfer_locked(master);
642 }
643 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
644 }
645
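/*
 * Cancel a transfer that timed out. If it is the one currently in flight,
 * the controller is disabled, the command/response/data FIFOs are flushed
 * and the controller is re-enabled; otherwise the transfer is simply
 * removed from the wait list.
 */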
646 static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
647 struct cdns_i3c_xfer *xfer)
648 {
649 unsigned long flags;
650
651 spin_lock_irqsave(&master->xferqueue.lock, flags);
652 if (master->xferqueue.cur == xfer) {
653 u32 status;
654
655 writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
656 master->regs + CTRL);
657 readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
658 status & MST_STATUS0_IDLE, 10,
659 1000000);
660 master->xferqueue.cur = NULL;
661 writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
662 FLUSH_CMD_RESP,
663 master->regs + FLUSH_CTRL);
664 writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
665 writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
666 master->regs + CTRL);
667 } else {
668 list_del_init(&xfer->node);
669 }
670 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
671 }
672
673 static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
674 {
675 switch (cmd->error) {
676 case CMDR_M0_ERROR:
677 return I3C_ERROR_M0;
678
679 case CMDR_M1_ERROR:
680 return I3C_ERROR_M1;
681
682 case CMDR_M2_ERROR:
683 case CMDR_NACK_RESP:
684 return I3C_ERROR_M2;
685
686 default:
687 break;
688 }
689
690 return I3C_ERROR_UNKNOWN;
691 }
692
693 static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
694 struct i3c_ccc_cmd *cmd)
695 {
696 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
697 struct cdns_i3c_xfer *xfer;
698 struct cdns_i3c_cmd *ccmd;
699 int ret;
700
701 xfer = cdns_i3c_master_alloc_xfer(master, 1);
702 if (!xfer)
703 return -ENOMEM;
704
705 ccmd = xfer->cmds;
706 ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
707 ccmd->cmd0 = CMD0_FIFO_IS_CCC |
708 CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
709
710 if (cmd->id & I3C_CCC_DIRECT)
711 ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
712
713 if (cmd->rnw) {
714 ccmd->cmd0 |= CMD0_FIFO_RNW;
715 ccmd->rx_buf = cmd->dests[0].payload.data;
716 ccmd->rx_len = cmd->dests[0].payload.len;
717 } else {
718 ccmd->tx_buf = cmd->dests[0].payload.data;
719 ccmd->tx_len = cmd->dests[0].payload.len;
720 }
721
722 cdns_i3c_master_queue_xfer(master, xfer);
723 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
724 cdns_i3c_master_unqueue_xfer(master, xfer);
725
726 ret = xfer->ret;
727 cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
728 cdns_i3c_master_free_xfer(xfer);
729
730 return ret;
731 }
732
733 static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
734 struct i3c_priv_xfer *xfers,
735 int nxfers)
736 {
737 struct i3c_master_controller *m = i3c_dev_get_master(dev);
738 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
739 int txslots = 0, rxslots = 0, i, ret;
740 struct cdns_i3c_xfer *cdns_xfer;
741
742 for (i = 0; i < nxfers; i++) {
743 if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
744 return -ENOTSUPP;
745 }
746
747 if (!nxfers)
748 return 0;
749
750 if (nxfers > master->caps.cmdfifodepth ||
751 nxfers > master->caps.cmdrfifodepth)
752 return -ENOTSUPP;
753
754 /*
755 * First make sure that all transactions (block of transfers separated
756 * by a STOP marker) fit in the FIFOs.
757 */
758 for (i = 0; i < nxfers; i++) {
759 if (xfers[i].rnw)
760 rxslots += DIV_ROUND_UP(xfers[i].len, 4);
761 else
762 txslots += DIV_ROUND_UP(xfers[i].len, 4);
763 }
764
765 if (rxslots > master->caps.rxfifodepth ||
766 txslots > master->caps.txfifodepth)
767 return -ENOTSUPP;
768
769 cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
770 if (!cdns_xfer)
771 return -ENOMEM;
772
773 for (i = 0; i < nxfers; i++) {
774 struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
775 u32 pl_len = xfers[i].len;
776
777 ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
778 CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
779
780 if (xfers[i].rnw) {
781 ccmd->cmd0 |= CMD0_FIFO_RNW;
782 ccmd->rx_buf = xfers[i].data.in;
783 ccmd->rx_len = xfers[i].len;
784 pl_len++;
785 } else {
786 ccmd->tx_buf = xfers[i].data.out;
787 ccmd->tx_len = xfers[i].len;
788 }
789
790 ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
791
792 if (i < nxfers - 1)
793 ccmd->cmd0 |= CMD0_FIFO_RSBC;
794
795 if (!i)
796 ccmd->cmd0 |= CMD0_FIFO_BCH;
797 }
798
799 cdns_i3c_master_queue_xfer(master, cdns_xfer);
800 if (!wait_for_completion_timeout(&cdns_xfer->comp,
801 msecs_to_jiffies(1000)))
802 cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
803
804 ret = cdns_xfer->ret;
805
806 for (i = 0; i < nxfers; i++)
807 xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
808
809 cdns_i3c_master_free_xfer(cdns_xfer);
810
811 return ret;
812 }
813
814 static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
815 const struct i2c_msg *xfers, int nxfers)
816 {
817 struct i3c_master_controller *m = i2c_dev_get_master(dev);
818 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
819 unsigned int nrxwords = 0, ntxwords = 0;
820 struct cdns_i3c_xfer *xfer;
821 int i, ret = 0;
822
823 if (nxfers > master->caps.cmdfifodepth)
824 return -ENOTSUPP;
825
826 for (i = 0; i < nxfers; i++) {
827 if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
828 return -ENOTSUPP;
829
830 if (xfers[i].flags & I2C_M_RD)
831 nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
832 else
833 ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
834 }
835
836 if (ntxwords > master->caps.txfifodepth ||
837 nrxwords > master->caps.rxfifodepth)
838 return -ENOTSUPP;
839
840 xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
841 if (!xfer)
842 return -ENOMEM;
843
844 for (i = 0; i < nxfers; i++) {
845 struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
846
847 ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
848 CMD0_FIFO_PL_LEN(xfers[i].len) |
849 CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
850
851 if (xfers[i].flags & I2C_M_TEN)
852 ccmd->cmd0 |= CMD0_FIFO_IS_10B;
853
854 if (xfers[i].flags & I2C_M_RD) {
855 ccmd->cmd0 |= CMD0_FIFO_RNW;
856 ccmd->rx_buf = xfers[i].buf;
857 ccmd->rx_len = xfers[i].len;
858 } else {
859 ccmd->tx_buf = xfers[i].buf;
860 ccmd->tx_len = xfers[i].len;
861 }
862 }
863
864 cdns_i3c_master_queue_xfer(master, xfer);
865 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
866 cdns_i3c_master_unqueue_xfer(master, xfer);
867
868 ret = xfer->ret;
869 cdns_i3c_master_free_xfer(xfer);
870
871 return ret;
872 }
873
874 struct cdns_i3c_i2c_dev_data {
875 u16 id;
876 s16 ibi;
877 struct i3c_generic_ibi_pool *ibi_pool;
878 };
879
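/*
 * Encode a device address in the DEV_ID_RRx layout: addr[6:0] in bits
 * [7:1], addr[9:7] in bits [15:13] and the inverted XOR (odd parity) of
 * addr[6:0] in bit 0.
 */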
880 static u32 prepare_rr0_dev_address(u32 addr)
881 {
882 u32 ret = (addr << 1) & 0xff;
883
884 /* RR0[7:1] = addr[6:0] */
885 ret |= (addr & GENMASK(6, 0)) << 1;
886
887 /* RR0[15:13] = addr[9:7] */
888 ret |= (addr & GENMASK(9, 7)) << 6;
889
890 /* RR0[0] = ~XOR(addr[6:0]) */
891 if (!(hweight8(addr & 0x7f) & 1))
892 ret |= 1;
893
894 return ret;
895 }
896
897 static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
898 {
899 struct i3c_master_controller *m = i3c_dev_get_master(dev);
900 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
901 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
902 u32 rr;
903
904 rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
905 dev->info.dyn_addr :
906 dev->info.static_addr);
907 writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
908 }
909
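/*
 * Find a retaining register slot: when no dynamic address is given, return
 * the first free slot; otherwise return the active I3C slot already holding
 * @dyn_addr. Slot 0 is skipped, as it describes the master itself.
 */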
910 static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
911 u8 dyn_addr)
912 {
913 unsigned long activedevs;
914 u32 rr;
915 int i;
916
917 if (!dyn_addr) {
918 if (!master->free_rr_slots)
919 return -ENOSPC;
920
921 return ffs(master->free_rr_slots) - 1;
922 }
923
924 activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
925 activedevs &= ~BIT(0);
926
927 for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
928 rr = readl(master->regs + DEV_ID_RR0(i));
929 if (!(rr & DEV_ID_RR0_IS_I3C) ||
930 DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
931 continue;
932
933 return i;
934 }
935
936 return -EINVAL;
937 }
938
939 static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
940 u8 old_dyn_addr)
941 {
942 cdns_i3c_master_upd_i3c_addr(dev);
943
944 return 0;
945 }
946
947 static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
948 {
949 struct i3c_master_controller *m = i3c_dev_get_master(dev);
950 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
951 struct cdns_i3c_i2c_dev_data *data;
952 int slot;
953
954 data = kzalloc(sizeof(*data), GFP_KERNEL);
955 if (!data)
956 return -ENOMEM;
957
958 slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
959 if (slot < 0) {
960 kfree(data);
961 return slot;
962 }
963
964 data->ibi = -1;
965 data->id = slot;
966 i3c_dev_set_master_data(dev, data);
967 master->free_rr_slots &= ~BIT(slot);
968
969 if (!dev->info.dyn_addr) {
970 cdns_i3c_master_upd_i3c_addr(dev);
971 writel(readl(master->regs + DEVS_CTRL) |
972 DEVS_CTRL_DEV_ACTIVE(data->id),
973 master->regs + DEVS_CTRL);
974 }
975
976 return 0;
977 }
978
979 static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
980 {
981 struct i3c_master_controller *m = i3c_dev_get_master(dev);
982 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
983 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
984
985 writel(readl(master->regs + DEVS_CTRL) |
986 DEVS_CTRL_DEV_CLR(data->id),
987 master->regs + DEVS_CTRL);
988
989 i3c_dev_set_master_data(dev, NULL);
990 master->free_rr_slots |= BIT(data->id);
991 kfree(data);
992 }
993
994 static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
995 {
996 struct i3c_master_controller *m = i2c_dev_get_master(dev);
997 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
998 struct cdns_i3c_i2c_dev_data *data;
999 int slot;
1000
1001 slot = cdns_i3c_master_get_rr_slot(master, 0);
1002 if (slot < 0)
1003 return slot;
1004
1005 data = kzalloc(sizeof(*data), GFP_KERNEL);
1006 if (!data)
1007 return -ENOMEM;
1008
1009 data->id = slot;
1010 master->free_rr_slots &= ~BIT(slot);
1011 i2c_dev_set_master_data(dev, data);
1012
1013 writel(prepare_rr0_dev_address(dev->addr),
1014 master->regs + DEV_ID_RR0(data->id));
1015 writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
1016 writel(readl(master->regs + DEVS_CTRL) |
1017 DEVS_CTRL_DEV_ACTIVE(data->id),
1018 master->regs + DEVS_CTRL);
1019
1020 return 0;
1021 }
1022
1023 static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
1024 {
1025 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1026 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1027 struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1028
1029 writel(readl(master->regs + DEVS_CTRL) |
1030 DEVS_CTRL_DEV_CLR(data->id),
1031 master->regs + DEVS_CTRL);
1032 master->free_rr_slots |= BIT(data->id);
1033
1034 i2c_dev_set_master_data(dev, NULL);
1035 kfree(data);
1036 }
1037
1038 static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
1039 {
1040 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1041
1042 cdns_i3c_master_disable(master);
1043 }
1044
1045 static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
1046 unsigned int slot,
1047 struct i3c_device_info *info)
1048 {
1049 u32 rr;
1050
1051 memset(info, 0, sizeof(*info));
1052 rr = readl(master->regs + DEV_ID_RR0(slot));
1053 info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
1054 rr = readl(master->regs + DEV_ID_RR2(slot));
1055 info->dcr = rr;
1056 info->bcr = rr >> 8;
1057 info->pid = rr >> 16;
1058 info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
1059 }
1060
1061 static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
1062 {
1063 struct i3c_master_controller *m = &master->base;
1064 unsigned long i3c_lim_period, pres_step, ncycles;
1065 struct i3c_bus *bus = i3c_master_get_bus(m);
1066 unsigned long new_i3c_scl_lim = 0;
1067 struct i3c_dev_desc *dev;
1068 u32 prescl1, ctrl;
1069
1070 i3c_bus_for_each_i3cdev(bus, dev) {
1071 unsigned long max_fscl;
1072
1073 max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
1074 I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
1075 switch (max_fscl) {
1076 case I3C_SDR1_FSCL_8MHZ:
1077 max_fscl = 8000000;
1078 break;
1079 case I3C_SDR2_FSCL_6MHZ:
1080 max_fscl = 6000000;
1081 break;
1082 case I3C_SDR3_FSCL_4MHZ:
1083 max_fscl = 4000000;
1084 break;
1085 case I3C_SDR4_FSCL_2MHZ:
1086 max_fscl = 2000000;
1087 break;
1088 case I3C_SDR0_FSCL_MAX:
1089 default:
1090 max_fscl = 0;
1091 break;
1092 }
1093
1094 if (max_fscl &&
1095 (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
1096 new_i3c_scl_lim = max_fscl;
1097 }
1098
1099 /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
1100 if (new_i3c_scl_lim == master->i3c_scl_lim)
1101 return;
1102 master->i3c_scl_lim = new_i3c_scl_lim;
1103 if (!new_i3c_scl_lim)
1104 return;
1105 pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
1106
1107 /* Configure PP_LOW to meet I3C slave limitations. */
1108 prescl1 = readl(master->regs + PRESCL_CTRL1) &
1109 ~PRESCL_CTRL1_PP_LOW_MASK;
1110 ctrl = readl(master->regs + CTRL);
1111
1112 i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
1113 ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
1114 if (ncycles < 4)
1115 ncycles = 0;
1116 else
1117 ncycles -= 4;
1118
1119 prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
1120
1121 /* Disable I3C master before updating PRESCL_CTRL1. */
1122 if (ctrl & CTRL_DEV_EN)
1123 cdns_i3c_master_disable(master);
1124
1125 writel(prescl1, master->regs + PRESCL_CTRL1);
1126
1127 if (ctrl & CTRL_DEV_EN)
1128 cdns_i3c_master_enable(master);
1129 }
1130
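/*
 * Dynamic Address Assignment: pre-program a free address in every unused
 * retaining register, issue ENTDAA, register the devices that acquired a
 * slot, release the slots that ended up unused and finally re-enable
 * Hot-Join and Mastership request events.
 */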
1131 static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
1132 {
1133 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1134 unsigned long olddevs, newdevs;
1135 int ret, slot;
1136 u8 addrs[MAX_DEVS] = { };
1137 u8 last_addr = 0;
1138
1139 olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1140 olddevs |= BIT(0);
1141
1142 /* Prepare RR slots before launching DAA. */
1143 for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
1144 ret = i3c_master_get_free_addr(m, last_addr + 1);
1145 if (ret < 0)
1146 return -ENOSPC;
1147
1148 last_addr = ret;
1149 addrs[slot] = last_addr;
1150 writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
1151 master->regs + DEV_ID_RR0(slot));
1152 writel(0, master->regs + DEV_ID_RR1(slot));
1153 writel(0, master->regs + DEV_ID_RR2(slot));
1154 }
1155
1156 ret = i3c_master_entdaa_locked(&master->base);
1157 if (ret && ret != I3C_ERROR_M2)
1158 return ret;
1159
1160 newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1161 newdevs &= ~olddevs;
1162
1163 /*
1164 * Register the devices that got a dynamic address during DAA. We
1165 * already have the addresses assigned to them in the addrs array.
1166 */
1167 for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
1168 i3c_master_add_i3c_dev_locked(m, addrs[slot]);
1169
1170 /*
1171 * Clear slots that ended up not being used. Can be caused by I3C
1172 * device creation failure or when the I3C device was already known
1173 * by the system but with a different address (in this case the device
1174 * already has a slot and does not need a new one).
1175 */
1176 writel(readl(master->regs + DEVS_CTRL) |
1177 master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
1178 master->regs + DEVS_CTRL);
1179
1180 i3c_master_defslvs_locked(&master->base);
1181
1182 cdns_i3c_master_upd_i3c_scl_lim(master);
1183
1184 /* Unmask Hot-Join and Mastership request interrupts. */
1185 i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
1186 I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
1187
1188 return 0;
1189 }
1190
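/*
 * Convert the SoC-specific tHD_PP delay (in ns) into sysclk cycles, clamped
 * to THD_DELAY_MAX and returned in the inverted encoding expected by the
 * CTRL_THD_DELAY field.
 */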
1191 static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
1192 {
1193 unsigned long sysclk_rate = clk_get_rate(master->sysclk);
1194 u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
1195 (NSEC_PER_SEC / sysclk_rate));
1196
1197 /* Values greater than THD_DELAY_MAX are not valid. */
1198 if (thd_delay > THD_DELAY_MAX)
1199 thd_delay = THD_DELAY_MAX;
1200
1201 /* CTLR_THD_DEL value is encoded. */
1202 return (THD_DELAY_MAX - thd_delay);
1203 }
1204
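/*
 * One-time bus setup: select the bus mode, derive the I3C and I2C SCL
 * prescalers from the sysclk rate, program the master's own address in
 * retaining register 0 and enable the controller with Hot-Join handling
 * armed.
 */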
1205 static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
1206 {
1207 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1208 unsigned long pres_step, sysclk_rate, max_i2cfreq;
1209 struct i3c_bus *bus = i3c_master_get_bus(m);
1210 u32 ctrl, prescl0, prescl1, pres, low;
1211 struct i3c_device_info info = { };
1212 int ret, ncycles;
1213
1214 switch (bus->mode) {
1215 case I3C_BUS_MODE_PURE:
1216 ctrl = CTRL_PURE_BUS_MODE;
1217 break;
1218
1219 case I3C_BUS_MODE_MIXED_FAST:
1220 ctrl = CTRL_MIXED_FAST_BUS_MODE;
1221 break;
1222
1223 case I3C_BUS_MODE_MIXED_SLOW:
1224 ctrl = CTRL_MIXED_SLOW_BUS_MODE;
1225 break;
1226
1227 default:
1228 return -EINVAL;
1229 }
1230
1231 sysclk_rate = clk_get_rate(master->sysclk);
1232 if (!sysclk_rate)
1233 return -EINVAL;
1234
1235 pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
1236 if (pres > PRESCL_CTRL0_MAX)
1237 return -ERANGE;
1238
1239 bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
1240
1241 prescl0 = PRESCL_CTRL0_I3C(pres);
1242
1243 low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
1244 prescl1 = PRESCL_CTRL1_OD_LOW(low);
1245
1246 max_i2cfreq = bus->scl_rate.i2c;
1247
1248 pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
1249 if (pres > PRESCL_CTRL0_MAX)
1250 return -ERANGE;
1251
1252 bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
1253
1254 prescl0 |= PRESCL_CTRL0_I2C(pres);
1255 writel(prescl0, master->regs + PRESCL_CTRL0);
1256
1257 /* Calculate OD and PP low. */
1258 pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
1259 ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
1260 if (ncycles < 0)
1261 ncycles = 0;
1262 prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
1263 writel(prescl1, master->regs + PRESCL_CTRL1);
1264
1265 /* Get an address for the master. */
1266 ret = i3c_master_get_free_addr(m, 0);
1267 if (ret < 0)
1268 return ret;
1269
1270 writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
1271 master->regs + DEV_ID_RR0(0));
1272
1273 cdns_i3c_master_dev_rr_to_info(master, 0, &info);
1274 if (info.bcr & I3C_BCR_HDR_CAP)
1275 info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
1276
1277 ret = i3c_master_set_info(&master->base, &info);
1278 if (ret)
1279 return ret;
1280
1281 /*
1282 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
1283 * events coming from this device.
1284 *
1285 * We will issue ENTDAA afterwards from the Hot-Join workqueue handler.
1286 */
1287 ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
1288
1289 /*
1290 * Configure data hold delay based on device-specific data.
1291 *
1292 * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
1293 * master output. This setting allows this timing to be met on the
1294 * master's SoC outputs, regardless of PCB balancing.
1295 */
1296 ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
1297 writel(ctrl, master->regs + CTRL);
1298
1299 cdns_i3c_master_enable(master);
1300
1301 return 0;
1302 }
1303
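/*
 * Handle one IBI response: grab a free slot from the device's IBI pool,
 * copy the payload out of the IBI data FIFO and queue it to the I3C core.
 * If no slot is available or the IBI is flagged as erroneous, the payload
 * is drained from the FIFO and dropped.
 */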
1304 static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
1305 u32 ibir)
1306 {
1307 struct cdns_i3c_i2c_dev_data *data;
1308 bool data_consumed = false;
1309 struct i3c_ibi_slot *slot;
1310 u32 id = IBIR_SLVID(ibir);
1311 struct i3c_dev_desc *dev;
1312 size_t nbytes;
1313 u8 *buf;
1314
1315 /*
1316 * FIXME: maybe we should report the FIFO OVF errors to the upper
1317 * layer.
1318 */
1319 if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
1320 goto out;
1321
1322 dev = master->ibi.slots[id];
1323 spin_lock(&master->ibi.lock);
1324
1325 data = i3c_dev_get_master_data(dev);
1326 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
1327 if (!slot)
1328 goto out_unlock;
1329
1330 buf = slot->data;
1331
1332 nbytes = IBIR_XFER_BYTES(ibir);
1333 readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
1334 if (nbytes & 3) {
1335 u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
1336
1337 memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
1338 }
1339
1340 slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
1341 dev->ibi->max_payload_len);
1342 i3c_master_queue_ibi(dev, slot);
1343 data_consumed = true;
1344
1345 out_unlock:
1346 spin_unlock(&master->ibi.lock);
1347
1348 out:
1349 /* Consume data from the FIFO if it's not been done already. */
1350 if (!data_consumed) {
1351 int i;
1352
1353 for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
1354 readl(master->regs + IBI_DATA_FIFO);
1355 }
1356 }
1357
1358 static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
1359 {
1360 u32 status0;
1361
1362 writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
1363
1364 for (status0 = readl(master->regs + MST_STATUS0);
1365 !(status0 & MST_STATUS0_IBIR_EMP);
1366 status0 = readl(master->regs + MST_STATUS0)) {
1367 u32 ibir = readl(master->regs + IBIR);
1368
1369 switch (IBIR_TYPE(ibir)) {
1370 case IBIR_TYPE_IBI:
1371 cdns_i3c_master_handle_ibi(master, ibir);
1372 break;
1373
1374 case IBIR_TYPE_HJ:
1375 WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
1376 queue_work(master->base.wq, &master->hj_work);
1377 break;
1378
1379 case IBIR_TYPE_MR:
1380 WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
1381 break;
1382
1383 default:
1384 break;
1385 }
1386 }
1387 }
1388
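/*
 * Interrupt handler: finish the in-flight transfer when the command FIFO
 * has drained, then demux any pending IBI/Hot-Join/Mastership requests.
 */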
1389 static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
1390 {
1391 struct cdns_i3c_master *master = data;
1392 u32 status;
1393
1394 status = readl(master->regs + MST_ISR);
1395 if (!(status & readl(master->regs + MST_IMR)))
1396 return IRQ_NONE;
1397
1398 spin_lock(&master->xferqueue.lock);
1399 cdns_i3c_master_end_xfer_locked(master, status);
1400 spin_unlock(&master->xferqueue.lock);
1401
1402 if (status & MST_INT_IBIR_THR)
1403 cnds_i3c_master_demux_ibis(master);
1404
1405 return IRQ_HANDLED;
1406 }
1407
1408 static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1409 {
1410 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1411 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1412 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1413 unsigned long flags;
1414 u32 sirmap;
1415 int ret;
1416
1417 ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
1418 I3C_CCC_EVENT_SIR);
1419 if (ret)
1420 return ret;
1421
1422 spin_lock_irqsave(&master->ibi.lock, flags);
1423 sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1424 sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1425 sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1426 SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1427 writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1428 spin_unlock_irqrestore(&master->ibi.lock, flags);
1429
1430 return ret;
1431 }
1432
1433 static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1434 {
1435 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1436 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1437 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1438 unsigned long flags;
1439 u32 sircfg, sirmap;
1440 int ret;
1441
1442 spin_lock_irqsave(&master->ibi.lock, flags);
1443 sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1444 sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1445 sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
1446 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
1447 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
1448 SIR_MAP_DEV_ACK;
1449
1450 if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
1451 sircfg |= SIR_MAP_DEV_SLOW;
1452
1453 sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
1454 writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1455 spin_unlock_irqrestore(&master->ibi.lock, flags);
1456
1457 ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
1458 I3C_CCC_EVENT_SIR);
1459 if (ret) {
1460 spin_lock_irqsave(&master->ibi.lock, flags);
1461 sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1462 sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1463 sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1464 SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1465 writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1466 spin_unlock_irqrestore(&master->ibi.lock, flags);
1467 }
1468
1469 return ret;
1470 }
1471
1472 static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1473 const struct i3c_ibi_setup *req)
1474 {
1475 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1476 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1477 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1478 unsigned long flags;
1479 unsigned int i;
1480
1481 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1482 if (IS_ERR(data->ibi_pool))
1483 return PTR_ERR(data->ibi_pool);
1484
1485 spin_lock_irqsave(&master->ibi.lock, flags);
1486 for (i = 0; i < master->ibi.num_slots; i++) {
1487 if (!master->ibi.slots[i]) {
1488 data->ibi = i;
1489 master->ibi.slots[i] = dev;
1490 break;
1491 }
1492 }
1493 spin_unlock_irqrestore(&master->ibi.lock, flags);
1494
1495 if (i < master->ibi.num_slots)
1496 return 0;
1497
1498 i3c_generic_ibi_free_pool(data->ibi_pool);
1499 data->ibi_pool = NULL;
1500
1501 return -ENOSPC;
1502 }
1503
1504 static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1505 {
1506 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1507 struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1508 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1509 unsigned long flags;
1510
1511 spin_lock_irqsave(&master->ibi.lock, flags);
1512 master->ibi.slots[data->ibi] = NULL;
1513 data->ibi = -1;
1514 spin_unlock_irqrestore(&master->ibi.lock, flags);
1515
1516 i3c_generic_ibi_free_pool(data->ibi_pool);
1517 }
1518
1519 static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1520 struct i3c_ibi_slot *slot)
1521 {
1522 struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1523
1524 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1525 }
1526
1527 static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
1528 .bus_init = cdns_i3c_master_bus_init,
1529 .bus_cleanup = cdns_i3c_master_bus_cleanup,
1530 .do_daa = cdns_i3c_master_do_daa,
1531 .attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
1532 .reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
1533 .detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
1534 .attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
1535 .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
1536 .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
1537 .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
1538 .priv_xfers = cdns_i3c_master_priv_xfers,
1539 .i2c_xfers = cdns_i3c_master_i2c_xfers,
1540 .enable_ibi = cdns_i3c_master_enable_ibi,
1541 .disable_ibi = cdns_i3c_master_disable_ibi,
1542 .request_ibi = cdns_i3c_master_request_ibi,
1543 .free_ibi = cdns_i3c_master_free_ibi,
1544 .recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
1545 };
1546
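/* Hot-Join handler: re-run DAA from the workqueue. */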
1547 static void cdns_i3c_master_hj(struct work_struct *work)
1548 {
1549 struct cdns_i3c_master *master = container_of(work,
1550 struct cdns_i3c_master,
1551 hj_work);
1552
1553 i3c_master_do_daa(&master->base);
1554 }
1555
1556 static struct cdns_i3c_data cdns_i3c_devdata = {
1557 .thd_delay_ns = 10,
1558 };
1559
1560 static const struct of_device_id cdns_i3c_master_of_ids[] = {
1561 { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
1562 { /* sentinel */ },
1563 };
1564
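/*
 * Probe: map the registers, get pclk/sysclk, check the DEV_ID magic, read
 * the FIFO and IBI capabilities from CONF_STATUS0/1 and register the
 * controller with the I3C core.
 */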
1565 static int cdns_i3c_master_probe(struct platform_device *pdev)
1566 {
1567 struct cdns_i3c_master *master;
1568 int ret, irq;
1569 u32 val;
1570
1571 master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
1572 if (!master)
1573 return -ENOMEM;
1574
1575 master->devdata = of_device_get_match_data(&pdev->dev);
1576 if (!master->devdata)
1577 return -EINVAL;
1578
1579 master->regs = devm_platform_ioremap_resource(pdev, 0);
1580 if (IS_ERR(master->regs))
1581 return PTR_ERR(master->regs);
1582
1583 master->pclk = devm_clk_get(&pdev->dev, "pclk");
1584 if (IS_ERR(master->pclk))
1585 return PTR_ERR(master->pclk);
1586
1587 master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1588 if (IS_ERR(master->sysclk))
1589 return PTR_ERR(master->sysclk);
1590
1591 irq = platform_get_irq(pdev, 0);
1592 if (irq < 0)
1593 return irq;
1594
1595 ret = clk_prepare_enable(master->pclk);
1596 if (ret)
1597 return ret;
1598
1599 ret = clk_prepare_enable(master->sysclk);
1600 if (ret)
1601 goto err_disable_pclk;
1602
1603 if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
1604 ret = -EINVAL;
1605 goto err_disable_sysclk;
1606 }
1607
1608 spin_lock_init(&master->xferqueue.lock);
1609 INIT_LIST_HEAD(&master->xferqueue.list);
1610
1611 INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
1612 writel(0xffffffff, master->regs + MST_IDR);
1613 writel(0xffffffff, master->regs + SLV_IDR);
1614 ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
1615 dev_name(&pdev->dev), master);
1616 if (ret)
1617 goto err_disable_sysclk;
1618
1619 platform_set_drvdata(pdev, master);
1620
1621 val = readl(master->regs + CONF_STATUS0);
1622
1623 /* Device ID0 is reserved to describe this master. */
1624 master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
1625 master->free_rr_slots = GENMASK(master->maxdevs, 1);
1626
1627 val = readl(master->regs + CONF_STATUS1);
1628 master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
1629 master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
1630 master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
1631 master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
1632 master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
1633
1634 spin_lock_init(&master->ibi.lock);
1635 master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
1636 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1637 sizeof(*master->ibi.slots),
1638 GFP_KERNEL);
1639 if (!master->ibi.slots) {
1640 ret = -ENOMEM;
1641 goto err_disable_sysclk;
1642 }
1643
1644 writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
1645 writel(MST_INT_IBIR_THR, master->regs + MST_IER);
1646 writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
1647
1648 ret = i3c_master_register(&master->base, &pdev->dev,
1649 &cdns_i3c_master_ops, false);
1650 if (ret)
1651 goto err_disable_sysclk;
1652
1653 return 0;
1654
1655 err_disable_sysclk:
1656 clk_disable_unprepare(master->sysclk);
1657
1658 err_disable_pclk:
1659 clk_disable_unprepare(master->pclk);
1660
1661 return ret;
1662 }
1663
1664 static void cdns_i3c_master_remove(struct platform_device *pdev)
1665 {
1666 struct cdns_i3c_master *master = platform_get_drvdata(pdev);
1667
1668 i3c_master_unregister(&master->base);
1669
1670 clk_disable_unprepare(master->sysclk);
1671 clk_disable_unprepare(master->pclk);
1672 }
1673
1674 static struct platform_driver cdns_i3c_master = {
1675 .probe = cdns_i3c_master_probe,
1676 .remove_new = cdns_i3c_master_remove,
1677 .driver = {
1678 .name = "cdns-i3c-master",
1679 .of_match_table = cdns_i3c_master_of_ids,
1680 },
1681 };
1682 module_platform_driver(cdns_i3c_master);
1683
1684 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
1685 MODULE_DESCRIPTION("Cadence I3C master driver");
1686 MODULE_LICENSE("GPL v2");
1687 MODULE_ALIAS("platform:cdns-i3c-master");
1688