1 /*
2 * Copyright (c) 2024 Nuvoton Technology Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
7 #define DT_DRV_COMPAT nuvoton_npcx_i3c
8
9 #include <string.h>
10
11 #include <zephyr/device.h>
12 #include <zephyr/irq.h>
13 #include <zephyr/sys/__assert.h>
14 #include <zephyr/sys/sys_io.h>
15
16 #include <zephyr/drivers/clock_control.h>
17 #include <zephyr/drivers/i3c.h>
18 #include <zephyr/drivers/i3c/target_device.h>
19 #include <zephyr/drivers/pinctrl.h>
20 #include <zephyr/drivers/reset.h>
21
22 #include <zephyr/logging/log.h>
23 LOG_MODULE_REGISTER(npcx_i3c, CONFIG_I3C_LOG_LEVEL);
24
25 /* MCONFIG register options */
26 #define MCONFIG_CTRENA_OFF 0x0
27 #define MCONFIG_CTRENA_ON 0x1
28 #define MCONFIG_CTRENA_CAPABLE 0x2
29 #define MCONFIG_HKEEP_EXT_SDA_SCL 0x3
30
31 /* MCTRL register options */
32 #define MCTRL_REQUEST_NONE 0 /* None */
33 #define MCTRL_REQUEST_EMITSTARTADDR 1 /* Emit a START */
34 #define MCTRL_REQUEST_EMITSTOP 2 /* Emit a STOP */
35 #define MCTRL_REQUEST_IBIACKNACK 3 /* Manually ACK or NACK an IBI */
36 #define MCTRL_REQUEST_PROCESSDAA 4 /* Starts the DAA process */
37 #define MCTRL_REQUEST_FORCEEXIT 6 /* Emit HDR Exit Pattern */
38 /* Emits a START with address 7Eh when a slave pulls I3C_SDA low to request an IBI */
39 #define MCTRL_REQUEST_AUTOIBI 7
40
41 /* ACK with mandatory byte determined by IBIRULES or ACK with no mandatory byte */
42 #define MCTRL_IBIRESP_ACK 0
43 #define MCTRL_IBIRESP_NACK 1 /* NACK */
44 #define MCTRL_IBIRESP_ACK_MANDATORY 2 /* ACK with mandatory byte */
45 #define MCTRL_IBIRESP_MANUAL 3
46
47 /* For REQUEST = EmitStartAddr */
48 enum npcx_i3c_mctrl_type {
49 NPCX_I3C_MCTRL_TYPE_I3C,
50 NPCX_I3C_MCTRL_TYPE_I2C,
51 NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR,
52 };
53
54 /* For REQUEST = ForceExit/Target Reset */
55 #define MCTRL_TYPE_HDR_EXIT 0
56 #define MCTRL_TYPE_TGT_RESTART 2
57
58 /* MSTATUS register options */
59 #define MSTATUS_STATE_IDLE 0x0
60 #define MSTATUS_STATE_TGTREQ 0x1
61 #define MSTATUS_STATE_NORMACT 0x3 /* SDR message mode */
62 #define MSTATUS_STATE_MSGDDR 0x4
63 #define MSTATUS_STATE_DAA 0x5
64 #define MSTATUS_STATE_IBIACK 0x6
65 #define MSTATUS_STATE_IBIRCV 0x7
66 #define MSTATUS_IBITYPE_NONE 0x0
67 #define MSTATUS_IBITYPE_IBI 0x1
68 #define MSTATUS_IBITYPE_CR 0x2
69 #define MSTATUS_IBITYPE_HJ 0x3
70
71 /* IBIRULES register options */
72 #define IBIRULES_ADDR_MSK 0x3F
73 #define IBIRULES_ADDR_SHIFT 0x6
74
75 /* MDMACTRL register options */
76 #define MDMA_DMAFB_DISABLE 0x0
77 #define MDMA_DMAFB_EN_ONE_FRAME 0x1
78 #define MDMA_DMAFB_EN_MANUAL 0x2
79 #define MDMA_DMATB_DISABLE 0x0
80 #define MDMA_DMATB_EN_ONE_FRAME 0x1
81 #define MDMA_DMATB_EN_MANUAL 0x2
82
83 /* CONFIG register options */
84 #define CFG_HDRCMD_RD_FROM_FIFIO 0
85
86 /* CTRL register options */
87 #define CTRL_EVENT_NORMAL 0
88 #define CTRL_EVENT_IBI 1
89 #define CTRL_EVENT_CNTLR_REQ 2
90 #define CTRL_EVENT_HJ 3
91
92 /* STATUS register options */
93 #define STATUS_EVDET_NONE 0
94 #define STATUS_EVDET_REQ_NOT_SENT 1
95 #define STATUS_EVDET_REQ_SENT_NACKED 2
96 #define STATUS_EVDET_REQ_SENT_ACKED 3
97
98 /* Local Constants Definition */
99 #define NPCX_I3C_CHK_TIMEOUT_US 10000 /* Timeout for checking register status */
100 #define I3C_SCL_PP_FREQ_MAX_MHZ 12500000
101 #define I3C_SCL_OD_FREQ_MAX_MHZ 4170000
102
103 #define I3C_BUS_TLOW_PP_MIN_NS 24 /* T_LOW period in push-pull mode */
104 #define I3C_BUS_THigh_PP_MIN_NS 24 /* T_High period in push-pull mode */
105 #define I3C_BUS_TLOW_OD_MIN_NS 200 /* T_LOW period in open-drain mode */
106
107 #define PPBAUD_DIV_MAX (BIT(GET_FIELD_SZ(NPCX_I3C_MCONFIG_PPBAUD)) - 1) /* PPBAUD divider max */
108
109 #define I3C_BUS_I2C_BAUD_RATE_FAST_MODE 0x0D
110 #define I3C_BUS_I2C_BAUD_RATE_FAST_MODE_PLUS 0x03
111
112 #define DAA_TGT_INFO_SZ 0x8 /* 8 bytes = PID(6) + BCR(1) + DCR(1) */
113 #define BAMATCH_DIV 0x4 /* BAMATCH = APB4_CLK divided by four */
114
115 /* Default maximum time we allow for an I3C transfer */
116 #define I3C_TRANS_TIMEOUT_MS K_MSEC(100)
117
118 #define MCLKD_FREQ_MHZ(freq) MHZ(freq)
119
120 #define I3C_STATUS_CLR_MASK \
121 (BIT(NPCX_I3C_MSTATUS_MCTRLDONE) | BIT(NPCX_I3C_MSTATUS_COMPLETE) | \
122 BIT(NPCX_I3C_MSTATUS_IBIWON) | BIT(NPCX_I3C_MSTATUS_NOWCNTLR))
123
124 #define I3C_TGT_INTSET_MASK \
125 (BIT(NPCX_I3C_INTSET_START) | BIT(NPCX_I3C_INTSET_MATCHED) | BIT(NPCX_I3C_INTSET_STOP) | \
126 BIT(NPCX_I3C_INTSET_DACHG) | BIT(NPCX_I3C_INTSET_CCC) | BIT(NPCX_I3C_INTSET_ERRWARN) | \
127 BIT(NPCX_I3C_INTSET_HDRMATCH) | BIT(NPCX_I3C_INTSET_CHANDLED) | \
128 BIT(NPCX_I3C_INTSET_EVENT))
129
130 #define HDR_DDR_CMD_AND_CRC_SZ_WORD 0x2 /* 2 words = Command(1 word) + CRC(1 word) */
131 #define HDR_RD_CMD 0x80
132
/* I3C module and port parsing from instance_id */
134 #define GET_MODULE_ID(inst_id) ((inst_id & 0xf0) >> 4)
135 #define GET_PORT_ID(inst_id) (inst_id & 0xf)
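/*
 * Example (derived from the macros above): instance_id 0x12 decodes to
 * module 1 and port 2, i.e. GET_MODULE_ID(0x12) == 1, GET_PORT_ID(0x12) == 2.
 */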
136
137 /* I3C target PID parsing */
138 #define GET_PID_VENDOR_ID(pid) (((uint64_t)pid >> 33) & 0x7fff) /* PID[47:33] */
139 #define GET_PID_ID_TYP(pid) (((uint64_t)pid >> 32) & 0x1) /* PID[32] */
140 #define GET_PID_PARTNO(pid) (pid & 0xffffffff) /* PID[31:0] */
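/*
 * Note: this follows the 48-bit Provisioned ID layout per the MIPI I3C
 * definition: PID[47:33] is the MIPI manufacturer ID, PID[32] selects a
 * vendor-fixed (0) or random (1) part number, and PID[31:0] is the
 * vendor-defined part number/instance field.
 */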
141
#define I3C_TGT_WR_REQ_WAIT_US 10 /* Wait time for target write-request MDMA completion after STOP */
143
144 /* Supported I3C MCLKD frequency */
145 enum npcx_i3c_speed {
146 NPCX_I3C_BUS_SPEED_40MHZ,
147 NPCX_I3C_BUS_SPEED_45MHZ,
148 NPCX_I3C_BUS_SPEED_48MHZ,
149 NPCX_I3C_BUS_SPEED_50MHZ,
150 };
151
152 /* Operation type */
153 enum npcx_i3c_oper_state {
154 NPCX_I3C_OP_STATE_IDLE,
155 NPCX_I3C_OP_STATE_WR,
156 NPCX_I3C_OP_STATE_RD,
157 NPCX_I3C_OP_STATE_IBI,
158 NPCX_I3C_OP_STATE_MAX,
159 };
160
161 /* I3C timing configuration for each i3c speed */
162 struct npcx_i3c_timing_cfg {
163 uint8_t ppbaud; /* Push-Pull high period */
164 uint8_t pplow; /* Push-Pull low period */
165 uint8_t odhpp; /* Open-Drain high period */
166 uint8_t odbaud; /* Open-Drain low period */
167 };
168
/* Recommended I3C timing values for each supported MCLKD frequency */
170 static const struct npcx_i3c_timing_cfg npcx_def_speed_cfg[] = {
	/* PP = 12.5 MHz, OD = 4.17 MHz */
172 [NPCX_I3C_BUS_SPEED_40MHZ] = {.ppbaud = 1, .pplow = 0, .odhpp = 1, .odbaud = 3},
173 [NPCX_I3C_BUS_SPEED_45MHZ] = {.ppbaud = 1, .pplow = 0, .odhpp = 1, .odbaud = 4},
174 [NPCX_I3C_BUS_SPEED_48MHZ] = {.ppbaud = 1, .pplow = 0, .odhpp = 1, .odbaud = 4},
175 [NPCX_I3C_BUS_SPEED_50MHZ] = {.ppbaud = 1, .pplow = 0, .odhpp = 1, .odbaud = 4},
176 };
177
178 struct npcx_i3c_config {
179 /* Common I3C Driver Config */
180 struct i3c_driver_config common;
181
182 /* Pointer to controller registers. */
183 struct i3c_reg *base;
184
185 /* Pointer to the clock device. */
186 const struct device *clock_dev;
187
188 /* Reset controller */
189 struct reset_dt_spec reset;
190
191 /* Clock control subsys related struct. */
192 struct npcx_clk_cfg clock_subsys;
193
194 /* Reference clock to determine 1 μs bus available time */
195 struct npcx_clk_cfg ref_clk_subsys;
196
197 /* Pointer to pin control device. */
198 const struct pinctrl_dev_config *pincfg;
199
200 /* Interrupt configuration function. */
201 void (*irq_config_func)(const struct device *dev);
202
	uint8_t instance_id; /* bit[7:4] module id, bit[3:0] port id */
204
205 /* I3C clock frequency configuration */
206 struct {
207 uint32_t i3c_pp_scl_hz; /* I3C push pull clock frequency in Hz. */
208 uint32_t i3c_od_scl_hz; /* I3C open drain clock frequency in Hz. */
209 } clocks;
210
211 #ifdef CONFIG_I3C_NPCX_DMA
212 struct npcx_clk_cfg mdma_clk_subsys;
213 struct mdma_reg *mdma_base;
214 #endif
215 };
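/*
 * Illustrative devicetree fragment (assuming the generic Zephyr I3C controller
 * properties; the exact node name and other cells come from the SoC .dtsi):
 *
 *  &i3c0 {
 *          status = "okay";
 *          i3c-scl-hz = <12500000>;
 *          i3c-od-scl-hz = <4170000>;
 *  };
 *
 * i3c-scl-hz and i3c-od-scl-hz populate clocks.i3c_pp_scl_hz and
 * clocks.i3c_od_scl_hz in the config structure above.
 */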
216
217 struct npcx_i3c_data {
218 /* Controller data */
219 struct i3c_driver_data common; /* Common i3c driver data */
220 struct k_mutex lock_mutex; /* Mutex of i3c controller */
221 struct k_sem sync_sem; /* Semaphore used for synchronization */
222 struct k_sem ibi_lock_sem; /* Semaphore used for ibi */
223
224 /* Target data */
225 struct i3c_target_config *target_config;
226 /* Configuration parameters for I3C hardware to act as target device */
227 struct i3c_config_target config_target;
228 struct k_sem target_lock_sem; /* Semaphore used for i3c target */
229 struct k_sem target_event_lock_sem; /* Semaphore used for i3c target ibi_raise() */
230
231 enum npcx_i3c_oper_state oper_state; /* Operation state */
232
233 #ifdef CONFIG_I3C_NPCX_DMA
234 uint8_t mdma_rx_buf[4096];
235 #endif /* End of CONFIG_I3C_NPCX_DMA */
236
237 #ifdef CONFIG_I3C_USE_IBI
238 struct {
239 /* List of addresses used in the MIBIRULES register. */
240 uint8_t addr[5];
241
242 /* Number of valid addresses in MIBIRULES. */
243 uint8_t num_addr;
244
245 /* True if all addresses have MSB set. */
246 bool msb;
247
248 /*
249 * True if all target devices require mandatory byte
250 * for IBI.
251 */
252 bool has_mandatory_byte;
253 } ibi;
254 #endif
255 };
256
static void npcx_i3c_mutex_lock(const struct device *dev)
258 {
259 struct npcx_i3c_data *const data = dev->data;
260
261 k_mutex_lock(&data->lock_mutex, K_FOREVER);
262 }
263
static void npcx_i3c_mutex_unlock(const struct device *dev)
265 {
266 struct npcx_i3c_data *const data = dev->data;
267
268 k_mutex_unlock(&data->lock_mutex);
269 }
270
271 #ifdef CONFIG_I3C_NPCX_DMA
static void i3c_ctrl_notify(const struct device *dev)
273 {
274 struct npcx_i3c_data *const data = dev->data;
275
276 k_sem_give(&data->sync_sem);
277 }
278
static int i3c_ctrl_wait_completion(const struct device *dev)
280 {
281 struct npcx_i3c_data *const data = dev->data;
282
283 return k_sem_take(&data->sync_sem, I3C_TRANS_TIMEOUT_MS);
284 }
285
static enum npcx_i3c_oper_state get_oper_state(const struct device *dev)
287 {
288 struct npcx_i3c_data *const data = dev->data;
289
290 return data->oper_state;
291 }
292 #endif /* CONFIG_I3C_NPCX_DMA */
293
static void set_oper_state(const struct device *dev, enum npcx_i3c_oper_state state)
295 {
296 struct npcx_i3c_data *const data = dev->data;
297
298 data->oper_state = state;
299 }
300
static uint8_t get_bus_available_match_val(uint32_t apb4_freq)
302 {
303 uint8_t bamatch;
304
305 bamatch = DIV_ROUND_UP(apb4_freq, MHZ(1));
306 /* The clock of this counter is APB4_CLK divided by four */
307 bamatch = DIV_ROUND_UP(bamatch, BAMATCH_DIV);
308
309 return bamatch;
310 }
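/*
 * Worked example: with APB4 at 48 MHz, DIV_ROUND_UP(48000000, MHZ(1)) = 48
 * ticks per microsecond, and dividing by BAMATCH_DIV (4) gives BAMATCH = 12,
 * i.e. 12 counts of the APB4/4 counter clock is roughly 1 us of bus-available
 * time.
 */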
311
/*
 * brief: Wait for the status bit to be set, then clear it (W1C)
 *
 * param[in] inst       Pointer to I3C register.
 * param[in] bit_offset Bit position in MSTATUS to wait for.
 *
 * return 0, success
 *        -ETIMEDOUT: check status timeout.
 */
static inline int npcx_i3c_status_wait_clear(struct i3c_reg *inst, uint8_t bit_offset)
321 {
322 if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, bit_offset), NPCX_I3C_CHK_TIMEOUT_US, NULL) ==
323 false) {
324 return -ETIMEDOUT;
325 }
326
327 inst->MSTATUS = BIT(bit_offset); /* W1C */
328
329 return 0;
330 }
331
static inline uint32_t npcx_i3c_state_get(struct i3c_reg *inst)
333 {
334 return GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_STATE);
335 }
336
static inline void npcx_i3c_interrupt_all_disable(struct i3c_reg *inst)
338 {
339 uint32_t intmask = inst->MINTSET;
340
341 inst->MINTCLR = intmask;
342 }
343
static inline void npcx_i3c_interrupt_enable(struct i3c_reg *inst, uint32_t mask)
345 {
346 inst->MINTSET = mask;
347 }
348
static void npcx_i3c_enable_target_interrupt(const struct device *dev, bool enable)
350 {
351 const struct npcx_i3c_config *config = dev->config;
352 struct i3c_reg *inst = config->base;
353
354 /* Disable the target interrupt events */
355 inst->INTCLR = inst->INTSET;
356
357 /* Clear the target interrupt status */
358 inst->STATUS = inst->STATUS;
359
360 /* Enable the target interrupt events */
361 if (enable) {
362 inst->INTSET = I3C_TGT_INTSET_MASK;
363 inst->MINTSET |= BIT(NPCX_I3C_MINTSET_NOWCNTLR); /* I3C target is now controller */
364
365 #ifndef CONFIG_I3C_NPCX_DMA
366 /* Receive buffer pending (FIFO mode) */
367 inst->INTSET |= BIT(NPCX_I3C_INTSET_RXPEND);
368 #endif
369 }
370 }
371
static bool npcx_i3c_has_error(struct i3c_reg *inst)
373 {
374 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_ERRWARN)) {
375 LOG_ERR("ERROR: MSTATUS 0x%08x MERRWARN 0x%08x", inst->MSTATUS, inst->MERRWARN);
376
377 return true;
378 }
379
380 return false;
381 }
382
static inline void npcx_i3c_status_clear_all(struct i3c_reg *inst)
384 {
385 uint32_t mask = I3C_STATUS_CLR_MASK;
386
387 inst->MSTATUS = mask;
388 }
389
static inline void npcx_i3c_errwarn_clear_all(struct i3c_reg *inst)
391 {
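	/*
	 * MERRWARN bits are write-1-to-clear, so writing the register's
	 * current value back clears every error/warning flag that is set.
	 */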
392 inst->MERRWARN = inst->MERRWARN;
393 }
394
static inline void npcx_i3c_fifo_flush(struct i3c_reg *inst)
396 {
397 inst->MDATACTRL |= (BIT(NPCX_I3C_MDATACTRL_FLUSHTB) | BIT(NPCX_I3C_MDATACTRL_FLUSHFB));
398 }
399
400 /*
401 * brief: Send request and check the request is valid
402 *
403 * param[in] inst Pointer to I3C register.
404 *
405 * return 0, success
406 * -ETIMEDOUT check MCTRLDONE timeout.
407 * -ENOSYS invalid use of request.
408 */
static inline int npcx_i3c_send_request(struct i3c_reg *inst, uint32_t mctrl_val)
410 {
411 inst->MCTRL = mctrl_val;
412
413 if (npcx_i3c_status_wait_clear(inst, NPCX_I3C_MSTATUS_MCTRLDONE) != 0) {
414 return -ETIMEDOUT;
415 }
416
417 /* Check invalid use of request */
418 if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_INVERQ)) {
419 LOG_ERR("%s: Invalid request, merrwarn: %#x", __func__, inst->MERRWARN);
420 return -ENOSYS;
421 }
422
423 return 0;
424 }
425
426 /* Start DAA procedure and continue the DAA with a Repeated START */
static inline int npcx_i3c_request_daa(struct i3c_reg *inst)
428 {
429 uint32_t val = 0;
430 int ret;
431
432 /* Set IBI response NACK while processing DAA */
433 SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK);
434
435 /* Send DAA request */
436 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_PROCESSDAA);
437
438 ret = npcx_i3c_send_request(inst, val);
439 if (ret != 0) {
440 LOG_ERR("Request DAA error, %d", ret);
441 return ret;
442 }
443
444 return 0;
445 }
446
447 /* Tell controller to start auto IBI */
static inline int npcx_i3c_request_auto_ibi(struct i3c_reg *inst)
449 {
450 uint32_t val = 0;
451 int ret;
452
453 SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_ACK);
454 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_AUTOIBI);
455
456 ret = npcx_i3c_send_request(inst, val);
457 if (ret != 0) {
458 LOG_ERR("Request auto ibi error, %d", ret);
459 return ret;
460 }
461
462 return 0;
463 }
464
/*
 * brief: Controller emits START and sends the address
 *
 * param[in] inst    Pointer to I3C register.
 * param[in] addr    Dynamic address for xfer or 0x7E for CCC command.
 * param[in] op_type Request type.
 * param[in] is_read Read(true) or write(false) operation.
 * param[in] read_sz Read size in bytes.
 *                   If op_type is HDR-DDR, read_sz must be the number of words.
 *
 * return 0, success
 *        else, error
 */
static int npcx_i3c_request_emit_start(struct i3c_reg *inst, uint8_t addr,
				       enum npcx_i3c_mctrl_type op_type, bool is_read,
				       size_t read_sz)
481 {
482 uint32_t mctrl = 0;
483 int ret;
484
	/* Set request and target address */
486 SET_FIELD(mctrl, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_EMITSTARTADDR);
487
488 /* Set operation type */
489 SET_FIELD(mctrl, NPCX_I3C_MCTRL_TYPE, op_type);
490
491 /* Set IBI response NACK in emit start */
492 SET_FIELD(mctrl, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK);
493
494 /* Set dynamic address */
495 SET_FIELD(mctrl, NPCX_I3C_MCTRL_ADDR, addr);
496
497 /* Set read(1) or write(0) */
498 if (is_read) {
499 mctrl |= BIT(NPCX_I3C_MCTRL_DIR);
500 SET_FIELD(mctrl, NPCX_I3C_MCTRL_RDTERM, read_sz); /* Set read length */
501 } else {
502 mctrl &= ~BIT(NPCX_I3C_MCTRL_DIR);
503 }
504
505 ret = npcx_i3c_send_request(inst, mctrl);
506 if (ret != 0) {
507 LOG_ERR("Request start error, %d", ret);
508 return ret;
509 }
510
	/* Check NACK after MCTRLDONE is set */
512 if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_NACK)) {
513 LOG_DBG("Address nacked");
514 return -ENODEV;
515 }
516
517 return 0;
518 }
519
520 /*
 * brief: Controller emits STOP.
522 *
523 * This emits STOP when controller is in NORMACT state.
524 *
525 * param[in] inst Pointer to I3C register.
526 *
527 * return 0 success
528 * -ECANCELED i3c state not as expected.
529 * -ETIMEDOUT check MCTRLDONE timeout.
530 * -ENOSYS invalid use of request.
531 */
static inline int npcx_i3c_request_emit_stop(struct i3c_reg *inst)
533 {
534 uint32_t val = 0;
535 int ret;
536 uint32_t i3c_state = npcx_i3c_state_get(inst);
537
538 /* Make sure we are in a state where we can emit STOP */
539 if (i3c_state == MSTATUS_STATE_IDLE) {
540 LOG_WRN("Request stop in idle state, state= %#x", i3c_state);
541 return -ECANCELED;
542 }
543
544 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_EMITSTOP);
545
546 ret = npcx_i3c_send_request(inst, val);
547 if (ret != 0) {
548 LOG_ERR("Request stop error, %d", ret);
549 return ret;
550 }
551
552 return 0;
553 }
554
static inline int npcx_i3c_request_hdr_exit(struct i3c_reg *inst)
556 {
557 uint32_t val = 0;
558 uint32_t state;
559 int ret;
560
561 /* Before sending the HDR exit command, check the HDR mode */
562 state = npcx_i3c_state_get(inst);
563 if (state != MSTATUS_STATE_MSGDDR) {
564 LOG_ERR("%s, state error: %#x", __func__, state);
565 return -EPERM;
566 }
567
568 SET_FIELD(val, NPCX_I3C_MCTRL_TYPE, MCTRL_TYPE_HDR_EXIT);
569 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_FORCEEXIT);
570
571 ret = npcx_i3c_send_request(inst, val);
572 if (ret != 0) {
573 LOG_ERR("Request hdr exit error %d", ret);
574 return ret;
575 }
576
577 return 0;
578 }
579
static inline int npcx_i3c_xfer_stop(struct i3c_reg *inst)
581 {
582 uint32_t state;
583 int ret;
584
585 state = npcx_i3c_state_get(inst);
586 LOG_DBG("Current working state=%d", state);
587
588 switch (state) {
589 case MSTATUS_STATE_NORMACT: /* SDR */
590 ret = npcx_i3c_request_emit_stop(inst);
591 break;
592 case MSTATUS_STATE_MSGDDR: /* HDR-DDR */
593 ret = npcx_i3c_request_hdr_exit(inst);
594 break;
595 default:
596 /* Not supported */
597 ret = -ENOTSUP;
598 LOG_WRN("xfer_stop state not supported, state:%d", state);
599 break;
600 }
601
602 return ret;
603 }
604
static inline int npcx_i3c_ibi_respond_nack(struct i3c_reg *inst)
606 {
607 uint32_t val = 0;
608 int ret;
609
610 SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK);
611 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_IBIACKNACK);
612
613 ret = npcx_i3c_send_request(inst, val);
614 if (ret != 0) {
615 LOG_ERR("Request ibi_rsp nack error, %d", ret);
616 return ret;
617 }
618
619 return 0;
620 }
621
static inline int npcx_i3c_ibi_respond_ack(struct i3c_reg *inst)
623 {
624 uint32_t val = 0;
625 int ret;
626
627 SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_ACK);
628 SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_IBIACKNACK);
629
630 ret = npcx_i3c_send_request(inst, val);
631 if (ret != 0) {
632 LOG_ERR("Request ibi_rsp ack error %d", ret);
633 return ret;
634 }
635
636 return 0;
637 }
638
639 /*
640 * brief: Find a registered I3C target device.
641 *
642 * This returns the I3C device descriptor of the I3C device
643 * matching the incoming id.
644 *
645 * param[in] dev Pointer to controller device driver instance.
646 * param[in] id Pointer to I3C device ID.
647 *
648 * return see i3c_device_find.
649 */
static inline struct i3c_device_desc *npcx_i3c_device_find(const struct device *dev,
							    const struct i3c_device_id *id)
652 {
653 const struct npcx_i3c_config *config = dev->config;
654
655 return i3c_dev_list_find(&config->common.dev_list, id);
656 }
657
658 /*
659 * brief: Perform bus recovery.
660 *
661 * param[in] dev Pointer to controller device driver instance.
662 *
663 * return 0 success, otherwise error
664 */
static int npcx_i3c_recover_bus(const struct device *dev)
666 {
667 const struct npcx_i3c_config *config = dev->config;
668 struct i3c_reg *inst = config->base;
669
	/*
	 * If the controller is in the NORMACT state, tell it to emit STOP
	 * so it can return to IDLE, or be ready to clear any pending
	 * target-initiated IBIs.
	 */
675 if (npcx_i3c_state_get(inst) == MSTATUS_STATE_NORMACT) {
676 npcx_i3c_request_emit_stop(inst);
677 };
678
679 /* Exhaust all target initiated IBI */
680 while (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_TGTSTART)) {
681 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_TGTSTART); /* W1C */
682
683 /* Tell the controller to perform auto IBI. */
684 npcx_i3c_request_auto_ibi(inst);
685
686 if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE),
687 NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) {
688 break;
689 }
690
691 /* Once auto IBI is done, discard bytes in FIFO. */
692 while (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_RXPEND)) {
693 /* Flush FIFO as long as RXPEND is set. */
694 npcx_i3c_fifo_flush(inst);
695 }
696
		/*
		 * There might be other IBIs waiting.
		 * So pause a bit to let other targets initiate
		 * their IBIs.
		 */
702 k_busy_wait(100);
703 }
704
705 /* Check IDLE state */
706 if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US,
707 NULL) == false) {
708 return -EBUSY;
709 }
710
711 return 0;
712 }
713
static inline void npcx_i3c_xfer_reset(struct i3c_reg *inst)
715 {
716 npcx_i3c_status_clear_all(inst);
717 npcx_i3c_errwarn_clear_all(inst);
718 npcx_i3c_fifo_flush(inst);
719 }
720
/*
 * brief: Perform one write transaction.
 *
 * This writes all data in buf to the TX FIFO, or times out
 * waiting for FIFO space.
 *
 * param[in] inst      Pointer to controller registers.
 * param[in] buf       Buffer containing data to be sent.
 * param[in] buf_sz    Number of bytes in buf to send.
 * param[in] no_ending True if the ending byte is not included in the message.
 *                     False if the ending byte is included.
 *
 * return Number of bytes written, or negative if error.
 *
 */
static int npcx_i3c_xfer_write_fifo(struct i3c_reg *inst, uint8_t *buf, uint8_t buf_sz,
				    bool no_ending)
738 {
739 int offset = 0;
740 int remaining = buf_sz;
741
742 while (remaining > 0) {
743 /* Check tx fifo not full */
744 if (WAIT_FOR(!IS_BIT_SET(inst->MDATACTRL, NPCX_I3C_MDATACTRL_TXFULL),
745 NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) {
746 LOG_DBG("Check tx fifo not full timed out");
747 return -ETIMEDOUT;
748 }
749
750 if ((remaining > 1) || no_ending) {
751 inst->MWDATAB = (uint32_t)buf[offset];
752 } else {
753 inst->MWDATABE = (uint32_t)buf[offset]; /* Set last byte */
754 }
755
756 offset += 1;
757 remaining -= 1;
758 }
759
760 return offset;
761 }
762
/*
 * brief: Perform read transaction.
 *
 * This reads from the RX FIFO until the COMPLETE bit is set in MSTATUS
 * or a timeout occurs.
 *
 * param[in] inst  Pointer to controller registers.
 * param[in] buf   Buffer to store data.
 * param[in] rd_sz Number of bytes to read.
 *
 * return Number of bytes read, or negative if error.
 *
 */
static int npcx_i3c_xfer_read_fifo(struct i3c_reg *inst, uint8_t *buf, uint8_t rd_sz)
777 {
778 bool is_done = false;
779 int offset = 0;
780
781 while (is_done == false) {
782 /* Check message is terminated */
783 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) {
784 is_done = true;
785 }
786
787 /* Check I3C bus error */
788 if (npcx_i3c_has_error(inst)) {
			/* Check timeout */
790 if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_TIMEOUT)) {
791 LOG_WRN("%s: ERR: timeout", __func__);
792 }
793
794 inst->MERRWARN = inst->MERRWARN;
795
796 return -EIO;
797 }
798
799 /* Check rx not empty */
800 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_RXPEND)) {
801
802 /* Receive all the data in this round.
803 * Read in a tight loop to reduce chance of losing
804 * FIFO data when the i3c speed is high.
805 */
806 while (offset < rd_sz) {
807 if (GET_FIELD(inst->MDATACTRL, NPCX_I3C_MDATACTRL_RXCOUNT) == 0) {
808 break;
809 }
810
811 buf[offset++] = (uint8_t)inst->MRDATAB;
812 }
813 }
814 }
815
816 return offset;
817 }
818
819 #ifdef CONFIG_I3C_NPCX_DMA
/*
 * brief: Perform DMA write transaction.
 *
 * The end of the write is detected via the interrupt generated by the
 * COMPLETE bit in the MSTATUS register.
 *
 * param[in] dev    Pointer to controller device driver instance.
 * param[in] buf    Buffer containing data to be sent.
 * param[in] buf_sz Number of bytes to write.
 *
 * return Number of bytes written, or negative if error.
 *
 */
static int npcx_i3c_xfer_write_fifo_dma(const struct device *dev, uint8_t *buf, uint32_t buf_sz)
833 {
834 const struct npcx_i3c_config *config = dev->config;
835 struct i3c_reg *i3c_inst = config->base;
836 struct mdma_reg *mdma_inst = config->mdma_base;
837 int ret;
838
839 set_oper_state(dev, NPCX_I3C_OP_STATE_WR);
840
841 /* Enable I3C MDMA write for one frame */
842 SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMATB, MDMA_DMATB_EN_ONE_FRAME);
843 i3c_inst->MINTSET |= BIT(NPCX_I3C_MINTCLR_COMPLETE); /* Enable I3C complete interrupt */
844
845 /* Write Operation (MDMA CH_1) */
846 mdma_inst->MDMA_TCNT1 = buf_sz; /* Set MDMA transfer count */
847 mdma_inst->MDMA_SRCB1 = (uint32_t)buf; /* Set source address */
848 mdma_inst->MDMA_CTL1 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */
849
850 /* Wait I3C COMPLETE */
851 ret = i3c_ctrl_wait_completion(dev);
852 if (ret < 0) {
853 LOG_DBG("Check complete time out, buf_size:%d", buf_sz);
854 goto out_wr_fifo_dma;
855 }
856
857 /* Check and clear DMA TC after complete */
858 if (!IS_BIT_SET(mdma_inst->MDMA_CTL1, NPCX_MDMA_CTL_TC)) {
859 LOG_DBG("DMA busy, TC=%d", IS_BIT_SET(mdma_inst->MDMA_CTL1, NPCX_MDMA_CTL_TC));
860 ret = -EBUSY;
861 goto out_wr_fifo_dma;
862 }
863
864 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC); /* Clear TC, W0C */
865 ret = buf_sz - mdma_inst->MDMA_CTCNT1; /* Set transferred count */
866 LOG_DBG("Write cnt=%d", ret);
867
868 out_wr_fifo_dma:
869 i3c_inst->MINTCLR |= BIT(NPCX_I3C_MINTCLR_COMPLETE); /* Disable I3C complete interrupt */
870 npcx_i3c_fifo_flush(i3c_inst);
871 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
872
873 return ret;
874 }
875
876 /*
877 * brief: Perform DMA read transaction.
878 * (Data width used for DMA transfers is "byte")
879 *
880 * For read end, use the MDMA end-of-transfer interrupt(SIEN bit)
881 * instead of using the I3CI interrupt generated by COMPLETE bit in MSTATUS register.
882 *
883 * param[in] dev Pointer to controller device driver instance.
884 * param[in] buf Buffer to store data.
885 * param[in] buf_sz Number of bytes to read.
886 *
887 * return Number of bytes read, or negative if error.
888 *
889 */
static int npcx_i3c_xfer_read_fifo_dma(const struct device *dev, uint8_t *buf, uint32_t buf_sz)
891 {
892 const struct npcx_i3c_config *config = dev->config;
893 struct i3c_reg *i3c_inst = config->base;
894 struct mdma_reg *mdma_inst = config->mdma_base;
895 int ret;
896
897 set_oper_state(dev, NPCX_I3C_OP_STATE_RD);
898
899 /* Enable DMA until DMA is disabled by setting DMAFB to 00 */
900 SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMAFB, MDMA_DMAFB_EN_MANUAL);
901
902 /* Read Operation (MDMA CH_0) */
903 mdma_inst->MDMA_TCNT0 = buf_sz; /* Set MDMA transfer count */
904 mdma_inst->MDMA_DSTB0 = (uint32_t)buf; /* Set destination address */
905 mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_SIEN); /* Enable stop interrupt */
906 mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */
907
908 /* Wait MDMA TC */
909 ret = i3c_ctrl_wait_completion(dev);
910 if (ret < 0) {
911 LOG_DBG("Check DMA done time out");
912 } else {
913 ret = buf_sz - mdma_inst->MDMA_CTCNT0; /* Set transferred count */
914 LOG_DBG("Read cnt=%d", ret);
915 }
916
917 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_SIEN); /* Disable stop interrupt */
918 /* Disable I3C MDMA read */
919 SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMAFB, MDMA_DMAFB_DISABLE);
920 npcx_i3c_fifo_flush(i3c_inst);
921 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
922
923 return ret;
924 }
925
/*
 * brief: Perform one transfer transaction by DMA.
 *        (Supports SDR and HDR-DDR)
 *
 * param[in] dev        Pointer to controller device driver instance.
 * param[in] addr       Target address.
 * param[in] op_type    Request type.
 * param[in] buf        Buffer for data to be sent or received.
 * param[in] buf_sz     Buffer size in bytes.
 * param[in] is_read    True if this is a read transaction, false if write.
 * param[in] emit_start True if START is needed before read/write.
 * param[in] emit_stop  True if STOP is needed after read/write.
 * param[in] hdr_cmd    HDR command code (used only for HDR-DDR messages).
 *
 * return Number of bytes read/written, or negative if error.
 */
static int npcx_i3c_do_one_xfer_dma(const struct device *dev, uint8_t addr,
				    enum npcx_i3c_mctrl_type op_type, uint8_t *buf, size_t buf_sz,
				    bool is_read, bool emit_start, bool emit_stop, uint8_t hdr_cmd)
944 {
945 const struct npcx_i3c_config *config = dev->config;
946 struct i3c_reg *inst = config->base;
947 int ret = 0;
948 bool is_hdr_ddr = (op_type == NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR) ? true : false;
949 size_t rd_len = buf_sz;
950
951 npcx_i3c_status_clear_all(inst);
952 npcx_i3c_errwarn_clear_all(inst);
953
	/* HDR-DDR transfers move data in words */
955 if (is_hdr_ddr && (buf_sz % 2 != 0)) {
956 LOG_ERR("%s, HDR-DDR data length should be even, len=%#x", __func__, buf_sz);
957 return -EINVAL;
958 }
959
960 /* Emit START if needed */
961 if (emit_start) {
962 /*
963 * For HDR-DDR mode read, RDTERM also includes one word (16 bits) for CRC.
964 * For example, to read 8 bytes, set RDTERM to 6.
965 * (1 word HDR-DDR command + 4 words data + 1 word for CRC)
966 */
967 if (is_hdr_ddr) {
968 if (is_read) {
969 /* The unit of rd_len is "word" in DDR mode */
970 rd_len /= sizeof(uint16_t); /* Byte to word */
971 rd_len += HDR_DDR_CMD_AND_CRC_SZ_WORD;
972 hdr_cmd |= HDR_RD_CMD;
973 } else {
974 hdr_cmd &= ~HDR_RD_CMD;
975 }
976
977 /* Write the command code for the HDR-DDR message */
978 inst->MWDATAB = hdr_cmd;
979 }
980
981 ret = npcx_i3c_request_emit_start(inst, addr, op_type, is_read, rd_len);
982 if (ret != 0) {
983 LOG_ERR("%s: emit start fail", __func__);
984 goto out_do_one_xfer_dma;
985 }
986 }
987
988 /* No data to be transferred */
989 if ((buf == NULL) || (buf_sz == 0)) {
990 goto out_do_one_xfer_dma;
991 }
992
993 /* Select read or write operation */
994 if (is_read) {
995 ret = npcx_i3c_xfer_read_fifo_dma(dev, buf, buf_sz);
996 } else {
997 ret = npcx_i3c_xfer_write_fifo_dma(dev, buf, buf_sz);
998 }
999
1000 if (ret < 0) {
1001 LOG_ERR("%s: %s fifo fail", __func__, is_read ? "read" : "write");
1002 goto out_do_one_xfer_dma;
1003 }
1004
1005 /* Check I3C bus error */
1006 if (npcx_i3c_has_error(inst)) {
1007 ret = -EIO;
1008 LOG_ERR("%s: I3C bus error", __func__);
1009 }
1010
1011 out_do_one_xfer_dma:
1012 /* Emit STOP or exit DDR if needed */
1013 if (emit_stop) {
1014 npcx_i3c_xfer_stop(inst);
1015 }
1016
1017 return ret;
1018 }
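/*
 * Illustrative call (hypothetical address, buffer and command code): an
 * 8-byte HDR-DDR read framed by its own START and exit pattern would look
 * roughly like
 *
 *  npcx_i3c_do_one_xfer_dma(dev, 0x09, NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR,
 *                           buf, 8, true, true, true, 0x20);
 *
 * where RDTERM is computed internally as 8 / 2 + 2 = 6 words
 * (1 command word + 4 data words + 1 CRC word), matching the comment above.
 */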
1019 #endif /* End of CONFIG_I3C_NPCX_DMA */
1020
1021 /*
1022 * brief: Perform one transfer transaction.
1023 * (Support SDR only)
1024 *
1025 * param[in] inst Pointer to controller registers.
1026 * param[in] addr Target address.
1027 * param[in] op_type Request type.
1028 * param[in] buf Buffer for data to be sent or received.
1029 * param[in] buf_sz Buffer size in bytes.
1030 * param[in] is_read True if this is a read transaction, false if write.
1031 * param[in] emit_start True if START is needed before read/write.
1032 * param[in] emit_stop True if STOP is needed after read/write.
1033 * param[in] no_ending True if not to signal end of write message.
1034 *
1035 * return Number of bytes read/written, or negative if error.
1036 */
static int npcx_i3c_do_one_xfer(struct i3c_reg *inst, uint8_t addr,
				enum npcx_i3c_mctrl_type op_type, uint8_t *buf, size_t buf_sz,
				bool is_read, bool emit_start, bool emit_stop, bool no_ending)
1040 {
1041 int ret = 0;
1042
1043 npcx_i3c_status_clear_all(inst);
1044 npcx_i3c_errwarn_clear_all(inst);
1045
1046 /* Emit START if needed */
1047 if (emit_start) {
1048 ret = npcx_i3c_request_emit_start(inst, addr, op_type, is_read, buf_sz);
1049 if (ret != 0) {
1050 LOG_ERR("%s: emit start fail", __func__);
1051 goto out_do_one_xfer;
1052 }
1053 }
1054
1055 /* No data to be transferred */
1056 if ((buf == NULL) || (buf_sz == 0)) {
1057 goto out_do_one_xfer;
1058 }
1059
1060 /* Select read or write operation */
1061 if (is_read) {
1062 ret = npcx_i3c_xfer_read_fifo(inst, buf, buf_sz);
1063 } else {
1064 ret = npcx_i3c_xfer_write_fifo(inst, buf, buf_sz, no_ending);
1065 }
1066
1067 if (ret < 0) {
1068 LOG_ERR("%s: %s fifo fail", __func__, is_read ? "read" : "write");
1069 goto out_do_one_xfer;
1070 }
1071
	/* Check message complete if this is a read transaction or the
	 * ending byte of a write transaction.
	 */
1075 if (is_read || !no_ending) {
1076 /* Wait message transfer complete */
1077 if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE),
1078 NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) {
1079 LOG_DBG("Wait COMPLETE timed out, addr 0x%02x, buf_sz %u", addr, buf_sz);
1080
1081 ret = -ETIMEDOUT;
1082 emit_stop = true;
1083
1084 goto out_do_one_xfer;
1085 }
1086
1087 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */
1088 }
1089
1090 /* Check I3C bus error */
1091 if (npcx_i3c_has_error(inst)) {
1092 ret = -EIO;
1093 LOG_ERR("%s: I3C bus error", __func__);
1094 }
1095
1096 out_do_one_xfer:
1097 /* Emit STOP if needed */
1098 if (emit_stop) {
1099 npcx_i3c_request_emit_stop(inst);
1100 }
1101
1102 return ret;
1103 }
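/*
 * Illustrative call (hypothetical address and buffer): a 2-byte private SDR
 * read framed by its own START and STOP would look roughly like
 *
 *  ret = npcx_i3c_do_one_xfer(inst, 0x09, NPCX_I3C_MCTRL_TYPE_I3C,
 *                             buf, 2, true, true, true, false);
 */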
1104
1105 /*
1106 * brief: Transfer messages in I3C mode.
1107 *
1108 * see i3c_transfer
1109 *
1110 * param[in] dev Pointer to device driver instance.
1111 * param[in] target Pointer to target device descriptor.
1112 * param[in] msgs Pointer to I3C messages.
 * param[in] num_msgs Number of messages to transfer.
1114 *
1115 * return see i3c_transfer
1116 */
static int npcx_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
			     struct i3c_msg *msgs, uint8_t num_msgs)
1119 {
1120 const struct npcx_i3c_config *config = dev->config;
1121 struct i3c_reg *inst = config->base;
1122 struct npcx_i3c_data *data = dev->data;
1123 uint32_t intmask;
1124 int xfered_len = 0;
1125 int ret = 0;
1126 bool send_broadcast = true;
1127 bool is_xfer_done = true;
1128 enum npcx_i3c_mctrl_type op_type;
1129
1130 if (msgs == NULL) {
1131 return -EINVAL;
1132 }
1133
1134 if (target->dynamic_addr == 0U) {
1135 return -EINVAL;
1136 }
1137
1138 npcx_i3c_mutex_lock(dev);
1139
1140 /* Check bus in idle state */
1141 if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US,
1142 NULL) == false) {
1143 LOG_ERR("%s: xfer state error: %d", __func__, npcx_i3c_state_get(inst));
1144 npcx_i3c_mutex_unlock(dev);
1145 return -ETIMEDOUT;
1146 }
1147
1148 /* Disable interrupt */
1149 intmask = inst->MINTSET;
1150 npcx_i3c_interrupt_all_disable(inst);
1151
1152 npcx_i3c_xfer_reset(inst);
1153
1154 /* Iterate over all the messages */
1155 for (int i = 0; i < num_msgs; i++) {
		/*
		 * Check whether the message is a read or write operation.
		 * For a write operation, check the last data byte of a transmit message.
		 */
1160 bool is_read = (msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ;
1161 bool no_ending = false;
1162
		/*
		 * Emit START if this is the first message or if the
		 * RESTART flag is set in the message.
		 */
1167 #ifdef CONFIG_I3C_NPCX_DMA
1168 bool emit_start =
1169 (i == 0) || ((msgs[i].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART);
1170 #endif
1171
1172 bool emit_stop = (msgs[i].flags & I3C_MSG_STOP) == I3C_MSG_STOP;
1173
		/*
		 * The controller requires special treatment of the last byte of
		 * a write message. Since the API permits having a bunch of
		 * write messages without RESTART in between, this is just some
		 * logic to determine whether to treat the last byte of this
		 * message as the last byte of a series of write messages.
		 * If not, tell the write function not to treat it that way.
		 */
1182 if (!is_read && !emit_stop && ((i + 1) != num_msgs)) {
1183 bool next_is_write = (msgs[i + 1].flags & I3C_MSG_RW_MASK) == I3C_MSG_WRITE;
1184 bool next_is_restart =
1185 ((msgs[i + 1].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART);
1186
1187 /* Check next msg is still write operation and not including Sr */
1188 if (next_is_write && !next_is_restart) {
1189 no_ending = true;
1190 }
1191 }
1192
1193 #ifdef CONFIG_I3C_NPCX_DMA
		/* The current DMA implementation does not support multi-message writes */
		if (!is_read && no_ending) {
			LOG_ERR("I3C DMA transfer does not support multi-message write");
1197 ret = -EINVAL;
1198 break;
1199 }
1200 #endif
1201
1202 /* Check message SDR or HDR mode */
1203 bool is_msg_hdr = (msgs[i].flags & I3C_MSG_HDR) == I3C_MSG_HDR;
1204
1205 /* Set emit start type SDR or HDR-DDR mode */
1206 if (!is_msg_hdr || msgs[i].hdr_mode == 0) {
1207 op_type = NPCX_I3C_MCTRL_TYPE_I3C; /* Set operation type SDR */
1208
			/*
			 * SDR, send broadcast header (0x7E)
			 *
			 * Two ways to do read/write transfer (SDR mode).
			 * 1. [S] + [0x7E] + [address] + [data] + [Sr or P]
			 * 2. [S] + [address] + [data] + [Sr or P]
			 *
			 * Send broadcast header (0x7E) on the first transfer or after a STOP,
			 * unless the flag is set not to.
			 */
1219 if (!(msgs[i].flags & I3C_MSG_NBCH) && send_broadcast) {
1220 ret = npcx_i3c_request_emit_start(inst, I3C_BROADCAST_ADDR,
1221 NPCX_I3C_MCTRL_TYPE_I3C, false,
1222 0);
1223 if (ret < 0) {
1224 LOG_ERR("%s: emit start of broadcast addr failed, error "
1225 "(%d)",
1226 __func__, ret);
1227 break;
1228 }
1229 send_broadcast = false;
1230 }
1231 } else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) &&
1232 (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && is_msg_hdr) {
1233
1234 op_type = NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR; /* Set operation type DDR */
1235
			/* HDR-DDR transfers move data in words */
			if ((msgs[i].len % 2) != 0x0) {
				LOG_ERR("HDR-DDR data length should be an even number of bytes, "
					"xfer len=%d",
					msgs[i].len);
1241 ret = -EINVAL;
1242 break;
1243 }
1244 } else {
1245 LOG_ERR("%s: %s controller HDR Mode %#x\r\n"
1246 "msg HDR mode %#x, msg flag %#x",
1247 __func__, dev->name, data->common.ctrl_config.supported_hdr,
1248 msgs[i].hdr_mode, msgs[i].flags);
1249 ret = -ENOTSUP;
1250 break;
1251 }
1252
1253 #ifdef CONFIG_I3C_NPCX_DMA
1254 /* Do transfer with target device */
1255 xfered_len = npcx_i3c_do_one_xfer_dma(dev, target->dynamic_addr, op_type,
1256 msgs[i].buf, msgs[i].len, is_read, emit_start,
1257 emit_stop, msgs[i].hdr_cmd_code);
1258 #endif
1259
1260 if (xfered_len < 0) {
1261 LOG_ERR("%s: do xfer fail", __func__);
1262 ret = xfered_len; /* Set error code to ret */
1263 break;
1264 }
1265
1266 /* Write back the total number of bytes transferred */
1267 msgs[i].num_xfer = xfered_len;
1268
1269 if (emit_stop) {
1270 /* SDR. After a STOP, send broadcast header before next msg */
1271 send_broadcast = true;
1272 }
1273
		/* Check whether the emit-stop flag is included in the final msg */
1275 if ((i == num_msgs - 1) && (emit_stop == false)) {
1276 is_xfer_done = false;
1277 }
1278 }
1279
1280 /* Emit stop if error occurs or stop flag not in the msg */
1281 if ((ret != 0) || (is_xfer_done == false)) {
1282 npcx_i3c_xfer_stop(inst);
1283 }
1284
1285 npcx_i3c_errwarn_clear_all(inst);
1286 npcx_i3c_status_clear_all(inst);
1287
1288 npcx_i3c_interrupt_enable(inst, intmask);
1289
1290 npcx_i3c_mutex_unlock(dev);
1291
1292 return ret;
1293 }
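/*
 * Illustrative caller-side usage (hypothetical target descriptor and buffers):
 * this driver is normally reached through the generic I3C API, e.g. a register
 * write followed by a repeated-START read:
 *
 *  struct i3c_msg msgs[2] = {
 *          { .buf = &reg, .len = 1, .flags = I3C_MSG_WRITE },
 *          { .buf = val, .len = 2,
 *            .flags = I3C_MSG_READ | I3C_MSG_RESTART | I3C_MSG_STOP },
 *  };
 *
 *  ret = i3c_transfer(target, msgs, 2);
 */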
1294
1295 /*
1296 * brief: Perform Dynamic Address Assignment.
1297 *
1298 * param[in] dev Pointer to controller device driver instance.
1299 *
1300 * return 0 If successful.
1301 * -EBUSY Bus is busy.
1302 * -EIO General input / output error.
 * -ENODEV If a provisioned ID does not match any target device
 *         in the registered device list.
1305 * -ENOSPC No more free addresses can be assigned to target.
1306 * -ENOSYS Dynamic address assignment is not supported by
1307 * the controller driver.
1308 */
static int npcx_i3c_do_daa(const struct device *dev)
1310 {
1311 const struct npcx_i3c_config *config = dev->config;
1312 struct npcx_i3c_data *data = dev->data;
1313 struct i3c_reg *inst = config->base;
1314 int ret = 0;
1315 uint8_t rx_buf[8];
1316 size_t rx_count;
1317 uint32_t intmask;
1318
1319 npcx_i3c_mutex_lock(dev);
1320
1321 memset(rx_buf, 0xff, sizeof(rx_buf));
1322
1323 /* Check bus in idle state */
1324 if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US,
1325 NULL) == false) {
1326 LOG_ERR("%s: DAA state error: %d", __func__, npcx_i3c_state_get(inst));
1327 npcx_i3c_mutex_unlock(dev);
1328 return -ETIMEDOUT;
1329 }
1330
1331 LOG_DBG("DAA: ENTDAA");
1332
1333 /* Disable interrupt */
1334 intmask = inst->MINTSET;
1335 npcx_i3c_interrupt_all_disable(inst);
1336
1337 npcx_i3c_xfer_reset(inst);
1338
1339 /* Emit process DAA */
1340 if (npcx_i3c_request_daa(inst) != 0) {
1341 ret = -ETIMEDOUT;
1342 LOG_ERR("Emit process DAA error");
1343 goto out_do_daa;
1344 }
1345
1346 /* Loop until no more responses from devices */
1347 do {
1348 /* Check ERRWARN bit set */
1349 if (npcx_i3c_has_error(inst)) {
1350 ret = -EIO;
1351 LOG_ERR("DAA recv error");
1352 break;
1353 }
1354
1355 /* Receive Provisioned ID, BCR and DCR (total 8 bytes) */
1356 rx_count = GET_FIELD(inst->MDATACTRL, NPCX_I3C_MDATACTRL_RXCOUNT);
1357
1358 if (rx_count == DAA_TGT_INFO_SZ) {
1359 for (int i = 0; i < rx_count; i++) {
1360 rx_buf[i] = (uint8_t)inst->MRDATAB;
1361 }
1362 } else {
1363 /* Data count not as expected, exit DAA */
1364 ret = -EBADMSG;
1365 LOG_DBG("Rx count not as expected %d, abort DAA", rx_count);
1366 break;
1367 }
1368
1369 /* Start assign dynamic address */
1370 if ((npcx_i3c_state_get(inst) == MSTATUS_STATE_DAA) &&
1371 IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_BETWEEN)) {
1372 struct i3c_device_desc *target;
1373 uint16_t vendor_id;
1374 uint32_t part_no;
1375 uint64_t pid;
1376 uint8_t dyn_addr = 0;
1377
1378 /* PID[47:33] = manufacturer ID */
1379 vendor_id = (((uint16_t)rx_buf[0] << 8U) | (uint16_t)rx_buf[1]) & 0xFFFEU;
1380
			/* PID[31:0] = vendor fixed value or random value */
1382 part_no = (uint32_t)rx_buf[2] << 24U | (uint32_t)rx_buf[3] << 16U |
1383 (uint32_t)rx_buf[4] << 8U | (uint32_t)rx_buf[5];
1384
1385 /* Combine into one Provisioned ID */
1386 pid = (uint64_t)vendor_id << 32U | (uint64_t)part_no;
1387
1388 LOG_DBG("DAA: Rcvd PID 0x%04x%08x", vendor_id, part_no);
1389
1390 /* Find a usable address during ENTDAA */
1391 ret = i3c_dev_list_daa_addr_helper(&data->common.attached_dev.addr_slots,
1392 &config->common.dev_list, pid, false,
1393 false, &target, &dyn_addr);
1394 if (ret != 0) {
1395 LOG_ERR("%s: Assign new DA error", __func__);
1396 break;
1397 }
1398
1399 if (target == NULL) {
1400 LOG_INF("%s: PID 0x%04x%08x is not in registered device "
1401 "list, given dynamic address 0x%02x",
1402 dev->name, vendor_id, part_no, dyn_addr);
1403 } else {
1404 /* Update target descriptor */
1405 target->dynamic_addr = dyn_addr;
1406 target->bcr = rx_buf[6];
1407 target->dcr = rx_buf[7];
1408 }
1409
1410 /* Mark the address as I3C device */
1411 i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, dyn_addr);
1412
			/*
			 * If the device has a static address, the device will no
			 * longer respond to that static address after address
			 * assignment. So free the static one from the address slots
			 * if it differs from the newly assigned one.
			 */
1419 if ((target != NULL) && (target->static_addr != 0U) &&
1420 (dyn_addr != target->static_addr)) {
1421 i3c_addr_slots_mark_free(&data->common.attached_dev.addr_slots,
1422 dyn_addr);
1423 }
1424
1425 /* Emit process DAA again to send the address to the device */
1426 inst->MWDATAB = dyn_addr;
1427 ret = npcx_i3c_request_daa(inst);
1428 if (ret != 0) {
1429 LOG_ERR("%s: Assign DA timeout", __func__);
1430 break;
1431 }
1432
1433 LOG_DBG("PID 0x%04x%08x assigned dynamic address 0x%02x", vendor_id,
1434 part_no, dyn_addr);
1435
1436 /* Target did not accept the assigned DA, exit DAA */
1437 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_NACKED)) {
1438 ret = -EFAULT;
1439 LOG_DBG("TGT NACK assigned DA %#x", dyn_addr);
1440
1441 /* Free the reserved DA */
1442 i3c_addr_slots_mark_free(&data->common.attached_dev.addr_slots,
1443 dyn_addr);
1444
1445 /* 0 if address has not been assigned */
1446 if (target != NULL) {
1447 target->dynamic_addr = 0;
1448 }
1449
1450 break;
1451 }
1452 }
1453
1454 /* Check all targets have been assigned DA and DAA complete */
1455 } while ((!IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) &&
1456 npcx_i3c_state_get(inst) != MSTATUS_STATE_IDLE);
1457
1458 out_do_daa:
1459 /* Exit DAA mode when error occurs */
1460 if (ret != 0) {
1461 npcx_i3c_request_emit_stop(inst);
1462 }
1463
1464 /* Clear all flags. */
1465 npcx_i3c_errwarn_clear_all(inst);
1466 npcx_i3c_status_clear_all(inst);
1467
1468 /* Re-Enable I3C IRQ sources. */
1469 npcx_i3c_interrupt_enable(inst, intmask);
1470
1471 npcx_i3c_fifo_flush(inst);
1472 npcx_i3c_mutex_unlock(dev);
1473
1474 return ret;
1475 }
1476
1477 /*
1478 * brief: Send Common Command Code (CCC).
1479 *
1480 * param[in] dev Pointer to controller device driver instance.
1481 * param[in] payload Pointer to CCC payload.
1482 *
1483 * return: The same as i3c_do_ccc()
1484 * 0 If successful.
1485 * -EBUSY Bus is busy.
 * -EIO General input / output error.
 * -EINVAL Invalid value set in the payload structure.
1488 * -ENOSYS Not implemented.
1489 */
static int npcx_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload)
1491 {
1492 const struct npcx_i3c_config *config = dev->config;
1493 int ret;
1494 struct i3c_reg *inst = config->base;
1495 uint32_t intmask;
1496 int xfered_len;
1497
1498 if (dev == NULL || payload == NULL) {
1499 return -EINVAL;
1500 }
1501
1502 npcx_i3c_mutex_lock(dev);
1503
1504 /* Disable interrupt */
1505 intmask = inst->MINTSET;
1506 npcx_i3c_interrupt_all_disable(inst);
1507
1508 /* Clear status and flush fifo */
1509 npcx_i3c_xfer_reset(inst);
1510
1511 LOG_DBG("CCC[0x%02x]", payload->ccc.id);
1512
1513 /* Write emit START and broadcast address (0x7E) */
1514 ret = npcx_i3c_request_emit_start(inst, I3C_BROADCAST_ADDR, NPCX_I3C_MCTRL_TYPE_I3C, false,
1515 0);
1516 if (ret < 0) {
1517 LOG_ERR("CCC[0x%02x] %s START error (%d)", payload->ccc.id,
1518 i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret);
1519
1520 goto out_do_ccc;
1521 }
1522
1523 /* Write CCC command */
1524 npcx_i3c_status_clear_all(inst);
1525 npcx_i3c_errwarn_clear_all(inst);
1526 xfered_len = npcx_i3c_xfer_write_fifo(inst, &payload->ccc.id, 1, payload->ccc.data_len > 0);
1527 if (xfered_len < 0) {
1528 LOG_ERR("CCC[0x%02x] %s command error (%d)", payload->ccc.id,
1529 i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret);
1530 ret = xfered_len;
1531
1532 goto out_do_ccc;
1533 }
1534
1535 /* Write data (defining byte or data bytes) for CCC if needed */
1536 if (payload->ccc.data_len > 0) {
1537 npcx_i3c_status_clear_all(inst);
1538 npcx_i3c_errwarn_clear_all(inst);
1539 xfered_len = npcx_i3c_xfer_write_fifo(inst, payload->ccc.data,
1540 payload->ccc.data_len, false);
1541 if (xfered_len < 0) {
1542 LOG_ERR("CCC[0x%02x] %s command payload error (%d)", payload->ccc.id,
1543 i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct",
1544 ret);
1545 ret = xfered_len;
1546
1547 goto out_do_ccc;
1548 }
1549
1550 /* Write back the transferred bytes */
1551 payload->ccc.num_xfer = xfered_len;
1552 }
1553
1554 /* Wait message transfer complete */
1555 if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE), NPCX_I3C_CHK_TIMEOUT_US,
1556 NULL) == false) {
1557 ret = -ETIMEDOUT;
1558 LOG_DBG("Check complete timeout");
1559 goto out_do_ccc;
1560 }
1561
1562 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */
1563
1564 /* For direct CCC */
1565 if (!i3c_ccc_is_payload_broadcast(payload)) {
1566 /*
1567 * If there are payload(s) for each target,
1568 * RESTART and then send payload for each target.
1569 */
1570 for (int idx = 0; idx < payload->targets.num_targets; idx++) {
1571 struct i3c_ccc_target_payload *tgt_payload =
1572 &payload->targets.payloads[idx];
1573
1574 bool is_read = (tgt_payload->rnw == 1U);
1575
1576 xfered_len = npcx_i3c_do_one_xfer(
1577 inst, tgt_payload->addr, NPCX_I3C_MCTRL_TYPE_I3C, tgt_payload->data,
1578 tgt_payload->data_len, is_read, true, false, false);
1579 if (xfered_len < 0) {
1580 LOG_ERR("CCC[0x%02x] target payload error (%d)", payload->ccc.id,
1581 ret);
1582 ret = xfered_len;
1583
1584 goto out_do_ccc;
1585 }
1586
1587 /* Write back the total number of bytes transferred */
1588 tgt_payload->num_xfer = xfered_len;
1589 }
1590 }
1591
1592 out_do_ccc:
1593 npcx_i3c_request_emit_stop(inst);
1594
1595 npcx_i3c_interrupt_enable(inst, intmask);
1596
1597 npcx_i3c_mutex_unlock(dev);
1598
1599 return ret;
1600 }
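/*
 * Illustrative caller-side usage (assuming the I3C_CCC_RSTDAA macro from
 * <zephyr/drivers/i3c/ccc.h>): a broadcast RSTDAA reaches this function
 * through i3c_do_ccc():
 *
 *  struct i3c_ccc_payload ccc_payload = {0};
 *
 *  ccc_payload.ccc.id = I3C_CCC_RSTDAA;
 *  ret = i3c_do_ccc(dev, &ccc_payload);
 */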
1601
1602 #ifdef CONFIG_I3C_USE_IBI
1603 /*
1604 * brief Callback to service target initiated IBIs in workqueue.
1605 *
1606 * param[in] work Pointer to k_work item.
1607 */
static void npcx_i3c_ibi_work(struct k_work *work)
1609 {
1610 uint8_t payload[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];
1611 size_t payload_sz = 0;
1612
1613 struct i3c_ibi_work *i3c_ibi_work = CONTAINER_OF(work, struct i3c_ibi_work, work);
1614 const struct device *dev = i3c_ibi_work->controller;
1615 const struct npcx_i3c_config *config = dev->config;
1616 struct npcx_i3c_data *data = dev->data;
1617 struct i3c_reg *inst = config->base;
1618 struct i3c_device_desc *target = NULL;
1619 uint32_t ibitype, ibiaddr;
1620 int ret;
1621
1622 k_sem_take(&data->ibi_lock_sem, K_FOREVER);
1623
1624 if (npcx_i3c_state_get(inst) != MSTATUS_STATE_TGTREQ) {
1625 LOG_DBG("IBI work %p running not because of IBI", work);
1626 LOG_ERR("%s: IBI not in TGTREQ state, state : %#x", __func__,
1627 npcx_i3c_state_get(inst));
1628 LOG_ERR("%s: MSTATUS 0x%08x MERRWARN 0x%08x", __func__, inst->MSTATUS,
1629 inst->MERRWARN);
1630 npcx_i3c_request_emit_stop(inst);
1631
1632 goto out_ibi_work;
1633 };
1634
1635 /* Use auto IBI to service the IBI */
1636 npcx_i3c_request_auto_ibi(inst);
1637
1638 /* Wait for target to win address arbitration (ibitype and ibiaddr) */
1639 if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_IBIWON), NPCX_I3C_CHK_TIMEOUT_US,
1640 NULL) == false) {
1641 LOG_ERR("IBI work, IBIWON timeout");
1642 LOG_ERR("%s: MSTATUS 0x%08x MERRWARN 0x%08x", __func__, inst->MSTATUS,
1643 inst->MERRWARN);
1644 npcx_i3c_request_emit_stop(inst);
1645
1646 goto out_ibi_work;
1647 }
1648
1649 ibitype = GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_IBITYPE);
1650 ibiaddr = GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_IBIADDR);
1651
1652 switch (ibitype) {
1653 case MSTATUS_IBITYPE_IBI:
1654 ret = npcx_i3c_xfer_read_fifo(inst, &payload[0], sizeof(payload));
1655 if (ret >= 0) {
1656 payload_sz = (size_t)ret;
1657 } else {
1658 LOG_ERR("Error reading IBI payload");
1659 npcx_i3c_request_emit_stop(inst);
1660
1661 goto out_ibi_work;
1662 }
1663 break;
1664 case MSTATUS_IBITYPE_HJ:
1665 npcx_i3c_ibi_respond_ack(inst);
1666 npcx_i3c_request_emit_stop(inst);
1667 break;
1668 case MSTATUS_IBITYPE_CR:
1669 LOG_DBG("Controller role handoff not supported");
1670 npcx_i3c_ibi_respond_nack(inst);
1671 npcx_i3c_request_emit_stop(inst);
1672 break;
1673 default:
1674 break;
1675 }
1676
1677 if (npcx_i3c_has_error(inst)) {
1678 LOG_ERR("%s: unexpected error, ibi type:%d", __func__, ibitype);
1679 /*
1680 * If the controller detects any errors, simply
1681 * emit a STOP to abort the IBI. The target will
1682 * raise IBI again if so desired.
1683 */
1684 npcx_i3c_request_emit_stop(inst);
1685
1686 goto out_ibi_work;
1687 }
1688
1689 switch (ibitype) {
1690 case MSTATUS_IBITYPE_IBI:
1691 target = i3c_dev_list_i3c_addr_find(dev, (uint8_t)ibiaddr);
1692 if (target != NULL) {
1693 if (i3c_ibi_work_enqueue_target_irq(target, &payload[0], payload_sz) != 0) {
1694 LOG_ERR("Error enqueue IBI IRQ work");
1695 }
1696 } else {
1697 LOG_ERR("IBI (MDB) target not in the list");
1698 }
1699
1700 /* Finishing the IBI transaction */
1701 npcx_i3c_request_emit_stop(inst);
1702 break;
1703 case MSTATUS_IBITYPE_HJ:
1704 if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
1705 LOG_ERR("Error enqueue IBI HJ work");
1706 }
1707 break;
1708 case MSTATUS_IBITYPE_CR:
1709 /* Not supported, for future use. */
1710 break;
1711 default:
1712 break;
1713 }
1714
1715 out_ibi_work:
1716 npcx_i3c_xfer_reset(inst);
1717
1718 k_sem_give(&data->ibi_lock_sem);
1719
1720 /* Re-enable target initiated IBI interrupt. */
1721 inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
1722 }
1723
1724 /* Set local IBI information to IBIRULES register */
static void npcx_i3c_ibi_rules_setup(struct npcx_i3c_data *data, struct i3c_reg *inst)
1726 {
1727 uint32_t ibi_rules;
1728 int idx;
1729
1730 ibi_rules = 0;
1731
1732 for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
1733 uint32_t addr_6bit;
1734
1735 /* Extract the lower 6-bit of target address */
1736 addr_6bit = (uint32_t)data->ibi.addr[idx] & IBIRULES_ADDR_MSK;
1737
1738 /* Shift into correct place */
1739 addr_6bit <<= idx * IBIRULES_ADDR_SHIFT;
1740
1741 /* Put into the temporary IBI Rules register */
1742 ibi_rules |= addr_6bit;
1743 }
1744
1745 if (!data->ibi.msb) {
1746 /* The MSB0 field is 1 if MSB is 0 */
1747 ibi_rules |= BIT(NPCX_I3C_IBIRULES_MSB0);
1748 }
1749
1750 if (!data->ibi.has_mandatory_byte) {
1751 /* The NOBYTE field is 1 if there is no mandatory byte */
1752 ibi_rules |= BIT(NPCX_I3C_IBIRULES_NOBYTE);
1753 }
1754
1755 /* Update the register */
1756 inst->IBIRULES = ibi_rules;
1757
1758 LOG_DBG("MIBIRULES 0x%08x", ibi_rules);
1759 }
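/*
 * Packing example derived from the loop above: with ibi.addr[] = {0x0A, 0x0B,
 * 0, 0, 0}, the low 6 bits of addr[0] land in IBIRULES[5:0] and those of
 * addr[1] in IBIRULES[11:6], so IBIRULES = (0x0B << 6) | 0x0A = 0x2CA, plus
 * the MSB0 and NOBYTE bits as applicable.
 */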
1760
static int npcx_i3c_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
1762 {
1763 const struct npcx_i3c_config *config = dev->config;
1764 struct npcx_i3c_data *data = dev->data;
1765 struct i3c_reg *inst = config->base;
1766 struct i3c_ccc_events i3c_events;
1767 uint8_t idx;
1768 bool msb, has_mandatory_byte;
1769 int ret;
1770
1771 /* Check target IBI request capable */
1772 if (!i3c_device_is_ibi_capable(target)) {
1773 LOG_ERR("%s: device is not ibi capable", __func__);
1774 return -EINVAL;
1775 }
1776
1777 if (data->ibi.num_addr >= ARRAY_SIZE(data->ibi.addr)) {
1778 /* No more free entries in the IBI Rules table */
1779 LOG_ERR("%s: no more free space in the IBI rules table", __func__);
1780 return -ENOMEM;
1781 }
1782
1783 /* Check whether the selected target is already in the list */
1784 for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
1785 if (data->ibi.addr[idx] == target->dynamic_addr) {
1786 LOG_ERR("%s: selected target is already in the list", __func__);
1787 return -EINVAL;
1788 }
1789 }
1790
1791 /* Disable controller interrupt while we configure IBI rules. */
1792 inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);
1793
1794 LOG_DBG("IBI enabling for 0x%02x (BCR 0x%02x)", target->dynamic_addr, target->bcr);
1795
1796 msb = (target->dynamic_addr & BIT(6)) == BIT(6); /* MSB (bit 6) of the 7-bit address */
1797 has_mandatory_byte = i3c_ibi_has_payload(target);
1798
1799 /*
1800 * If there are already addresses in the table, we must
1801 * check if the incoming entry is compatible with
1802 * the existing ones.
1803 *
1804 * All targets in the list should follow the same IBI rules.
1805 */
1806 if (data->ibi.num_addr > 0) {
1807 /*
1808 * 1. Either all devices in the table use mandatory
1809 * bytes, or none of them do.
1810 *
1811 * 2. Each entry only captures the lowest 6 bits of the
1812 * address. The MSB (bit 6 of the 7-bit address) is captured
1813 * separately in another bit of the register, so all addresses must share the same MSB.
1814 */
1815 if ((has_mandatory_byte != data->ibi.has_mandatory_byte) ||
1816 (msb != data->ibi.msb)) {
1817 ret = -EINVAL;
1818 LOG_ERR("%s: New IBI does not have same mandatory byte or msb"
1819 " as previous IBI",
1820 __func__);
1821 goto out_ibi_enable;
1822 }
1823
1824 /* Find an empty address slot */
1825 for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
1826 if (data->ibi.addr[idx] == 0U) {
1827 break;
1828 }
1829 }
1830
1831 if (idx >= ARRAY_SIZE(data->ibi.addr)) {
1832 ret = -ENOTSUP;
1833 LOG_ERR("Cannot support more IBIs");
1834 goto out_ibi_enable;
1835 }
1836 } else {
1837 /*
1838 * If the incoming address is the first in the table,
1839 * it dictates future compatibilities.
1840 */
1841 data->ibi.has_mandatory_byte = has_mandatory_byte;
1842 data->ibi.msb = msb;
1843
1844 idx = 0;
1845 }
1846
1847 data->ibi.addr[idx] = target->dynamic_addr;
1848 data->ibi.num_addr += 1U;
1849
1850 npcx_i3c_ibi_rules_setup(data, inst);
1851
1852 /* Enable target IBI event by ENEC command */
1853 i3c_events.events = I3C_CCC_EVT_INTR;
1854 ret = i3c_ccc_do_events_set(target, true, &i3c_events);
1855 if (ret != 0) {
1856 LOG_ERR("Error sending IBI ENEC for 0x%02x (%d)", target->dynamic_addr, ret);
1857 }
1858
1859 out_ibi_enable:
1860 if (data->ibi.num_addr > 0U) {
1861 /*
1862 * If there is at least one target in the list,
1863 * enable controller to raise interrupt when a target
1864 * initiates IBI.
1865 */
1866 inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
1867 }
1868
1869 return ret;
1870 }
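/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * normally reach this routine through the generic Zephyr IBI helper, e.g.
 *
 *   struct i3c_device_desc *desc = i3c_device_find(i3c_dev, &dev_id);
 *
 *   if (desc != NULL && i3c_ibi_enable(desc) != 0) {
 *           // IBI could not be enabled (table full, incompatible rules, ...)
 *   }
 */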
1871
1872 static int npcx_i3c_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
1873 {
1874 const struct npcx_i3c_config *config = dev->config;
1875 struct npcx_i3c_data *data = dev->data;
1876 struct i3c_reg *inst = config->base;
1877 struct i3c_ccc_events i3c_events;
1878 int ret;
1879 int idx;
1880
1881 if (!i3c_device_is_ibi_capable(target)) {
1882 LOG_ERR("%s: device is not ibi capable", __func__);
1883 return -EINVAL;
1884 }
1885
1886 for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
1887 if (target->dynamic_addr == data->ibi.addr[idx]) {
1888 break;
1889 }
1890 }
1891
1892 if (idx == ARRAY_SIZE(data->ibi.addr)) {
1893 LOG_ERR("%s: target is not in list of registered addresses", __func__);
1894 return -ENODEV;
1895 }
1896
1897 /* Disable controller interrupt while we configure IBI rules. */
1898 inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);
1899
1900 /* Clear the ibi rule data */
1901 data->ibi.addr[idx] = 0U;
1902 data->ibi.num_addr -= 1U;
1903
1904 /* Disable target IBI */
1905 i3c_events.events = I3C_CCC_EVT_INTR;
1906 ret = i3c_ccc_do_events_set(target, false, &i3c_events);
1907 if (ret != 0) {
1908 LOG_ERR("Error sending IBI DISEC for 0x%02x (%d)", target->dynamic_addr, ret);
1909 }
1910
1911 npcx_i3c_ibi_rules_setup(data, inst);
1912
1913 if (data->ibi.num_addr > 0U) {
1914 /*
1915 * Enable controller to raise interrupt when a target
1916 * initiates IBI.
1917 */
1918 inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
1919 }
1920
1921 return ret;
1922 }
1923 #endif /* CONFIG_I3C_USE_IBI */
1924
1925 static int npcx_i3c_target_ibi_raise(const struct device *dev, struct i3c_ibi *request)
1926 {
1927 const struct npcx_i3c_config *config = dev->config;
1928 struct i3c_reg *inst = config->base;
1929 struct npcx_i3c_data *data = dev->data;
1930 int index;
1931
1932 /* The request or the payload is not specified */
1933 if ((request == NULL) || ((request->payload_len) && (request->payload == NULL))) {
1934 return -EINVAL;
1935 }
1936
1937 /* The I3C module is not in target mode or the bus is currently in HDR mode */
1938 if (!IS_BIT_SET(inst->CONFIG, NPCX_I3C_CONFIG_TGTENA) ||
1939 IS_BIT_SET(inst->STATUS, NPCX_I3C_STATUS_STHDR)) {
1940 return -EINVAL;
1941 }
1942
1943 switch (request->ibi_type) {
1944 case I3C_IBI_TARGET_INTR:
1945 if (IS_BIT_SET(inst->STATUS, NPCX_I3C_STATUS_IBIDIS)) {
1946 return -ENOTSUP;
1947 }
1948
1949 if (request->payload_len == 0) {
1950 LOG_ERR("%s: IBI invalid payload_len, len: %#x", __func__,
1951 request->payload_len);
1952 return -EINVAL;
1953 }
1954
1955 k_sem_take(&data->target_event_lock_sem, K_FOREVER);
1956 set_oper_state(dev, NPCX_I3C_OP_STATE_IBI);
1957
1958 /* Mandatory data byte */
1959 SET_FIELD(inst->CTRL, NPCX_I3C_CTRL_IBIDATA, request->payload[0]);
1960
1961 /* Extended data */
1962 if (request->payload_len > 1) {
1963 if (request->payload_len <= 32) {
1964 for (index = 1; index < (request->payload_len - 1); index++) {
1965 inst->WDATAB = request->payload[index];
1966 }
1967
1968 inst->WDATABE = request->payload[index];
1969 } else {
1970 /* transfer data from MDMA */
1971 }
1972
1973 SET_FIELD(inst->IBIEXT1, NPCX_I3C_IBIEXT1_CNT, 0);
1974 inst->CTRL |= BIT(NPCX_I3C_CTRL_EXTDATA);
1975 }
1976
1977 SET_FIELD(inst->CTRL, NPCX_I3C_CTRL_EVENT, CTRL_EVENT_IBI);
1978 break;
1979
1980 case I3C_IBI_CONTROLLER_ROLE_REQUEST:
1981 if (IS_BIT_SET(inst->STATUS, NPCX_I3C_STATUS_MRDIS)) {
1982 return -ENOTSUP;
1983 }
1984
1985 /* A controller role request can be generated only by a target with
1986 * controller capability
1987 */
1988 if (GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA) != MCONFIG_CTRENA_CAPABLE) {
1989 return -ENOTSUP;
1990 }
1991
1992 k_sem_take(&data->target_event_lock_sem, K_FOREVER);
1993 set_oper_state(dev, NPCX_I3C_OP_STATE_IBI);
1994
1995 SET_FIELD(inst->CTRL, NPCX_I3C_CTRL_EVENT, CTRL_EVENT_CNTLR_REQ);
1996 break;
1997
1998 case I3C_IBI_HOTJOIN:
1999 if (IS_BIT_SET(inst->STATUS, NPCX_I3C_STATUS_HJDIS)) {
2000 return -ENOTSUP;
2001 }
2002
2003 k_sem_take(&data->target_event_lock_sem, K_FOREVER);
2004 set_oper_state(dev, NPCX_I3C_OP_STATE_IBI);
2005
2006 inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_TGTENA);
2007 SET_FIELD(inst->CTRL, NPCX_I3C_CTRL_EVENT, CTRL_EVENT_HJ);
2008 inst->CONFIG |= BIT(NPCX_I3C_CONFIG_TGTENA);
2009 break;
2010
2011 default:
2012 return -EINVAL;
2013 }
2014
2015 return 0;
2016 }
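/*
 * Usage sketch (illustrative only): raising a target interrupt request (TIR)
 * from application code, assuming "i3c_dev" is this instance running in
 * target mode and a one-byte mandatory data byte of 0xAA.
 *
 *   uint8_t mdb[] = { 0xAA };
 *   struct i3c_ibi request = {
 *           .ibi_type = I3C_IBI_TARGET_INTR,
 *           .payload = mdb,
 *           .payload_len = sizeof(mdb),
 *   };
 *
 *   int err = i3c_ibi_raise(i3c_dev, &request);
 */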
2017
2018 #ifdef CONFIG_I3C_NPCX_DMA
2019 static uint16_t npcx_i3c_target_get_mdmafb_count(const struct device *dev)
2020 {
2021 const struct npcx_i3c_config *config = dev->config;
2022 struct mdma_reg *mdma_inst = config->mdma_base;
2023
2024 if (mdma_inst->MDMA_CTCNT0 < mdma_inst->MDMA_TCNT0) {
2025 return (mdma_inst->MDMA_TCNT0 - mdma_inst->MDMA_CTCNT0);
2026 } else {
2027 return 0;
2028 }
2029 }
2030
2031 static uint16_t npcx_i3c_target_get_mdmatb_count(const struct device *dev)
2032 {
2033 const struct npcx_i3c_config *config = dev->config;
2034 struct mdma_reg *mdma_inst = config->mdma_base;
2035
2036 if (mdma_inst->MDMA_CTCNT1 < mdma_inst->MDMA_TCNT1) {
2037 return (mdma_inst->MDMA_TCNT1 - mdma_inst->MDMA_CTCNT1);
2038 } else {
2039 return 0;
2040 }
2041 }
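/*
 * Note: the "fb" (from-bus) path uses MDMA channel 0 and the "tb" (to-bus)
 * path uses MDMA channel 1. Both helpers above return
 * MDMA_TCNTx - MDMA_CTCNTx (clamped at zero), which the driver uses as the
 * byte count for the corresponding transfer direction.
 */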
2042
2043 static void npcx_i3c_target_disable_mdmafb(const struct device *dev)
2044 {
2045 const struct npcx_i3c_config *config = dev->config;
2046 struct i3c_reg *i3c_inst = config->base;
2047 struct mdma_reg *mdma_inst = config->mdma_base;
2048
2049 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);
2050 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */
2051 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_SIEN);
2052 SET_FIELD(i3c_inst->DMACTRL, NPCX_I3C_DMACTRL_DMAFB, MDMA_DMAFB_DISABLE);
2053
2054 /* Ignore DA and detect all START and STOP */
2055 i3c_inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_MATCHSS);
2056
2057 /* Flush the tx and rx FIFO */
2058 i3c_inst->DATACTRL |= BIT(NPCX_I3C_DATACTRL_FLUSHTB) | BIT(NPCX_I3C_DATACTRL_FLUSHFB);
2059 }
2060
2061 static void npcx_i3c_target_enable_mdmafb(const struct device *dev, uint8_t *buf, uint16_t len)
2062 {
2063 const struct npcx_i3c_config *config = dev->config;
2064 struct i3c_reg *i3c_inst = config->base;
2065 struct mdma_reg *mdma_inst = config->mdma_base;
2066
2067 /* Make sure MDMA is disabled before enabling it */
2068 if (IS_BIT_SET(mdma_inst->MDMA_CTL0, NPCX_MDMA_CTL_MDMAEN) != 0) {
2069 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);
2070 LOG_DBG("MDMAFB_EN=1 before enable");
2071 }
2072
2073 /* Detect a START and STOP only if the transaction
2074 * address matches the target address (STATUS.MATCHED=1).
2075 */
2076 i3c_inst->CONFIG |= BIT(NPCX_I3C_CONFIG_MATCHSS);
2077 /* Enable manual DMA control */
2078 SET_FIELD(i3c_inst->DMACTRL, NPCX_I3C_DMACTRL_DMAFB, MDMA_DMAFB_EN_MANUAL);
2079
2080 /* Read Operation (MDMA CH_0) */
2081 mdma_inst->MDMA_TCNT0 = len; /* Set MDMA transfer count */
2082 mdma_inst->MDMA_DSTB0 = (uint32_t)buf; /* Set destination address */
2083 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */
2084 mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_SIEN); /* Enable stop interrupt */
2085 mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */
2086 }
2087
2088 static void npcx_i3c_target_disable_mdmatb(const struct device *dev)
2089 {
2090 const struct npcx_i3c_config *config = dev->config;
2091 struct i3c_reg *i3c_inst = config->base;
2092 struct mdma_reg *mdma_inst = config->mdma_base;
2093
2094 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);
2095 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */
2096 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_SIEN);
2097 SET_FIELD(i3c_inst->DMACTRL, NPCX_I3C_DMACTRL_DMATB, MDMA_DMATB_DISABLE);
2098
2099 /* Ignore DA and detect all START and STOP */
2100 i3c_inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_MATCHSS);
2101
2102 /* Flush the tx and rx FIFO */
2103 i3c_inst->DATACTRL |= BIT(NPCX_I3C_DATACTRL_FLUSHTB) | BIT(NPCX_I3C_DATACTRL_FLUSHFB);
2104 }
2105
2106 static void npcx_i3c_target_enable_mdmatb(const struct device *dev, uint8_t *buf, uint16_t len)
2107 {
2108 const struct npcx_i3c_config *config = dev->config;
2109 struct i3c_reg *i3c_inst = config->base;
2110 struct mdma_reg *mdma_inst = config->mdma_base;
2111
2112 /* Make sure MDMA is disabled before enabling it */
2113 if (IS_BIT_SET(mdma_inst->MDMA_CTL1, NPCX_MDMA_CTL_MDMAEN) != 0) {
2114 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_MDMAEN);
2115 LOG_DBG("MDMATB_EN=1 before enable");
2116 }
2117
2118 /* Detect a START and STOP only if the transaction address matches the target address */
2119 i3c_inst->CONFIG |= BIT(NPCX_I3C_CONFIG_MATCHSS);
2120
2121 /* Enable DMA only for one frame.
2122 * MATCHSS must be set to 1 before selecting '0x1' for DMATB field
2123 *
2124 * In SDR DMATB is automatically cleared if MATCHED bit is set to 1 and either STOP bit
2125 * or START bit is set to 1.
2126 *
2127 * In HDR-DDR mode, DMATB is not automatically cleared.
2128 */
2129 SET_FIELD(i3c_inst->DMACTRL, NPCX_I3C_DMACTRL_DMATB, MDMA_DMATB_EN_ONE_FRAME);
2130
2131 /* Write Operation (MDMA CH_1) */
2132 mdma_inst->MDMA_TCNT1 = len; /* Set MDMA transfer count */
2133 mdma_inst->MDMA_SRCB1 = (uint32_t)buf; /* Set source address */
2134 mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */
2135 mdma_inst->MDMA_CTL1 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */
2136 }
2137
2138 static void npcx_i3c_target_rx_read(const struct device *dev)
2139 {
2140 struct npcx_i3c_data *data = dev->data;
2141 struct i3c_config_target *config_tgt = &data->config_target;
2142
2143 /* Enable the DMA from bus */
2144 npcx_i3c_target_enable_mdmafb(dev, data->mdma_rx_buf, config_tgt->max_read_len);
2145 }
2146
2147 /* brief: Handle the end of a transfer (read request or write request).
2148 * The ending signal might be either STOP or Sr.
2149 * return: -EINVAL:
2150 * 1. operation is not a read or write request.
2151 * 2. start or stop flag is not set.
2152 * -EBUSY: in a write request, still waiting for mdma to finish.
2153 * 0: success
2154 */
2155 static int npcx_i3c_target_xfer_end_handle(const struct device *dev)
2156 {
2157 struct npcx_i3c_data *data = dev->data;
2158 const struct npcx_i3c_config *config = dev->config;
2159 struct i3c_reg *inst = config->base;
2160 struct mdma_reg *mdma_inst = config->mdma_base;
2161 const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
2162 bool is_i3c_start = IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_START);
2163 bool is_i3c_stop = IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_STOP);
2164 enum npcx_i3c_oper_state op_state = get_oper_state(dev);
2165 uint32_t cur_xfer_cnt;
2166 uint32_t timer = 0;
2167 int ret = 0;
2168
2169 if ((op_state != NPCX_I3C_OP_STATE_WR) && (op_state != NPCX_I3C_OP_STATE_RD)) {
2170 LOG_ERR("%s: op_staste error :%d", __func__, op_state);
2171 return -EINVAL;
2172 }
2173
2174 if ((is_i3c_start | is_i3c_stop) == 0) {
2175 LOG_ERR("%s: not the end of xfer, is_start: %d, is_stop:%d", __func__, is_i3c_start,
2176 is_i3c_stop);
2177 return -EINVAL;
2178 }
2179
2180 /* Read request */
2181 if (get_oper_state(dev) == NPCX_I3C_OP_STATE_RD) {
2182 npcx_i3c_target_disable_mdmatb(dev);
2183 goto out_tgt_xfer_end_hdl;
2184 }
2185
2186 /* Write request */
2187 /* Check rx fifo count is 0 */
2188 if (WAIT_FOR((GET_FIELD(inst->DATACTRL, NPCX_I3C_DATACTRL_RXCOUNT) == 0),
2189 I3C_TGT_WR_REQ_WAIT_US, NULL) == false) {
2190 LOG_ERR("%s: target wr_req rxcnt timeout %d", __func__,
2191 GET_FIELD(inst->DATACTRL, NPCX_I3C_DATACTRL_RXCOUNT));
2192 ret = -EIO;
2193 npcx_i3c_target_disable_mdmafb(dev);
2194 goto out_tgt_xfer_end_hdl;
2195 }
2196
2197 /* Check mdma rx transfer count stability */
2198 cur_xfer_cnt = mdma_inst->MDMA_CTCNT0;
2199 while (timer < I3C_TGT_WR_REQ_WAIT_US) {
2200 /* After the stop or Sr, the rx fifo is empty, and the last byte has been
2201 * transferred.
2202 */
2203 if (cur_xfer_cnt != mdma_inst->MDMA_CTCNT0) {
2204 break;
2205 }
2206
2207 /* Keep polling if the transferred count does not change */
2208 k_busy_wait(1);
2209 timer++;
2210 cur_xfer_cnt = mdma_inst->MDMA_CTCNT0;
2211 }
2212
2213 npcx_i3c_target_disable_mdmafb(dev); /* Disable mdma and check the final result */
2214
2215 if (cur_xfer_cnt == mdma_inst->MDMA_CTCNT0) {
2216 #ifdef CONFIG_I3C_TARGET_BUFFER_MODE
2217 if (target_cb && target_cb->buf_write_received_cb) {
2218 target_cb->buf_write_received_cb(data->target_config, data->mdma_rx_buf,
2219 npcx_i3c_target_get_mdmafb_count(dev));
2220 }
2221 #endif
2222 } else {
2223 LOG_ERR("(%s) MDMA rx abnormal, force mdma stop, xfer cnt=%#x",
2224 is_i3c_start ? "Sr" : "STOP", cur_xfer_cnt);
2225 ret = -EBUSY;
2226 }
2227
2228 out_tgt_xfer_end_hdl:
2229 /* Clear DA matched status and re-enable interrupt */
2230 inst->STATUS = BIT(NPCX_I3C_STATUS_MATCHED);
2231 inst->INTSET = BIT(NPCX_I3C_INTSET_MATCHED);
2232
2233 if (is_i3c_start) {
2234 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
2235 }
2236
2237 return ret;
2238 }
2239 #endif /* End of CONFIG_I3C_NPCX_DMA */
2240
2241 static int npcx_i3c_target_tx_write(const struct device *dev, uint8_t *buf, uint16_t len,
2242 uint8_t hdr_mode)
2243 {
2244 if ((buf == NULL) || (len == 0)) {
2245 LOG_ERR("%s: Data buffer configuration failed", __func__);
2246 return -EINVAL;
2247 }
2248
2249 if (hdr_mode != 0) {
2250 LOG_ERR("%s: HDR not supported", __func__);
2251 return -ENOSYS;
2252 }
2253
2254 #ifdef CONFIG_I3C_NPCX_DMA
2255 npcx_i3c_target_enable_mdmatb(dev, buf, len);
2256
2257 return npcx_i3c_target_get_mdmatb_count(dev); /* Return total bytes written */
2258 #else
2259 LOG_ERR("%s: Support dma mode only", __func__);
2260 return -ENOSYS;
2261 #endif
2262 }
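/*
 * Usage sketch (illustrative only): a target application pre-loads its TX
 * data before the controller issues the private read, e.g. from the
 * buf_read_requested_cb invoked in npcx_i3c_target_isr() below. The
 * hdr_mode argument must be 0 since HDR is rejected with -ENOSYS above.
 *
 *   static uint8_t tx_buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *
 *   int ret = i3c_target_tx_write(i3c_dev, tx_buf, sizeof(tx_buf), 0);
 */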
2263
2264 static int npcx_i3c_target_register(const struct device *dev, struct i3c_target_config *cfg)
2265 {
2266 struct npcx_i3c_data *data = dev->data;
2267
2268 data->target_config = cfg;
2269
2270 return 0;
2271 }
2272
2273 static int npcx_i3c_target_unregister(const struct device *dev, struct i3c_target_config *cfg)
2274 {
2275 struct npcx_i3c_data *data = dev->data;
2276
2277 data->target_config = NULL;
2278
2279 return 0;
2280 }
2281
2282 static int npcx_i3c_get_scl_config(struct npcx_i3c_timing_cfg *cfg, uint32_t i3c_src_clk,
2283 uint32_t pp_baudrate_hz, uint32_t od_baudrate_hz)
2284 {
2285 uint32_t i3c_div, freq;
2286 uint32_t ppbaud, odbaud;
2287 uint32_t pplow_ns, odlow_ns;
2288
2289 if (cfg == NULL) {
2290 LOG_ERR("Freq config NULL");
2291 return -EINVAL;
2292 }
2293
2294 if ((pp_baudrate_hz == 0) || (pp_baudrate_hz > I3C_SCL_PP_FREQ_MAX_MHZ) ||
2295 (od_baudrate_hz == 0) || (od_baudrate_hz > I3C_SCL_OD_FREQ_MAX_MHZ)) {
2296 LOG_ERR("I3C PP_SCL should within 12.5 Mhz, input: %d", pp_baudrate_hz);
2297 LOG_ERR("I3C OD_SCL should within 4.17 Mhz, input: %d", od_baudrate_hz);
2298 return -EINVAL;
2299 }
2300
2301 /* Fixed PPLOW = 0 to achieve 50% duty cycle */
2302 /* pp_freq = ((f_mclkd / 2) / (PPBAUD+1)) */
2303 freq = i3c_src_clk / 2UL;
2304
2305 i3c_div = freq / pp_baudrate_hz;
2306 i3c_div = (i3c_div == 0UL) ? 1UL : i3c_div;
2307 if (freq / i3c_div > pp_baudrate_hz) {
2308 i3c_div++;
2309 }
2310
2311 if (i3c_div > PPBAUD_DIV_MAX) {
2312 LOG_ERR("PPBAUD out of range");
2313 return -EINVAL;
2314 }
2315
2316 ppbaud = i3c_div - 1UL;
2317 freq /= i3c_div;
2318
2319 /* Check PP low period in spec (should be the same as PPHIGH) */
2320 pplow_ns = (uint32_t)(NSEC_PER_SEC / (2UL * freq));
2321 if (pplow_ns < I3C_BUS_TLOW_PP_MIN_NS) {
2322 LOG_ERR("PPLOW ns out of spec");
2323 return -EINVAL;
2324 }
2325
2326 /* Fixed odhpp = 1 configuration */
2327 /* odFreq = (2*freq) / (ODBAUD + 2), 1 <= ODBAUD <= 255 */
2328 i3c_div = (2UL * freq) / od_baudrate_hz;
2329 i3c_div = i3c_div < 2UL ? 2UL : i3c_div;
2330 if ((2UL * freq / i3c_div) > od_baudrate_hz) {
2331 i3c_div++;
2332 }
2333
2334 odbaud = i3c_div - 2UL;
2335 freq = (2UL * freq) / i3c_div; /* For I2C usage in the future */
2336
2337 /* Check OD low period in spec */
2338 odlow_ns = (odbaud + 1UL) * pplow_ns;
2339 if (odlow_ns < I3C_BUS_TLOW_OD_MIN_NS) {
2340 LOG_ERR("ODBAUD ns out of spec");
2341 return -EINVAL;
2342 }
2343
2344 cfg->pplow = 0;
2345 cfg->odhpp = 1;
2346 cfg->ppbaud = ppbaud;
2347 cfg->odbaud = odbaud;
2348
2349 return 0;
2350 }
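/*
 * Worked example (illustrative only): MCLKD = 45 MHz, PP target = 12.5 MHz,
 * OD target = 4.17 MHz.
 *   freq = 45 MHz / 2 = 22.5 MHz
 *   PP: div = 22.5 / 12.5 = 1, but 22.5 / 1 > 12.5 so div = 2 -> PPBAUD = 1,
 *       freq = 11.25 MHz, PP low/high = 1e9 / (2 * 11.25e6) = 44 ns (>= 24 ns)
 *   OD: div = (2 * 11.25) / 4.17 = 5, but 22.5 / 5 > 4.17 so div = 6 ->
 *       ODBAUD = 4, OD SCL = 22.5 / 6 = 3.75 MHz,
 *       OD low = (4 + 1) * 44 ns = 220 ns (>= 200 ns)
 */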
2351
2352 static int npcx_i3c_freq_init(const struct device *dev)
2353 {
2354 const struct npcx_i3c_config *config = dev->config;
2355 struct npcx_i3c_data *data = dev->data;
2356 struct i3c_reg *inst = config->base;
2357 const struct device *const clk_dev = config->clock_dev;
2358 struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;
2359 uint32_t scl_pp = ctrl_config->scl.i3c;
2360 uint32_t scl_od = config->clocks.i3c_od_scl_hz;
2361 struct npcx_i3c_timing_cfg timing_cfg;
2362 uint32_t mclkd;
2363 int ret;
2364
2365 ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clock_subsys,
2366 &mclkd);
2367 if (ret != 0x0) {
2368 LOG_ERR("Get I3C source clock fail %d", ret);
2369 return -EINVAL;
2370 }
2371
2372 LOG_DBG("MCLKD: %d", mclkd);
2373 LOG_DBG("SCL_PP_FEQ MAX: %d", I3C_SCL_PP_FREQ_MAX_MHZ);
2374 LOG_DBG("SCL_OD_FEQ MAX: %d", I3C_SCL_OD_FREQ_MAX_MHZ);
2375 LOG_DBG("scl_pp: %d", scl_pp);
2376 LOG_DBG("scl_od: %d", scl_od);
2377 LOG_DBG("hdr: %d", ctrl_config->supported_hdr);
2378
2379 /* MCLKD = MCLK / I3C_DIV(1 or 2)
2380 * MCLKD must be between 40 MHz and 50 MHz.
2381 */
2382 if (mclkd == MCLKD_FREQ_MHZ(40)) {
2383 /* Set default I3C_SCL configuration */
2384 timing_cfg = npcx_def_speed_cfg[NPCX_I3C_BUS_SPEED_40MHZ];
2385 } else if (mclkd == MCLKD_FREQ_MHZ(45)) {
2386 /* Set default I3C_SCL configuration */
2387 timing_cfg = npcx_def_speed_cfg[NPCX_I3C_BUS_SPEED_45MHZ];
2388 } else if (mclkd == MCLKD_FREQ_MHZ(48)) {
2389 /* Set default I3C_SCL configuration */
2390 timing_cfg = npcx_def_speed_cfg[NPCX_I3C_BUS_SPEED_48MHZ];
2391 } else if (mclkd == MCLKD_FREQ_MHZ(50)) {
2392 /* Set default I3C_SCL configuration */
2393 timing_cfg = npcx_def_speed_cfg[NPCX_I3C_BUS_SPEED_50MHZ];
2394 } else {
2395 LOG_ERR("Unsupported MCLKD freq for %s.", dev->name);
2396 return -EINVAL;
2397 }
2398
2399 ret = npcx_i3c_get_scl_config(&timing_cfg, mclkd, scl_pp, scl_od);
2400 if (ret != 0x0) {
2401 LOG_ERR("Adjust I3C frequency fail");
2402 return -EINVAL;
2403 }
2404
2405 /* Apply SCL_PP and SCL_OD */
2406 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPBAUD, timing_cfg.ppbaud);
2407 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPLOW, timing_cfg.pplow);
2408 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_ODBAUD, timing_cfg.odbaud);
2409 if (timing_cfg.odhpp != 0) {
2410 inst->MCONFIG |= BIT(NPCX_I3C_MCONFIG_ODHPP);
2411 } else {
2412 inst->MCONFIG &= ~BIT(NPCX_I3C_MCONFIG_ODHPP);
2413 }
2414 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_I2CBAUD, I3C_BUS_I2C_BAUD_RATE_FAST_MODE);
2415
2416 LOG_DBG("ppbaud: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPBAUD));
2417 LOG_DBG("odbaud: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_ODBAUD));
2418 LOG_DBG("pplow: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPLOW));
2419 LOG_DBG("odhpp: %d", IS_BIT_SET(inst->MCONFIG, NPCX_I3C_MCONFIG_ODHPP));
2420 LOG_DBG("i2cbaud: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_I2CBAUD));
2421
2422 return 0;
2423 }
2424
2425 static int npcx_i3c_apply_cntlr_config(const struct device *dev)
2426 {
2427 const struct npcx_i3c_config *config = dev->config;
2428 struct i3c_reg *inst = config->base;
2429 const struct device *const clk_dev = config->clock_dev;
2430 int idx_module = GET_MODULE_ID(config->instance_id);
2431 uint32_t apb4_rate;
2432 uint8_t bamatch;
2433 int ret;
2434
2435 /* Select I3C module MDMA controller or target mode */
2436 npcx_i3c_target_sel(idx_module, false);
2437
2438 /* Disable all interrupts */
2439 npcx_i3c_interrupt_all_disable(inst);
2440
2441 /* Initialize baud rate: PPLOW=0, PPBAUD, ODHPP=1, ODBAUD */
2442 if (npcx_i3c_freq_init(dev) != 0x0) {
2443 return -EINVAL;
2444 }
2445
2446 /* Enable external high-keeper */
2447 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_HKEEP, MCONFIG_HKEEP_EXT_SDA_SCL);
2448 /* Enable open-drain stop */
2449 inst->MCONFIG |= BIT(NPCX_I3C_MCONFIG_ODSTOP);
2450 /* Enable timeout */
2451 inst->MCONFIG &= ~BIT(NPCX_I3C_MCONFIG_DISTO);
2452 /* Flush tx and rx FIFO buffers */
2453 npcx_i3c_fifo_flush(inst);
2454
2455 /* Set bus available match value in target register */
2456 ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->ref_clk_subsys,
2457 &apb4_rate);
2458 LOG_DBG("APB4_CLK: %d", apb4_rate);
2459
2460 if (ret != 0x0) {
2461 LOG_ERR("%s: Get APB4 source clock fail %d", __func__, ret);
2462 return -EINVAL;
2463 }
2464
2465 bamatch = get_bus_available_match_val(apb4_rate);
2466 LOG_DBG("BAMATCH: %d", bamatch);
2467
2468 SET_FIELD(inst->CONFIG, NPCX_I3C_CONFIG_BAMATCH, bamatch);
2469
2470 return 0;
2471 }
2472
2473 static int npcx_i3c_apply_target_config(const struct device *dev)
2474 {
2475 const struct npcx_i3c_config *config = dev->config;
2476 struct npcx_i3c_data *data = dev->data;
2477 struct i3c_config_target *config_target = &data->config_target;
2478 struct i3c_reg *inst = config->base;
2479 const struct device *const clk_dev = config->clock_dev;
2480 uint32_t apb4_rate;
2481 uint8_t bamatch;
2482 int idx_module = GET_MODULE_ID(config->instance_id);
2483 int ret;
2484 uint64_t pid;
2485
2486 /* Select I3C module MDMA controller or target mode */
2487 npcx_i3c_target_sel(idx_module, true);
2488
2489 /* Set bus available match value in target register */
2490 ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->ref_clk_subsys,
2491 &apb4_rate);
2492 LOG_DBG("APB4_CLK: %d", apb4_rate);
2493
2494 if (ret != 0x0) {
2495 LOG_ERR("%s: Get APB4 source clock fail %d", __func__, ret);
2496 return -EINVAL;
2497 }
2498
2499 bamatch = get_bus_available_match_val(apb4_rate);
2500 LOG_DBG("BAMATCH: %d", bamatch);
2501 SET_FIELD(inst->CONFIG, NPCX_I3C_CONFIG_BAMATCH, bamatch);
2502
2503 /* Set Provisional ID */
2504 pid = config_target->pid;
2505
2506 /* PID[47:33] MIPI manufacturer ID */
2507 SET_FIELD(inst->VENDORID, NPCX_I3C_VENDORID_VID, (uint32_t)GET_PID_VENDOR_ID(pid));
2508
2509 /* PID[32] Vendor fixed value(0) or random value(1) */
2510 if (config_target->pid_random) {
2511 inst->CONFIG |= BIT(NPCX_I3C_CONFIG_IDRAND);
2512 } else {
2513 inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_IDRAND);
2514 }
2515
2516 /* PID[31:0] vendor fixed value */
2517 inst->PARTNO = (uint32_t)GET_PID_PARTNO(pid);
2518
2519 LOG_DBG("pid: %#llx", pid);
2520 LOG_DBG("vendro id: %#x", (uint32_t)GET_PID_VENDOR_ID(pid));
2521 LOG_DBG("id type: %d", (uint32_t)GET_PID_ID_TYP(pid));
2522 LOG_DBG("partno: %#x", (uint32_t)GET_PID_PARTNO(pid));
2523
2524 SET_FIELD(inst->IDEXT, NPCX_I3C_IDEXT_DCR, config_target->dcr);
2525 SET_FIELD(inst->IDEXT, NPCX_I3C_IDEXT_BCR, config_target->bcr);
2526 SET_FIELD(inst->CONFIG, NPCX_I3C_CONFIG_SADDR, config_target->static_addr);
2527 SET_FIELD(inst->CONFIG, NPCX_I3C_CONFIG_HDRCMD, CFG_HDRCMD_RD_FROM_FIFIO);
2528 SET_FIELD(inst->MAXLIMITS, NPCX_I3C_MAXLIMITS_MAXRD, (config_target->max_read_len) & 0xfff);
2529 SET_FIELD(inst->MAXLIMITS, NPCX_I3C_MAXLIMITS_MAXWR,
2530 (config_target->max_write_len) & 0xfff);
2531
2532 /* Ignore DA and detect all START and STOP */
2533 inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_MATCHSS);
2534
2535 /* Enable the target interrupt events */
2536 npcx_i3c_enable_target_interrupt(dev, true);
2537
2538 return 0;
2539 }
2540
2541 static void npcx_i3c_dev_init(const struct device *dev)
2542 {
2543 const struct npcx_i3c_config *config = dev->config;
2544 struct npcx_i3c_data *data = dev->data;
2545 struct i3c_reg *inst = config->base;
2546 struct i3c_config_controller *config_cntlr = &data->common.ctrl_config;
2547 struct i3c_config_target *config_target = &data->config_target;
2548 int idx_module = GET_MODULE_ID(config->instance_id);
2549
2550 /* Reset I3C module */
2551 reset_line_toggle_dt(&config->reset);
2552
2553 if (I3C_BCR_DEVICE_ROLE(config_target->bcr) == I3C_BCR_DEVICE_ROLE_I3C_CONTROLLER_CAPABLE) {
2554 npcx_i3c_apply_cntlr_config(dev);
2555 npcx_i3c_apply_target_config(dev);
2556
2557 if (config_cntlr->is_secondary) {
2558 /* Secondary controller enabled, so boot as a target */
2559 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA, MCONFIG_CTRENA_CAPABLE);
2560 inst->CONFIG |= BIT(NPCX_I3C_CONFIG_TGTENA); /* Target mode enable */
2561 } else {
2562 npcx_i3c_target_sel(idx_module, false); /* Set mdma as controller */
2563 /* Primary Controller enable */
2564 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA, MCONFIG_CTRENA_ON);
2565 }
2566 } else {
2567 npcx_i3c_apply_target_config(dev);
2568 SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA,
2569 MCONFIG_CTRENA_OFF); /* Controller mode off */
2570 inst->CONFIG |= BIT(NPCX_I3C_CONFIG_TGTENA); /* Target mode enable */
2571 }
2572 }
2573
2574 static int npcx_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config)
2575 {
2576 struct npcx_i3c_data *dev_data = dev->data;
2577 struct i3c_config_controller *config_cntlr;
2578 struct i3c_config_target *config_target;
2579
2580 if (config == NULL) {
2581 LOG_ERR("%s: config is NULL", __func__);
2582 return -EINVAL;
2583 }
2584
2585 if (type == I3C_CONFIG_CONTROLLER) {
2586 config_cntlr = config;
2587 /*
2588 * Check for valid configuration parameters.
2589 * Currently, must be the primary controller.
2590 */
2591 if (config_cntlr->scl.i3c == 0U) {
2592 LOG_ERR("%s: configure controller failed", __func__);
2593 return -EINVAL;
2594 }
2595
2596 /* Save requested config to dev */
2597 (void)memcpy(&dev_data->common.ctrl_config, config_cntlr, sizeof(*config_cntlr));
2598
2599 return npcx_i3c_apply_cntlr_config(dev);
2600 } else if (type == I3C_CONFIG_TARGET) {
2601 config_target = config;
2602
2603 if (config_target->pid == 0) {
2604 LOG_ERR("%s: configure target failed", __func__);
2605 return -EINVAL;
2606 }
2607
2608 return npcx_i3c_apply_target_config(dev);
2609 }
2610
2611 LOG_ERR("Config type not supported, %d", type);
2612
2613 return -EINVAL;
2614 }
2615
2616 static int npcx_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config)
2617 {
2618 struct npcx_i3c_data *data = dev->data;
2619
2620 if (config == NULL) {
2621 return -EINVAL;
2622 }
2623
2624 if (type == I3C_CONFIG_CONTROLLER) {
2625 (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config));
2626 } else if (type == I3C_CONFIG_TARGET) {
2627 (void)memcpy(config, &data->config_target, sizeof(data->config_target));
2628 } else {
2629 return -EINVAL;
2630 }
2631
2632 return 0;
2633 }
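/*
 * Usage sketch (illustrative only): reading back the active controller
 * configuration through the generic API.
 *
 *   struct i3c_config_controller cfg;
 *
 *   if (i3c_config_get(i3c_dev, I3C_CONFIG_CONTROLLER, &cfg) == 0) {
 *           // cfg.scl.i3c now holds the push-pull SCL frequency in Hz
 *   }
 */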
2634
2635 static void npcx_i3c_target_isr(const struct device *dev)
2636 {
2637 struct npcx_i3c_data *data = dev->data;
2638 const struct npcx_i3c_config *config = dev->config;
2639 struct i3c_config_target *config_tgt = &data->config_target;
2640 struct i3c_target_config *target_config = data->target_config;
2641 struct i3c_reg *inst = config->base;
2642 const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;
2643
2644 #ifdef CONFIG_I3C_NPCX_DMA
2645 struct mdma_reg *mdma_inst = config->mdma_base;
2646
2647 /* Check mdma read end (for write request) */
2648 if (IS_BIT_SET(mdma_inst->MDMA_CTL0, NPCX_MDMA_CTL_TC)) {
2649 /* Disable target read operation */
2650 npcx_i3c_target_disable_mdmafb(dev);
2651
2652 /* End of mdma read (write request) */
2653 if (get_oper_state(dev) == NPCX_I3C_OP_STATE_WR) {
2654 #ifdef CONFIG_I3C_TARGET_BUFFER_MODE
2655 if ((target_cb != NULL) && (target_cb->buf_write_received_cb != NULL)) {
2656 target_cb->buf_write_received_cb(
2657 data->target_config, data->mdma_rx_buf,
2658 npcx_i3c_target_get_mdmafb_count(dev));
2659 }
2660 #endif
2661 } else {
2662 LOG_ERR("%s: write request TC=1, operation state error, %d", __func__,
2663 data->oper_state);
2664 }
2665
2666 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
2667 }
2668 #endif /* CONFIG_I3C_NPCX_DMA */
2669
2670 while (inst->INTMASKED) {
2671 /* Check STOP detected */
2672 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_STOP)) {
2673
2674 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_START)) {
2675 inst->STATUS = BIT(NPCX_I3C_STATUS_START);
2676 }
2677
2678 #ifdef CONFIG_I3C_NPCX_DMA
2679 /* The end of xfer is a stop.
2680 * For write request: check whether mdma TC is done or still busy.
2681 * For read request: disable the mdma operation.
2682 */
2683 if ((get_oper_state(dev) == NPCX_I3C_OP_STATE_WR) ||
2684 (get_oper_state(dev) == NPCX_I3C_OP_STATE_RD)) {
2685 if (npcx_i3c_target_xfer_end_handle(dev) != 0) {
2686 LOG_ERR("xfer end handle failed after stop, op state=%d",
2687 get_oper_state(dev));
2688 }
2689 }
2690
2691 inst->STATUS = BIT(NPCX_I3C_STATUS_STOP);
2692 #endif
2693
2694 /* Notify the upper layer that a STOP condition was received */
2695 if ((target_cb != NULL) && (target_cb->stop_cb != NULL)) {
2696 target_cb->stop_cb(data->target_config);
2697 }
2698
2699 /* Clear DA matched status and re-enable interrupt */
2700 inst->STATUS = BIT(NPCX_I3C_STATUS_MATCHED);
2701 inst->INTSET = BIT(NPCX_I3C_INTSET_MATCHED);
2702 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
2703 }
2704
2705 /* Check START or Sr detected */
2706 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_START)) {
2707 /* The end of xfer is a Sr */
2708 if ((get_oper_state(dev) == NPCX_I3C_OP_STATE_WR) ||
2709 (get_oper_state(dev) == NPCX_I3C_OP_STATE_RD)) {
2710 if (-EBUSY == npcx_i3c_target_xfer_end_handle(dev)) {
2711 return;
2712 }
2713 }
2714
2715 inst->STATUS = BIT(NPCX_I3C_STATUS_START);
2716 }
2717
2718 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_TGTRST)) {
2719 inst->STATUS = BIT(NPCX_I3C_STATUS_TGTRST);
2720 }
2721
2722 /* Check error or warning has occurred */
2723 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_ERRWARN)) {
2724 LOG_ERR("%s: Error %#x", __func__, inst->ERRWARN);
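/* Write the read-back value to clear the asserted flags (assumed write-1-to-clear) */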
2725 inst->ERRWARN = inst->ERRWARN;
2726 }
2727
2728 /* Check incoming header matched target dynamic address */
2729 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_MATCHED)) {
2730 if (get_oper_state(dev) != NPCX_I3C_OP_STATE_IBI) {
2731 /* The current bus request is an SDR mode read or write */
2732 if (IS_BIT_SET(inst->STATUS, NPCX_I3C_STATUS_STREQRD)) {
2733 /* SDR read request */
2734 set_oper_state(dev, NPCX_I3C_OP_STATE_RD);
2735
2736 /* Emit read request callback */
2737 #if CONFIG_I3C_TARGET_BUFFER_MODE
2738 /* It is too late to enable mdma here; use
2739 * target_tx_write() to write tx data into the fifo before
2740 * the controller sends the read request.
2741 */
2742 if ((target_cb != NULL) &&
2743 (target_cb->buf_read_requested_cb != NULL)) {
2744 target_cb->buf_read_requested_cb(
2745 data->target_config, NULL, NULL, NULL);
2746 }
2747 #endif
2748 } else {
2749 /* SDR write request */
2750 set_oper_state(dev, NPCX_I3C_OP_STATE_WR);
2751
2752 /* Emit write request callback */
2753 if ((target_cb != NULL) &&
2754 (target_cb->write_requested_cb != NULL)) {
2755 target_cb->write_requested_cb(data->target_config);
2756 }
2757
2758 npcx_i3c_target_rx_read(dev);
2759 }
2760 }
2761
2762 /* If CONFIG.MATCHSS=1, MATCHED bit must remain 1 to detect next start
2763 * or stop.
2764 *
2765 * Clear the status bit in STOP or START handler.
2766 */
2767 if (IS_BIT_SET(inst->CONFIG, NPCX_I3C_CONFIG_MATCHSS)) {
2768 inst->INTCLR = BIT(NPCX_I3C_INTCLR_MATCHED);
2769 } else {
2770 inst->STATUS = BIT(NPCX_I3C_STATUS_MATCHED);
2771 }
2772 }
2773
2774 /* Check dynamic address changed */
2775 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_DACHG)) {
2776 inst->STATUS = BIT(NPCX_I3C_STATUS_DACHG);
2777
2778 if (IS_BIT_SET(inst->DYNADDR, NPCX_I3C_DYNADDR_DAVALID)) {
2779 if (target_config != NULL) {
2780 config_tgt->dynamic_addr =
2781 GET_FIELD(inst->DYNADDR, NPCX_I3C_DYNADDR_DADDR);
2782 }
2783 }
2784 }
2785
2786 /* A CCC that is not automatically handled was received */
2787 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_CCC)) {
2788 inst->STATUS = BIT(NPCX_I3C_STATUS_CCC);
2789 }
2790
2791 /* HDR command, address match */
2792 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_HDRMATCH)) {
2793 inst->STATUS = BIT(NPCX_I3C_STATUS_HDRMATCH);
2794 }
2795
2796 /* CCC handled (handled by IP) */
2797 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_CHANDLED)) {
2798 inst->STATUS = BIT(NPCX_I3C_STATUS_CHANDLED);
2799 }
2800
2801 /* Event requested. IBI, hot-join, bus control */
2802 if (IS_BIT_SET(inst->INTMASKED, NPCX_I3C_INTMASKED_EVENT)) {
2803 inst->STATUS = BIT(NPCX_I3C_STATUS_EVENT);
2804
2805 if (GET_FIELD(inst->STATUS, NPCX_I3C_STATUS_EVDET) ==
2806 STATUS_EVDET_REQ_SENT_ACKED) {
2807 k_sem_give(&data->target_event_lock_sem);
2808 }
2809 }
2810 }
2811
2812 /* Secondary controller (Controller register).
2813 * Check whether this I3C module is now the bus controller.
2814 * Disable target mode if the switch to controller mode succeeded.
2815 */
2816 if (IS_BIT_SET(inst->MINTMASKED, NPCX_I3C_MINTMASKED_NOWCNTLR)) {
2817 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_NOWCNTLR); /* W1C */
2818 inst->CONFIG &= ~BIT(NPCX_I3C_CONFIG_TGTENA); /* Disable target mode */
2819 }
2820 }
2821
2822 static void npcx_i3c_isr(const struct device *dev)
2823 {
2824 const struct npcx_i3c_config *config = dev->config;
2825 struct i3c_reg *inst = config->base;
2826
2827 if (IS_BIT_SET(inst->CONFIG, NPCX_I3C_CONFIG_TGTENA)) {
2828 npcx_i3c_target_isr(dev);
2829 return;
2830 }
2831
2832 #ifdef CONFIG_I3C_NPCX_DMA
2833 struct mdma_reg *mdma_inst = config->mdma_base;
2834
2835 /* Controller write end */
2836 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) {
2837 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */
2838
2839 /* MDMA write */
2840 if (get_oper_state(dev) == NPCX_I3C_OP_STATE_WR) {
2841 i3c_ctrl_notify(dev);
2842 return;
2843 }
2844 }
2845
2846 /* Controller read end */
2847 if (IS_BIT_SET(mdma_inst->MDMA_CTL0, NPCX_MDMA_CTL_TC)) {
2848 mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */
2849
2850 /* MDMA read */
2851 if (get_oper_state(dev) == NPCX_I3C_OP_STATE_RD) {
2852 i3c_ctrl_notify(dev);
2853 return;
2854 }
2855 }
2856 #endif /* CONFIG_I3C_NPCX_DMA */
2857
2858 #ifdef CONFIG_I3C_USE_IBI
2859 int ret;
2860
2861 /* Target start detected */
2862 if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_TGTSTART)) {
2863 LOG_DBG("ISR TGTSTART !");
2864
2865 /* Disable further target initiated IBI interrupt */
2866 inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);
2867 /* Clear TGTSTART interrupt */
2868 inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_TGTSTART);
2869
2870 /* Handle IBI in workqueue */
2871 ret = i3c_ibi_work_enqueue_cb(dev, npcx_i3c_ibi_work);
2872 if (ret < 0) {
2873 LOG_ERR("Enqueuing ibi work fail, ret %d", ret);
2874 inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
2875 }
2876 }
2877 #endif /* CONFIG_I3C_USE_IBI */
2878 }
2879
2880 static int npcx_i3c_init(const struct device *dev)
2881 {
2882 const struct npcx_i3c_config *config = dev->config;
2883 struct npcx_i3c_data *data = dev->data;
2884 struct i3c_config_controller *config_cntlr = &data->common.ctrl_config;
2885 const struct device *const clk_dev = config->clock_dev;
2886 struct i3c_reg *inst = config->base;
2887 int ret;
2888
2889 /* Check clock device ready */
2890 if (!device_is_ready(clk_dev)) {
2891 LOG_ERR("%s Clk device not ready", clk_dev->name);
2892 return -ENODEV;
2893 }
2894
2895 /* Set I3C_PD operational */
2896 ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clock_subsys);
2897 if (ret < 0) {
2898 LOG_ERR("Turn on I3C clock fail %d", ret);
2899 return ret;
2900 }
2901
2902 #ifdef CONFIG_I3C_NPCX_DMA
2903 ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->mdma_clk_subsys);
2904 if (ret < 0) {
2905 LOG_ERR("Turn on I3C MDMA clock fail %d", ret);
2906 return ret;
2907 }
2908 #endif
2909
2910 /* Apply pin-muxing */
2911 ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
2912 if (ret != 0) {
2913 LOG_ERR("Apply pinctrl fail %d", ret);
2914 return ret;
2915 }
2916
2917 /* Initialize locks */
2918 k_mutex_init(&data->lock_mutex);
2919 k_sem_init(&data->sync_sem, 0, 1);
2920 k_sem_init(&data->ibi_lock_sem, 1, 1);
2921 k_sem_init(&data->target_lock_sem, 1, 1);
2922 k_sem_init(&data->target_event_lock_sem, 1, 1);
2923
2924 ret = i3c_addr_slots_init(dev);
2925 if (ret != 0) {
2926 LOG_ERR("Addr slots init fail %d", ret);
2927 return ret;
2928 }
2929
2930 /* Set controller default configuration */
2931 config_cntlr->supported_hdr = I3C_MSG_HDR_DDR; /* HDR-DDR mode is supported. */
2932 config_cntlr->scl.i3c = config->clocks.i3c_pp_scl_hz; /* Set I3C frequency */
2933
2934 /* Initialize the I3C device as controller or target */
2935 npcx_i3c_dev_init(dev);
2936
2937 if (GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA) == MCONFIG_CTRENA_ON) {
2938 /* Just in case the bus is not idle. */
2939 ret = npcx_i3c_recover_bus(dev);
2940 if (ret != 0) {
2941 LOG_ERR("Apply i3c_recover_bus() fail %d", ret);
2942 return ret;
2943 }
2944 }
2945
2946 /* Configure interrupt */
2947 config->irq_config_func(dev);
2948
2949 /* Initialize driver state machine */
2950 set_oper_state(dev, NPCX_I3C_OP_STATE_IDLE);
2951
2952 /* Check that I3C is in controller mode and target devices exist in the device tree */
2953 if ((config->common.dev_list.num_i3c > 0) &&
2954 GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA) == MCONFIG_CTRENA_ON) {
2955 /* Perform bus initialization */
2956 ret = i3c_bus_init(dev, &config->common.dev_list);
2957 if (ret != 0) {
2958 LOG_ERR("Apply i3c_bus_init() fail %d", ret);
2959 return ret;
2960 }
2961 }
2962
2963 return 0;
2964 }
2965
2966 static DEVICE_API(i3c, npcx_i3c_driver_api) = {
2967 .configure = npcx_i3c_configure,
2968 .config_get = npcx_i3c_config_get,
2969
2970 .recover_bus = npcx_i3c_recover_bus,
2971
2972 .do_daa = npcx_i3c_do_daa,
2973 .do_ccc = npcx_i3c_do_ccc,
2974
2975 .i3c_device_find = npcx_i3c_device_find,
2976
2977 .i3c_xfers = npcx_i3c_transfer,
2978
2979 .target_tx_write = npcx_i3c_target_tx_write,
2980 .target_register = npcx_i3c_target_register,
2981 .target_unregister = npcx_i3c_target_unregister,
2982
2983 #ifdef CONFIG_I3C_USE_IBI
2984 .ibi_enable = npcx_i3c_ibi_enable,
2985 .ibi_disable = npcx_i3c_ibi_disable,
2986
2987 .ibi_raise = npcx_i3c_target_ibi_raise,
2988 #endif
2989
2990 #ifdef CONFIG_I3C_RTIO
2991 .iodev_submit = i3c_iodev_submit_fallback,
2992 #endif
2993 };
2994
2995 #define DT_INST_TGT_PID_PROP_OR(id, prop, idx) \
2996 COND_CODE_1(DT_INST_PROP_HAS_IDX(id, prop, idx), (DT_INST_PROP_BY_IDX(id, prop, idx)), (0))
2997 #define DT_INST_TGT_PID_RAND_PROP_OR(id, prop, idx) \
2998 COND_CODE_1(DT_INST_PROP_HAS_IDX(id, prop, idx), \
2999 IS_BIT_SET(DT_INST_PROP_BY_IDX(id, prop, 0), 0), (0))
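/*
 * Note (derived from the macros above and the PID initializer below): the
 * "tgt-pid" devicetree property is expected to hold two cells, where cell 0
 * provides PID[47:32] (MIPI manufacturer ID plus the fixed/random flag in
 * bit 0, i.e. PID[32]) and cell 1 provides PID[31:0] (part number).
 */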
3000
3001 #define I3C_NPCX_DEVICE(id) \
3002 PINCTRL_DT_INST_DEFINE(id); \
3003 static void npcx_i3c_config_func_##id(const struct device *dev) \
3004 { \
3005 IRQ_CONNECT(DT_INST_IRQN(id), DT_INST_IRQ(id, priority), npcx_i3c_isr, \
3006 DEVICE_DT_INST_GET(id), 0); \
3007 irq_enable(DT_INST_IRQN(id)); \
3008 }; \
3009 static struct i3c_device_desc npcx_i3c_device_array_##id[] = I3C_DEVICE_ARRAY_DT_INST(id); \
3010 static struct i3c_i2c_device_desc npcx_i3c_i2c_device_array_##id[] = \
3011 I3C_I2C_DEVICE_ARRAY_DT_INST(id); \
3012 static const struct npcx_i3c_config npcx_i3c_config_##id = { \
3013 .base = (struct i3c_reg *)DT_INST_REG_ADDR(id), \
3014 .clock_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE), \
3015 .reset = RESET_DT_SPEC_INST_GET(id), \
3016 .clock_subsys = NPCX_DT_CLK_CFG_ITEM_BY_NAME(id, mclkd), \
3017 .ref_clk_subsys = NPCX_DT_CLK_CFG_ITEM_BY_NAME(id, apb4), \
3018 .irq_config_func = npcx_i3c_config_func_##id, \
3019 .common.dev_list.i3c = npcx_i3c_device_array_##id, \
3020 .common.dev_list.num_i3c = ARRAY_SIZE(npcx_i3c_device_array_##id), \
3021 .common.dev_list.i2c = npcx_i3c_i2c_device_array_##id, \
3022 .common.dev_list.num_i2c = ARRAY_SIZE(npcx_i3c_i2c_device_array_##id), \
3023 .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \
3024 .instance_id = DT_INST_PROP(id, instance_id), \
3025 .clocks.i3c_pp_scl_hz = DT_INST_PROP_OR(id, i3c_scl_hz, 0), \
3026 .clocks.i3c_od_scl_hz = DT_INST_PROP_OR(id, i3c_od_scl_hz, 0), \
3027 IF_ENABLED(CONFIG_I3C_NPCX_DMA, ( \
3028 .mdma_clk_subsys = NPCX_DT_CLK_CFG_ITEM_BY_IDX(id, 2), \
3029 )) \
3030 IF_ENABLED(CONFIG_I3C_NPCX_DMA, ( \
3031 .mdma_base = (struct mdma_reg *)DT_INST_REG_ADDR_BY_IDX(id, 1), \
3032 )) }; \
3033 static struct npcx_i3c_data npcx_i3c_data_##id = { \
3034 .common.ctrl_config.is_secondary = DT_INST_PROP_OR(id, secondary, false), \
3035 .config_target.static_addr = DT_INST_PROP_OR(id, static_address, 0), \
3036 .config_target.pid = ((uint64_t)DT_INST_TGT_PID_PROP_OR(id, tgt_pid, 0) << 32) | \
3037 DT_INST_TGT_PID_PROP_OR(id, tgt_pid, 1), \
3038 .config_target.pid_random = DT_INST_TGT_PID_RAND_PROP_OR(id, tgt_pid, 0), \
3039 .config_target.bcr = DT_INST_PROP(id, bcr), \
3040 .config_target.dcr = DT_INST_PROP_OR(id, dcr, 0), \
3041 .config_target.max_read_len = DT_INST_PROP_OR(id, maximum_read, 0), \
3042 .config_target.max_write_len = DT_INST_PROP_OR(id, maximum_write, 0), \
3043 .config_target.supported_hdr = false, \
3044 }; \
3045 DEVICE_DT_INST_DEFINE(id, npcx_i3c_init, NULL, &npcx_i3c_data_##id, &npcx_i3c_config_##id, \
3046 POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, \
3047 &npcx_i3c_driver_api);
3048
3049 DT_INST_FOREACH_STATUS_OKAY(I3C_NPCX_DEVICE)
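/*
 * Devicetree sketch (illustrative only; property names are inferred from the
 * DT_INST_* accessors above, the node address and values are made up, and
 * required properties such as clocks, resets and pinctrl are omitted):
 *
 *   i3c0: i3c@400f0000 {
 *           compatible = "nuvoton,npcx-i3c";
 *           instance-id = <0>;
 *           i3c-scl-hz = <12500000>;
 *           i3c-od-scl-hz = <4170000>;
 *           bcr = <0x66>;
 *           status = "okay";
 *   };
 */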
3050